gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import os
import shutil
from textwrap import dedent
from traitlets import List, Bool
from .baseapp import BaseNbConvertApp, nbconvert_aliases, nbconvert_flags
from ..preprocessors import (
AssignLatePenalties, ClearOutput, DeduplicateIds, OverwriteCells, SaveAutoGrades, Execute, LimitOutput)
from ..api import Gradebook, MissingEntry
from .. import utils
# Command-line aliases and flags for `nbgrader autograde`, extending the
# shared nbconvert options imported above.  No extra aliases are added; two
# boolean flags are defined.
aliases = {}
aliases.update(nbconvert_aliases)
aliases.update({
})

flags = {}
flags.update(nbconvert_flags)
flags.update({
    # --create: legacy flag to auto-create missing students in the database
    # (deprecated; see AutogradeApp._config_changed).
    'create': (
        {'AutogradeApp': {'create_student': True}},
        "Create an entry for the student in the database, if one does not already exist."
    ),
    # --no-execute: skip re-execution and output clearing (useful for
    # long-running notebooks; see the caveat in AutogradeApp.examples).
    'no-execute': (
        {
            'Execute': {'enabled': False},
            'ClearOutput': {'enabled': False}
        },
        "Don't execute notebooks and clear output when autograding."
    ),
})
class AutogradeApp(BaseNbConvertApp):
    """Autograde submitted notebooks.

    Runs each submission through two nbconvert passes: a "sanitize" pass
    (clear outputs, deduplicate cell ids, overwrite test cells with the
    master versions) followed by an "autograde" pass (execute, limit output,
    save grades, assign late penalties).
    """

    name = u'nbgrader-autograde'
    description = u'Autograde a notebook by running it'

    aliases = aliases
    flags = flags

    examples = """
        Autograde submitted assignments. This takes one argument for the
        assignment id, and then (by default) autogrades assignments from the
        following directory structure:

            submitted/*/{assignment_id}/*.ipynb

        and saves the autograded files to the corresponding directory in:

            autograded/{student_id}/{assignment_id}/{notebook_id}.ipynb

        The student IDs must already exist in the database. If they do not, you
        can tell `nbgrader autograde` to add them on the fly by passing the
        --create flag.

        Note that the assignment must also be present in the database. If it is
        not, you should first create it using `nbgrader assign`. Then, during
        autograding, the cells that contain tests for the students' answers will
        be overwritten with the master version of the tests that is saved in the
        database (this prevents students from modifying the tests in order to
        improve their score).

        To grade all submissions for "Problem Set 1":

            nbgrader autograde "Problem Set 1"

        To grade only the submission by student with ID 'Hacker':

            nbgrader autograde "Problem Set 1" --student Hacker

        To grade only the notebooks that start with '1':

            nbgrader autograde "Problem Set 1" --notebook "1*"

        By default, student submissions are re-executed and their output cleared.
        For long running notebooks, it can be useful to disable this with the
        '--no-execute' flag:

            nbgrader autograde "Problem Set 1" --no-execute

        Note, however, that doing so will not guarantee that students' solutions
        are correct. If you use this flag, you should make sure you manually
        check all solutions. For example, if a student saved their notebook with
        all outputs cleared, then using --no-execute would result in them
        receiving full credit on all autograded problems.
        """

    # Deprecated; kept for backwards compatibility with old config files
    # (see _config_changed, which strips it and warns).
    create_student = Bool(
        False, config=True,
        help=dedent(
            """
            Whether to create the student at runtime if it does not
            already exist.
            """
        )
    )

    # Toggled by convert_single_notebook: True during the sanitize pass,
    # False during the autograde pass.  Controls both the input directory
    # and which preprocessors are registered.
    _sanitizing = True

    @property
    def _input_directory(self):
        # The sanitize pass reads raw submissions; the autograde pass re-reads
        # the sanitized copies already written to the autograded directory.
        if self._sanitizing:
            return self.submitted_directory
        else:
            return self.autograded_directory

    @property
    def _output_directory(self):
        return self.autograded_directory

    export_format = 'notebook'

    # Preprocessors for the first (sanitize) pass.
    sanitize_preprocessors = List([
        ClearOutput,
        DeduplicateIds,
        OverwriteCells
    ])

    # Preprocessors for the second (autograde) pass.
    autograde_preprocessors = List([
        Execute,
        LimitOutput,
        SaveAutoGrades,
        AssignLatePenalties,
    ])

    # Intentionally empty: _init_preprocessors picks the appropriate list
    # above depending on the current pass.
    preprocessors = List([])

    def _config_changed(self, name, old, new):
        """Strip the deprecated ``create_student`` option, warning the user."""
        if 'create_student' in new.AutogradeApp:
            # Logger.warn is a deprecated alias; use warning() as elsewhere
            # in this file.
            self.log.warning(
                "The AutogradeApp.create_student (or the --create flag) option is "
                "deprecated. Please specify your students through the "
                "`NbGrader.db_students` variable in your nbgrader config file."
            )
            del new.AutogradeApp.create_student
        super(AutogradeApp, self)._config_changed(name, old, new)

    def init_assignment(self, assignment_id, student_id):
        """Prepare one student's assignment for autograding.

        Ensures the student and submission exist in the gradebook, copies
        non-notebook source files over the submission, and filters
        ``self.notebooks`` down to notebooks known to the database.
        """
        super(AutogradeApp, self).init_assignment(assignment_id, student_id)

        # try to get the student from the database, and throw an error if it
        # doesn't exist
        student = None
        for s in self.db_students:
            if s['id'] == student_id:
                student = s.copy()
                break

        if student is not None:
            del student['id']
            self.log.info("Creating/updating student with ID '%s': %s", student_id, student)
            gb = Gradebook(self.db_url)
            gb.update_or_create_student(student_id, **student)
            gb.db.close()
        else:
            self.fail("No student with ID '%s' exists in the config", student_id)

        # try to read in a timestamp from file
        src_path = self._format_source(assignment_id, student_id)
        timestamp = self._get_existing_timestamp(src_path)
        gb = Gradebook(self.db_url)
        if timestamp:
            submission = gb.update_or_create_submission(
                assignment_id, student_id, timestamp=timestamp)
            self.log.info("%s submitted at %s", submission, timestamp)

            # if the submission is late, print out how many seconds late it is
            # (the redundant `timestamp and` guard was dropped: this branch
            # already implies a truthy timestamp)
            if submission.total_seconds_late > 0:
                self.log.warning("%s is %s seconds late", submission, submission.total_seconds_late)
        else:
            submission = gb.update_or_create_submission(assignment_id, student_id)
        gb.db.close()

        # copy files over from the source directory
        self.log.info("Overwriting files with master versions from the source directory")
        dest_path = self._format_dest(assignment_id, student_id)
        source_path = self._format_path(self.source_directory, '.', assignment_id)
        source_files = utils.find_all_files(source_path, self.ignore + ["*.ipynb"])

        # copy them to the build directory
        for filename in source_files:
            dest = os.path.join(dest_path, os.path.relpath(filename, source_path))
            if not os.path.exists(os.path.dirname(dest)):
                os.makedirs(os.path.dirname(dest))
            if os.path.exists(dest):
                os.remove(dest)
            self.log.info("Copying %s -> %s", filename, dest)
            shutil.copy(filename, dest)

        # ignore notebooks that aren't in the database
        notebooks = []
        gb = Gradebook(self.db_url)
        for notebook in self.notebooks:
            notebook_id = os.path.splitext(os.path.basename(notebook))[0]
            try:
                gb.find_notebook(notebook_id, assignment_id)
            except MissingEntry:
                self.log.warning("Skipping unknown notebook: %s", notebook)
                continue
            else:
                notebooks.append(notebook)
        gb.db.close()
        self.notebooks = notebooks

    def _init_preprocessors(self):
        """Register the preprocessor list matching the current pass."""
        self.exporter._preprocessors = []
        if self._sanitizing:
            preprocessors = self.sanitize_preprocessors
        else:
            preprocessors = self.autograde_preprocessors

        for pp in preprocessors:
            self.exporter.register_preprocessor(pp)

    def convert_single_notebook(self, notebook_filename):
        """Run the sanitize pass, then the autograde pass, on one notebook."""
        self.log.info("Sanitizing %s", notebook_filename)
        self._sanitizing = True
        self._init_preprocessors()
        super(AutogradeApp, self).convert_single_notebook(notebook_filename)

        # The autograde pass re-reads the sanitized copy from the build
        # directory rather than the raw submission.
        notebook_filename = os.path.join(self.writer.build_directory, os.path.basename(notebook_filename))
        self.log.info("Autograding %s", notebook_filename)
        self._sanitizing = False
        self._init_preprocessors()
        super(AutogradeApp, self).convert_single_notebook(notebook_filename)

        # Restore the default state for the next notebook.
        self._sanitizing = True
| |
"""
A flexible and relatively robust implementation
of the Observer Pattern.
"""
import weakref
"""
----------------------
Internal Documentation
----------------------
Storage Structures:
registry : {
(notification, observable) : ObserverDict(
observer : method name
)
}
holds : {
    (notification, observable, observer) : {
        count=int,
        notifications=[
            (notification name, observable ref, data)
        ]
    }
}
disabled : {
(notification, observable, observer) : count
}
"""
class NotificationCenter(object):
    """Dispatch notifications from observable objects to registered observers.

    Observers are stored behind weak references, keyed by
    ``(notification, observable)``, so registration can be scoped to a
    specific notification name, a specific observable, both, or neither.
    Posting can be temporarily held (queued for later delivery) or disabled
    outright, at varying levels of specificity.
    """

    def __init__(self):
        # (notification, observable ref) -> ObserverDict(observer ref -> method name)
        self._registry = {}
        # (notification, observable ref, observer ref) -> dict(count=int, notifications=[...])
        self._holds = {}
        # (notification, observable ref, observer ref) -> disable count (int)
        self._disabled = {}

    # -----
    # Basic
    # -----

    def addObserver(self, observer, methodName, notification=None, observable=None):
        """
        Add an observer to this notification dispatcher.

        * **observer** An object that can be referenced with weakref.
        * **methodName** A string representing the method to be called
          when the notification is posted.
        * **notification** The notification that the observer should
          be notified of. If this is None, all notifications for
          the *observable* will be posted to *observer*.
        * **observable** The object to observe. If this is None,
          all notifications with the name provided as *notification*
          will be posted to the *observer*.

        If None is given for both *notification* and *observable*,
        **all** notifications posted will be sent to the given
        method of the observer.

        The method that will be called as a result of the action
        must accept a single *notification* argument. This will
        be a :class:`Notification` object.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        observer = weakref.ref(observer)
        key = (notification, observable)
        if key not in self._registry:
            self._registry[key] = ObserverDict()
        # Only one callback per (observer, notification, observable) combo.
        assert observer not in self._registry[key], "An observer is only allowed to have one callback for a given notification + observable combination."
        self._registry[key][observer] = methodName

    def hasObserver(self, observer, notification, observable):
        """
        Returns a boolean indicating if the **observer** is registered
        for **notification** posted by **observable**. Either
        *observable* or *notification* may be None.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        key = (notification, observable)
        if key not in self._registry:
            return False
        observer = weakref.ref(observer)
        return observer in self._registry[key]

    def removeObserver(self, observer, notification, observable):
        """
        Remove an observer from this notification dispatcher.

        * **observer** A registered object.
        * **notification** The notification that the observer was registered
          to be notified of.
        * **observable** The object being observed.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        key = (notification, observable)
        if key not in self._registry:
            return
        observer = weakref.ref(observer)
        if observer in self._registry[key]:
            del self._registry[key][observer]
        # Drop the whole registry entry once its last observer is gone.
        if not len(self._registry[key]):
            del self._registry[key]

    def postNotification(self, notification, observable, data=None):
        """
        Post *notification* for *observable*, optionally carrying *data*.

        Both *notification* and *observable* are required.  Active holds and
        disables are consulted from least to most specific; registered
        observers are looked up from most to least specific registration key.
        """
        assert notification is not None
        assert observable is not None
        observableRef = weakref.ref(observable)
        # observer independent hold/disabled
        # ----------------------------------
        if self._holds or self._disabled:
            holdDisabledPossibilities = (
                # least specific -> most specific
                # suspended for all
                (None, None, None),
                # suspended for this notification
                (notification, None, None),
                # suspended for this observer
                (None, observableRef, None),
                # suspended for this notification + observable
                (notification, observableRef, None)
            )
            for key in holdDisabledPossibilities:
                if key in self._disabled:
                    return
                if key in self._holds:
                    n = (notification, observableRef, data)
                    # Coalesce: don't queue the same notification twice in a row.
                    if not self._holds[key]["notifications"] or self._holds[key]["notifications"][-1] != n:
                        self._holds[key]["notifications"].append(n)
                    return
        # posting
        # -------
        notificationObj = Notification(notification, observableRef, data)
        registryPossibilities = (
            # most specific -> least specific
            (notification, observableRef),
            (notification, None),
            (None, observableRef),
            (None, None)
        )
        for key in registryPossibilities:
            if key not in self._registry:
                continue
            # Iterate over a snapshot so callbacks may add/remove observers.
            for observerRef, methodName in list(self._registry[key].items()):
                # observer specific hold/disabled
                # -------------------------------
                if self._holds or self._disabled:
                    holdDisabledPossibilities = (
                        # least specific -> most specific
                        # suspended for observer
                        (None, None, observerRef),
                        # suspended for notification + observer
                        (notification, None, observerRef),
                        # suspended for observable + observer
                        (None, observableRef, observerRef),
                        # suspended for notification + observable + observer
                        (notification, observableRef, observerRef)
                    )
                    disabled = False
                    if self._disabled:
                        for disableKey in holdDisabledPossibilities:
                            if disableKey in self._disabled:
                                disabled = True
                                break
                    if disabled:
                        continue
                    hold = False
                    if self._holds:
                        for holdKey in holdDisabledPossibilities:
                            if holdKey in self._holds:
                                hold = True
                                n = (notification, observableRef, data)
                                # Coalesce consecutive duplicates, as above.
                                if not self._holds[holdKey]["notifications"] or self._holds[holdKey]["notifications"][-1] != n:
                                    self._holds[holdKey]["notifications"].append(n)
                                break
                    if hold:
                        continue
                # post
                # ----
                observer = observerRef()
                if observer is None:
                    # dead ref.
                    # XXX: delete?
                    continue
                callback = getattr(observer, methodName)
                callback(notificationObj)

    # ----
    # Hold
    # ----

    def holdNotifications(self, observable=None, notification=None, observer=None):
        """
        Hold all notifications posted to all objects observing
        **notification** in **observable**.

        * **observable** The object that the notification belongs to. This is optional.
          If no *observable* is given, *all* *notifications* will be held.
        * **notification** The name of the notification. This is optional.
          If no *notification* is given, *all* notifications for *observable*
          will be held.
        * **observer** The specific observer to hold notifications for.
          If no *observer* is given, the appropriate notifications will be
          held for all observers.

        Held notifications will be posted after the matching *notification*
        and *observable* have been passed to :meth:`Notification.releaseHeldNotifications`.
        This object will retain a count of how many times it has been told to
        hold notifications for *notification* and *observable*. It will not
        post the notifications until the *notification* and *observable*
        have been released the same number of times.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        if observer is not None:
            observer = weakref.ref(observer)
        key = (notification, observable, observer)
        if key not in self._holds:
            self._holds[key] = dict(count=0, notifications=[])
        self._holds[key]["count"] += 1

    def releaseHeldNotifications(self, observable=None, notification=None, observer=None):
        """
        Release all held notifications posted to all objects observing
        **notification** in **observable**.

        * **observable** The object that the notification belongs to. This is optional.
        * **notification** The name of the notification. This is optional.
        * **observer** The observer. This is optional.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        if observer is not None:
            observer = weakref.ref(observer)
        key = (notification, observable, observer)
        self._holds[key]["count"] -= 1
        # Only re-post once every nested hold has been released.
        if self._holds[key]["count"] == 0:
            notifications = self._holds[key]["notifications"]
            del self._holds[key]
            for notification, observableRef, data in notifications:
                self.postNotification(notification, observableRef(), data)

    def areNotificationsHeld(self, observable=None, notification=None, observer=None):
        """
        Returns a boolean indicating if notifications posted to all objects observing
        **notification** in **observable** are being held.

        * **observable** The object that the notification belongs to. This is optional.
        * **notification** The name of the notification. This is optional.
        * **observer** The observer. This is optional.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        if observer is not None:
            observer = weakref.ref(observer)
        key = (notification, observable, observer)
        return key in self._holds

    # -------
    # Disable
    # -------

    def disableNotifications(self, observable=None, notification=None, observer=None):
        """
        Disable all posts of **notification** from **observable** posted
        to **observer** observing.

        * **observable** The object that the notification belongs to. This is optional.
          If no *observable* is given, *all* *notifications* will be disabled for *observer*.
        * **notification** The name of the notification. This is optional.
          If no *notification* is given, *all* notifications for *observable*
          will be disabled for *observer*.
        * **observer** The specific observer to not send posts to. If no
          *observer* is given, the appropriate notifications will not
          be posted to any observers.

        This object will retain a count of how many times it has been told to
        disable notifications for *notification* and *observable*. It will not
        enable new notifications until the *notification* and *observable*
        have been released the same number of times.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        if observer is not None:
            observer = weakref.ref(observer)
        key = (notification, observable, observer)
        if key not in self._disabled:
            self._disabled[key] = 0
        self._disabled[key] += 1

    def enableNotifications(self, observable=None, notification=None, observer=None):
        """
        Enable notifications posted to all objects observing
        **notification** in **observable**.

        * **observable** The object that the notification belongs to. This is optional.
        * **notification** The name of the notification. This is optional.
        * **observer** The observer. This is optional.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        if observer is not None:
            observer = weakref.ref(observer)
        key = (notification, observable, observer)
        self._disabled[key] -= 1
        # Drop the entry once every nested disable has been balanced.
        if self._disabled[key] == 0:
            del self._disabled[key]

    def areNotificationsDisabled(self, observable=None, notification=None, observer=None):
        """
        Returns a boolean indicating if notifications posted to all objects observing
        **notification** in **observable** are disabled.

        * **observable** The object that the notification belongs to. This is optional.
        * **notification** The name of the notification. This is optional.
        * **observer** The observer. This is optional.
        """
        if observable is not None:
            observable = weakref.ref(observable)
        if observer is not None:
            observer = weakref.ref(observer)
        key = (notification, observable, observer)
        return key in self._disabled
class Notification(object):
    """An immutable wrapper around one posted notification."""

    __slots__ = ("_name", "_objRef", "_data")

    def __init__(self, name, objRef, data):
        self._name = name
        self._objRef = objRef
        self._data = data

    def __repr__(self):
        return "<Notification: %s %s>" % (self.name, repr(self.object))

    @property
    def name(self):
        """The notification name. A string."""
        return self._name

    @property
    def object(self):
        """The observable object the notification belongs to."""
        ref = self._objRef
        if ref is None:
            return None
        return ref()

    @property
    def data(self):
        """Arbitrary data passed along with the notification. There is no set format for this data and there is not requirement that any data be present. Refer to the documentation for methods that are responsible for generating notifications for information about this data."""
        return self._data
class ObserverDict(dict):
    """A dict that remembers key insertion order.

    Re-setting an existing key moves it to the end of the order.  ``keys``,
    ``values``, ``items`` and iteration all follow insertion order.  (This
    predates ordered builtin dicts; kept for explicit ordering semantics.)
    """

    def __init__(self):
        super(ObserverDict, self).__init__()
        # Keys in insertion order; kept in sync by __setitem__/__delitem__.
        self._order = []

    def keys(self):
        return list(self._order)

    def values(self):
        return [self[key] for key in self]

    def items(self):
        return [(key, self[key]) for key in self]

    def __iter__(self):
        # Iterate over a one-shot snapshot of the order.  The previous
        # implementation re-sliced the list on every step (accidentally
        # O(n^2)); a single copy keeps iteration safe under mutation at
        # linear cost.
        return iter(list(self._order))

    def iterkeys(self):
        return iter(self)

    def itervalues(self):
        for key in self:
            yield self[key]

    def iteritems(self):
        for key in self:
            yield (key, self[key])

    def __delitem__(self, key):
        super(ObserverDict, self).__delitem__(key)
        self._order.remove(key)

    def __setitem__(self, key, value):
        # Deleting first ensures a re-set key moves to the end of the order.
        if key in self:
            del self[key]
        super(ObserverDict, self).__setitem__(key, value)
        self._order.append(key)
# -----
# Tests
# -----
class _TestObserver(object):
    # Minimal observer used by the doctests below: its callback prints the
    # notification name followed by the posting observable's ``name``.
    def notificationCallback(self, notification):
        print(notification.name, notification.object.name)
class _TestObservable(object):
    # Minimal observable used by the doctests below; posts notifications
    # through the shared NotificationCenter it was constructed with.
    def __init__(self, center, name):
        self.center = center
        self.name = name

    def postNotification(self, name):
        self.center.postNotification(name, self)
def _testAddObserver():
    # Doctests for addObserver/hasObserver across all four scoping
    # combinations of notification and observable.
    """
    # notification, observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable)
    >>> center.hasObserver(observer, "A", observable)
    True
    >>> center.hasObserver(observer, "B", observable)
    False
    # notification, no observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", None)
    >>> center.hasObserver(observer, "A", None)
    True
    >>> center.hasObserver(observer, "A", observable)
    False
    # no notification, observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", None, observable)
    >>> center.hasObserver(observer, None, observable)
    True
    >>> center.hasObserver(observer, "A", observable)
    False
    # no notification, no observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", None, None)
    >>> center.hasObserver(observer, None, None)
    True
    >>> center.hasObserver(observer, "A", observable)
    False
    >>> center.hasObserver(observer, None, observable)
    False
    >>> center.hasObserver(observer, "A", None)
    False
    """
def _testRemoveObserver():
    # Doctests for removeObserver across all four scoping combinations.
    """
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable)
    >>> center.removeObserver(observer, "A", observable)
    >>> center.hasObserver(observer, "A", observable)
    False
    # notification, observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable)
    >>> center.removeObserver(observer, "A", observable)
    >>> center.hasObserver(observer, "A", observable)
    False
    # notification, no observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", None)
    >>> center.removeObserver(observer, "A", None)
    >>> center.hasObserver(observer, "A", None)
    False
    # no notification, observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", None, observable)
    >>> center.removeObserver(observer, None, observable)
    >>> center.hasObserver(observer, None, observable)
    False
    # no notification, no observable
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", None, None)
    >>> center.removeObserver(observer, None, None)
    >>> center.hasObserver(observer, None, None)
    False
    """
def _testPostNotification():
    # Doctests for postNotification delivery: observers only receive posts
    # matching their registration scope.
    """
    # notification, observable
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.postNotification("A", observable1)
    A Observable1
    >>> center.postNotification("A", observable2)
    >>> center.postNotification("B", observable1)
    >>> center.postNotification("B", observable2)
    # notification, no observable
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", None)
    >>> center.postNotification("A", observable1)
    A Observable1
    >>> center.postNotification("A", observable2)
    A Observable2
    >>> center.postNotification("B", observable1)
    >>> center.postNotification("B", observable2)
    # no notification, observable
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", None, observable1)
    >>> center.postNotification("A", observable1)
    A Observable1
    >>> center.postNotification("A", observable2)
    >>> center.postNotification("B", observable1)
    B Observable1
    >>> center.postNotification("B", observable2)
    # no notification, no observable
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", None, None)
    >>> center.postNotification("A", observable1)
    A Observable1
    >>> center.postNotification("A", observable2)
    A Observable2
    >>> center.postNotification("B", observable1)
    B Observable1
    >>> center.postNotification("B", observable2)
    B Observable2
    """
def _testHoldNotifications():
    # Doctests for holding/releasing notifications; note that duplicate
    # consecutive posts are coalesced while held.
    """
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable1)
    >>> center.addObserver(observer, "notificationCallback", "C", observable2)
    # hold all notifications
    >>> center.holdNotifications()
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    >>> observable2.postNotification("C")
    >>> center.releaseHeldNotifications()
    A Observable1
    B Observable1
    C Observable2
    # hold all notifications of a specific observable
    >>> center.holdNotifications(observable=observable1)
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    >>> observable2.postNotification("C")
    C Observable2
    >>> center.releaseHeldNotifications(observable=observable1)
    A Observable1
    B Observable1
    # hold all notifications of a specific notification
    >>> center.holdNotifications(notification="A")
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    B Observable1
    >>> observable2.postNotification("C")
    C Observable2
    >>> center.releaseHeldNotifications(notification="A")
    A Observable1
    """
def _testAreNotificationsHeld():
    # Doctests for areNotificationsHeld at each hold specificity.
    """
    # all held
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable)
    >>> center.holdNotifications()
    >>> center.areNotificationsHeld()
    True
    >>> center.releaseHeldNotifications()
    >>> center.areNotificationsHeld()
    False
    # observable off
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable2)
    >>> center.holdNotifications(observable=observable1)
    >>> center.areNotificationsHeld(observable=observable1)
    True
    >>> center.areNotificationsHeld(observable=observable2)
    False
    >>> center.releaseHeldNotifications(observable=observable1)
    >>> center.areNotificationsHeld(observable=observable1)
    False
    # notification off
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable)
    >>> center.addObserver(observer, "notificationCallback", "B", observable)
    >>> center.holdNotifications(notification="A")
    >>> center.areNotificationsHeld(notification="A")
    True
    >>> center.areNotificationsHeld(notification="B")
    False
    >>> center.releaseHeldNotifications(notification="A")
    >>> center.areNotificationsHeld(notification="A")
    False
    # observer off
    >>> center = NotificationCenter()
    >>> observable = _TestObservable(center, "Observable")
    >>> observer1 = _TestObserver()
    >>> observer2 = _TestObserver()
    >>> center.addObserver(observer1, "notificationCallback", "A", observable)
    >>> center.addObserver(observer2, "notificationCallback", "A", observable)
    >>> center.holdNotifications(observer=observer1)
    >>> center.areNotificationsHeld(observer=observer1)
    True
    >>> center.areNotificationsHeld(observer=observer2)
    False
    >>> center.releaseHeldNotifications(observer=observer1)
    >>> center.areNotificationsHeld(observer=observer1)
    False
    """
def _testDisableNotifications():
    # Doctests for disabling/enabling notification posting; disabled posts
    # are dropped (not queued, unlike holds).
    """
    # disable all notifications
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable1)
    >>> center.addObserver(observer, "notificationCallback", "C", observable2)
    >>> center.disableNotifications()
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    >>> observable2.postNotification("C")
    >>> center.enableNotifications()
    >>> observable1.postNotification("A")
    A Observable1
    # disable all notifications for a specific observable
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable1)
    >>> center.addObserver(observer, "notificationCallback", "C", observable2)
    >>> center.disableNotifications(observable=observable1)
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    >>> observable2.postNotification("C")
    C Observable2
    >>> center.enableNotifications(observable=observable1)
    >>> observable1.postNotification("A")
    A Observable1
    # disable all notifications for a specific notification
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable1)
    >>> center.addObserver(observer, "notificationCallback", "C", observable2)
    >>> center.disableNotifications(notification="A")
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    B Observable1
    >>> observable2.postNotification("C")
    C Observable2
    >>> center.enableNotifications(notification="A")
    >>> observable1.postNotification("A")
    A Observable1
    # disable all notifications for a specific observer
    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable1)
    >>> center.addObserver(observer, "notificationCallback", "C", observable2)
    >>> center.disableNotifications(observer=observer)
    >>> observable1.postNotification("A")
    >>> observable1.postNotification("B")
    >>> observable2.postNotification("C")
    >>> center.enableNotifications(observer=observer)
    >>> observable1.postNotification("A")
    A Observable1
    """
# NOTE: this function exists only to host doctests for
# NotificationCenter.areNotificationsDisabled(); it is never called directly.
def _testAreNotificationsDisabled():
    """
    # all off

    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable2)
    >>> center.disableNotifications()
    >>> center.areNotificationsDisabled()
    True
    >>> center.enableNotifications()
    >>> center.areNotificationsDisabled()
    False

    # observable off

    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable2)
    >>> center.disableNotifications(observable=observable1)
    >>> center.areNotificationsDisabled(observable=observable1)
    True
    >>> center.areNotificationsDisabled(observable=observable2)
    False
    >>> center.enableNotifications(observable=observable1)
    >>> center.areNotificationsDisabled(observable=observable1)
    False

    # notification off

    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer = _TestObserver()
    >>> center.addObserver(observer, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer, "notificationCallback", "B", observable2)
    >>> center.disableNotifications(notification="A")
    >>> center.areNotificationsDisabled(notification="A")
    True
    >>> center.areNotificationsDisabled(notification="B")
    False
    >>> center.enableNotifications(notification="A")
    >>> center.areNotificationsDisabled(notification="A")
    False

    # observer off

    >>> center = NotificationCenter()
    >>> observable1 = _TestObservable(center, "Observable1")
    >>> observable2 = _TestObservable(center, "Observable2")
    >>> observer1 = _TestObserver()
    >>> observer2 = _TestObserver()
    >>> center.addObserver(observer1, "notificationCallback", "A", observable1)
    >>> center.addObserver(observer2, "notificationCallback", "A", observable1)
    >>> center.disableNotifications(observer=observer1)
    >>> center.areNotificationsDisabled(observer=observer1)
    True
    >>> center.areNotificationsDisabled(observer=observer2)
    False
    >>> center.enableNotifications(observer=observer1)
    >>> center.areNotificationsDisabled(observer=observer1)
    False
    """
if __name__ == "__main__":
    # Run all doctests in this module when executed directly.
    import doctest
    doctest.testmod()
| |
#
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import os
import sys
from com.ziclix.python.sql import zxJDBC
from org.slf4j import LoggerFactory
from wherehows.common import Constant
import FileUtil
class HiveLoad:
    """Load Hive metadata CSV extracts into the WhereHows MySQL store.

    Runs under Jython: database access goes through zxJDBC and logging
    through slf4j.  Connection settings and input file locations are read
    from the module-level ``args`` mapping, which the ETL launcher passes
    in as ``sys.argv[1]`` (see the ``__main__`` block at the bottom of the
    file).
    """

    def __init__(self, wh_etl_exec_id='0'):
        """Open the MySQL connection and resolve the input file paths.

        Args:
            wh_etl_exec_id: WhereHows ETL execution id; used here only to
                tag the logger name (the value used in SQL comes from
                ``args`` below).
        """
        self.logger = LoggerFactory.getLogger("%s[%s]" % (self.__class__.__name__, wh_etl_exec_id))

        # set up connection from the launcher-supplied `args` mapping
        # NOTE(review): `args` must be bound at module level before this
        # class is instantiated -- confirm for any new entry points.
        username = args[Constant.WH_DB_USERNAME_KEY]
        password = args[Constant.WH_DB_PASSWORD_KEY]
        JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
        JDBC_URL = args[Constant.WH_DB_URL_KEY]
        self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
        self.conn_cursor = self.conn_mysql.cursor()

        # optional per-session InnoDB lock wait timeout override
        if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
            lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
            self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)

        # CSV files produced by the Hive extract step of this ETL job
        temp_dir = FileUtil.etl_temp_dir(args, "HIVE")
        self.input_schema_file = os.path.join(temp_dir, args[Constant.HIVE_SCHEMA_CSV_FILE_KEY])
        self.input_field_file = os.path.join(temp_dir, args[Constant.HIVE_FIELD_METADATA_KEY])
        self.input_instance_file = os.path.join(temp_dir, args[Constant.HIVE_INSTANCE_CSV_FILE_KEY])
        self.input_dependency_file = os.path.join(temp_dir, args[Constant.HIVE_DEPENDENCY_CSV_FILE_KEY])

        # db_id tags every staging row so concurrent jobs don't collide
        self.db_id = args[Constant.JOB_REF_ID_KEY]
        self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]

    def load_metadata(self):
        """Load the dataset-level CSV into stg_dict_dataset and merge it
        into dict_dataset; rows missing from this batch are deleted from
        the final table (handles dropped/renamed Hive tables)."""
        load_cmd = """
        DELETE FROM stg_dict_dataset WHERE db_id = {db_id};

        LOAD DATA LOCAL INFILE '{source_file}'
        INTO TABLE stg_dict_dataset
        FIELDS TERMINATED BY '\Z' ESCAPED BY '\0'
        (`name`, `schema`, properties, fields, urn, source, dataset_type, storage_type, @sample_partition_full_path, source_created_time, @source_modified_time)
        SET db_id = {db_id},
            source_modified_time=nullif(@source_modified_time,''),
            sample_partition_full_path=nullif(@sample_partition_full_path,''),
            is_active = TRUE,
            wh_etl_exec_id = {wh_etl_exec_id};

        -- SELECT COUNT(*) FROM stg_dict_dataset;

        -- clear
        DELETE FROM stg_dict_dataset where db_id = {db_id}
          AND (length(`name`)) = 0
          OR `name` like 'tmp\_%'
          OR `name` like 't\_%'
        ;

        update stg_dict_dataset
        set location_prefix = substring_index(substring_index(urn, '/', 4), '/', -2) /* hive location_prefix is it's schema name*/
        WHERE db_id = {db_id} and location_prefix is null;

        update stg_dict_dataset
        set parent_name = substring_index(substring_index(urn, '/', 4), '/', -1) /* hive parent_name is it's schema name*/
        where db_id = {db_id} and parent_name is null;

        -- insert into final table
        INSERT INTO dict_dataset
        ( `name`,
          `schema`,
          schema_type,
          fields,
          properties,
          urn,
          source,
          location_prefix,
          parent_name,
          storage_type,
          ref_dataset_id,
          is_active,
          dataset_type,
          hive_serdes_class,
          is_partitioned,
          partition_layout_pattern_id,
          sample_partition_full_path,
          source_created_time,
          source_modified_time,
          created_time,
          wh_etl_exec_id,
          db_id
        )
        select s.name, s.schema, s.schema_type, s.fields,
          s.properties, s.urn,
          s.source, s.location_prefix, s.parent_name,
          s.storage_type, s.ref_dataset_id, s.is_active,
          s.dataset_type, s.hive_serdes_class, s.is_partitioned,
          s.partition_layout_pattern_id, s.sample_partition_full_path,
          s.source_created_time, s.source_modified_time, UNIX_TIMESTAMP(now()),
          s.wh_etl_exec_id, s.db_id
        from stg_dict_dataset s
        where s.db_id = {db_id}
        on duplicate key update
          `name`=s.name, `schema`=s.schema, schema_type=s.schema_type, fields=s.fields,
          properties=s.properties, source=s.source, location_prefix=s.location_prefix, parent_name=s.parent_name,
          storage_type=s.storage_type, ref_dataset_id=s.ref_dataset_id, is_active=s.is_active,
          dataset_type=s.dataset_type, hive_serdes_class=s.hive_serdes_class, is_partitioned=s.is_partitioned,
          partition_layout_pattern_id=s.partition_layout_pattern_id, sample_partition_full_path=s.sample_partition_full_path,
          source_created_time=s.source_created_time, source_modified_time=s.source_modified_time,
          modified_time=UNIX_TIMESTAMP(now()), wh_etl_exec_id=s.wh_etl_exec_id
        ;

        -- handle deleted or renamed hive tables
        DELETE ds from dict_dataset ds
        where ds.db_id = {db_id} AND NOT EXISTS (select 1 from stg_dict_dataset where urn = ds.urn)
        ;
        """.format(source_file=self.input_schema_file, db_id=self.db_id, wh_etl_exec_id=self.wh_etl_exec_id)

        self.executeCommands(load_cmd)
        self.logger.info("Load dataset metadata.")

    def load_field(self):
        """Load the field-level CSV into stg_dict_field_detail and reconcile
        dict_field_detail: delete fields gone from this batch, update the
        changed ones, insert new ones, then sync field comments.

        :return: None
        """
        load_cmd = """
        DELETE FROM stg_dict_field_detail WHERE db_id = {db_id};

        LOAD DATA LOCAL INFILE '{source_file}'
        INTO TABLE stg_dict_field_detail
        FIELDS TERMINATED BY '\Z'
        (urn, sort_id, parent_sort_id, parent_path, field_name, data_type,
         @is_nullable, @default_value, @data_size, @namespace, @description)
        SET db_id = {db_id}
          , is_nullable=nullif(@is_nullable,'null')
          , default_value=nullif(@default_value,'null')
          , data_size=nullif(@data_size,'null')
          , namespace=nullif(@namespace,'null')
          , description=nullif(@description,'null')
          , last_modified=now();

        -- update stg_dict_field_detail dataset_id
        update stg_dict_field_detail sf, dict_dataset d
        set sf.dataset_id = d.id where sf.urn = d.urn
        and sf.db_id = {db_id};
        delete from stg_dict_field_detail
        where db_id = {db_id} and dataset_id is null;  -- remove if not match to dataset

        -- delete old record if it does not exist in this load batch anymore (but have the dataset id)
        -- join with dict_dataset to avoid right join using index. (using index will slow down the query)
        create temporary table if not exists t_deleted_fields (primary key (field_id)) ENGINE=MyISAM
          SELECT x.field_id
          FROM (select dataset_id, field_name, parent_path from stg_dict_field_detail where db_id = {db_id}) s
          RIGHT JOIN
            ( select dataset_id, field_id, field_name, parent_path from dict_field_detail
              where dataset_id in (select dataset_id from stg_dict_field_detail where db_id = {db_id})
            ) x
          ON s.dataset_id = x.dataset_id
          AND s.field_name = x.field_name
          AND s.parent_path <=> x.parent_path
          WHERE s.field_name is null
        ; -- run time : ~2min

        delete from dict_field_detail where field_id in (select field_id from t_deleted_fields);

        -- update the old record if some thing changed. e.g. sort id changed
        update dict_field_detail t join
        (
          select x.field_id, s.*
          from stg_dict_field_detail s
          join dict_field_detail x
            on s.field_name = x.field_name
            and s.parent_path <=> x.parent_path
            and s.dataset_id = x.dataset_id
          where s.db_id = {db_id}
            and (x.sort_id <> s.sort_id
              or x.parent_sort_id <> s.parent_sort_id
              or x.data_type <> s.data_type
              or x.data_size <> s.data_size or (x.data_size is null XOR s.data_size is null)
              or x.data_precision <> s.data_precision or (x.data_precision is null XOR s.data_precision is null)
              or x.is_nullable <> s.is_nullable or (x.is_nullable is null XOR s.is_nullable is null)
              or x.is_partitioned <> s.is_partitioned or (x.is_partitioned is null XOR s.is_partitioned is null)
              or x.is_distributed <> s.is_distributed or (x.is_distributed is null XOR s.is_distributed is null)
              or x.default_value <> s.default_value or (x.default_value is null XOR s.default_value is null)
              or x.namespace <> s.namespace or (x.namespace is null XOR s.namespace is null)
            )
        ) p
        on t.field_id = p.field_id
        set t.sort_id = p.sort_id,
            t.parent_sort_id = p.parent_sort_id,
            t.data_type = p.data_type,
            t.data_size = p.data_size,
            t.data_precision = p.data_precision,
            t.is_nullable = p.is_nullable,
            t.is_partitioned = p.is_partitioned,
            t.is_distributed = p.is_distributed,
            t.default_value = p.default_value,
            t.namespace = p.namespace,
            t.modified = now();

        -- insert new ones
        CREATE TEMPORARY TABLE IF NOT EXISTS t_existed_field
          ( primary key (urn, sort_id, db_id) ) ENGINE=MyISAM
          AS (
            SELECT sf.urn, sf.sort_id, sf.db_id, count(*) field_count
            FROM stg_dict_field_detail sf
            JOIN dict_field_detail t
              ON sf.dataset_id = t.dataset_id
              AND sf.field_name = t.field_name
              AND sf.parent_path <=> t.parent_path
            WHERE sf.db_id = {db_id}
              and sf.dataset_id IS NOT NULL
            group by 1,2,3
          );

        insert ignore into dict_field_detail (
          dataset_id, fields_layout_id, sort_id, parent_sort_id, parent_path,
          field_name, namespace, data_type, data_size, is_nullable, default_value,
          modified
        )
        select
          sf.dataset_id, 0, sf.sort_id, sf.parent_sort_id, sf.parent_path,
          sf.field_name, sf.namespace, sf.data_type, sf.data_size, sf.is_nullable, sf.default_value, now()
        from stg_dict_field_detail sf
        where sf.db_id = {db_id} and sf.dataset_id is not null
          and (sf.urn, sf.sort_id, sf.db_id) not in (select urn, sort_id, db_id from t_existed_field)
        ;

        analyze table dict_field_detail;

        -- delete old record in staging field comment map
        delete from stg_dict_dataset_field_comment where db_id = {db_id};

        -- insert new field comments
        insert into field_comments (
          user_id, comment, created, modified, comment_crc32_checksum
        )
        select 0 user_id, description, now() created, now() modified, crc32(description) from
        (
          select sf.description
          from stg_dict_field_detail sf left join field_comments fc
            on sf.description = fc.comment
          where sf.description is not null
            and fc.id is null
            and sf.db_id = {db_id}
          group by 1 order by 1
        ) d;

        analyze table field_comments;

        -- insert field to comment map to staging
        insert ignore into stg_dict_dataset_field_comment
        select t.field_id field_id, fc.id comment_id, sf.dataset_id, {db_id}
        from stg_dict_field_detail sf
          join field_comments fc
            on sf.description = fc.comment
          join dict_field_detail t
            on sf.dataset_id = t.dataset_id
            and sf.field_name = t.field_name
            and sf.parent_path <=> t.parent_path
        where sf.db_id = {db_id};

        -- have default comment, insert it set default to 0
        insert ignore into dict_dataset_field_comment
        select field_id, comment_id, dataset_id, 0 is_default from stg_dict_dataset_field_comment where field_id in (
          select field_id from dict_dataset_field_comment
          where field_id in (select field_id from stg_dict_dataset_field_comment)
          and is_default = 1 ) and db_id = {db_id};

        -- doesn't have this comment before, insert into it and set as default
        insert ignore into dict_dataset_field_comment
        select sd.field_id, sd.comment_id, sd.dataset_id, 1 from stg_dict_dataset_field_comment sd
          left join dict_dataset_field_comment d
            on d.field_id = sd.field_id
            and d.comment_id = sd.comment_id
        where d.comment_id is null
          and sd.db_id = {db_id};
        """.format(source_file=self.input_field_file, db_id=self.db_id)

        self.executeCommands(load_cmd)
        self.logger.info("Load dataset fields.")

    def load_dataset_instance(self):
        """Load the dataset-instance CSV into stg_dict_dataset_instance and
        upsert dict_dataset_instance, deriving a sortable numeric id from
        dotted "x.y.z" version strings.

        :return: None
        """
        load_cmd = """
        DELETE FROM stg_dict_dataset_instance WHERE db_id = {db_id};

        LOAD DATA LOCAL INFILE '{source_file}'
        INTO TABLE stg_dict_dataset_instance
        FIELDS TERMINATED BY '\x1a' ESCAPED BY '\0'
        (dataset_urn, deployment_tier, data_center, server_cluster, slice,
         is_active, native_name, logical_name, version, instance_created_time,
         schema_text, ddl_text, abstract_dataset_urn)
        SET db_id = {db_id},
            created_time=unix_timestamp(now()),
            wh_etl_exec_id = {wh_etl_exec_id};

        -- update dataset_id
        update stg_dict_dataset_instance sdi, dict_dataset d
        set sdi.dataset_id = d.id where sdi.abstract_dataset_urn = d.urn
        and sdi.db_id = {db_id};

        INSERT INTO dict_dataset_instance
        ( dataset_id,
          db_id,
          deployment_tier,
          data_center,
          server_cluster,
          slice,
          is_active,
          native_name,
          logical_name,
          version,
          version_sort_id,
          schema_text,
          ddl_text,
          instance_created_time,
          created_time,
          wh_etl_exec_id
        )
        select s.dataset_id, s.db_id, s.deployment_tier, c.data_center, c.cluster,
          s.slice, s.is_active, s.native_name, s.logical_name, s.version,
          case when s.version regexp '[0-9]+\.[0-9]+\.[0-9]+'
            then cast(substring_index(s.version, '.', 1) as unsigned) * 100000000 +
                 cast(substring_index(substring_index(s.version, '.', 2), '.', -1) as unsigned) * 10000 +
                 cast(substring_index(s.version, '.', -1) as unsigned)
            else 0
          end version_sort_id, s.schema_text, s.ddl_text,
          s.instance_created_time, s.created_time, s.wh_etl_exec_id
        from stg_dict_dataset_instance s join dict_dataset d on s.dataset_id = d.id
          join cfg_database c on c.db_id = {db_id}
        where s.db_id = {db_id}
        on duplicate key update
          deployment_tier=s.deployment_tier, data_center=s.data_center, server_cluster=s.server_cluster, slice=s.slice,
          is_active=s.is_active, native_name=s.native_name, logical_name=s.logical_name, version=s.version,
          schema_text=s.schema_text, ddl_text=s.ddl_text,
          instance_created_time=s.instance_created_time, created_time=s.created_time, wh_etl_exec_id=s.wh_etl_exec_id
        ;
        """.format(source_file=self.input_instance_file, db_id=self.db_id, wh_etl_exec_id=self.wh_etl_exec_id)

        self.executeCommands(load_cmd)
        self.logger.info("Load dataset instance.")

    def load_dataset_dependencies(self):
        """Load the dependency CSV into stg_cfg_object_name_map and refresh
        the 'depends on' rows of cfg_object_name_map for dalids/hive objects.

        :return: None
        """
        load_cmd = """
        DELETE FROM stg_cfg_object_name_map;

        LOAD DATA LOCAL INFILE '{source_file}'
        INTO TABLE stg_cfg_object_name_map
        FIELDS TERMINATED BY '\x1a' ESCAPED BY '\0'
        (object_type, object_sub_type, object_name, object_urn, map_phrase, is_identical_map,
         mapped_object_type, mapped_object_sub_type, mapped_object_name, mapped_object_urn, description, @last_modified)
        SET last_modified=now();

        -- update source dataset_id
        UPDATE stg_cfg_object_name_map s, dict_dataset d
        SET s.object_dataset_id = d.id WHERE s.object_urn = d.urn;

        -- update mapped dataset_id
        UPDATE stg_cfg_object_name_map s, dict_dataset d
        SET s.mapped_object_dataset_id = d.id WHERE s.mapped_object_urn = d.urn;

        -- create to be deleted table
        DROP TEMPORARY table IF EXISTS t_deleted_depend;

        CREATE TEMPORARY TABLE t_deleted_depend ENGINE=MyISAM
        AS (
          SELECT DISTINCT c.obj_name_map_id
          FROM cfg_object_name_map c LEFT JOIN stg_cfg_object_name_map s
            ON c.object_dataset_id = s.object_dataset_id
            and CASE WHEN c.mapped_object_dataset_id is not null
                  THEN c.mapped_object_dataset_id = s.mapped_object_dataset_id
                  ELSE c.mapped_object_name = s.mapped_object_name
                END
          WHERE s.object_name is not null
            and c.object_dataset_id is not null
            and c.map_phrase = 'depends on'
            and c.object_type in ('dalids', 'hive'));

        -- delete old dependencies
        DELETE FROM cfg_object_name_map where obj_name_map_id in (
          SELECT obj_name_map_id FROM t_deleted_depend
        );

        -- insert new depends
        INSERT IGNORE INTO cfg_object_name_map
        (
          object_type,
          object_sub_type,
          object_name,
          object_dataset_id,
          map_phrase,
          is_identical_map,
          mapped_object_type,
          mapped_object_sub_type,
          mapped_object_name,
          mapped_object_dataset_id,
          description,
          last_modified
        )
        SELECT s.object_type, s.object_sub_type, s.object_name, s.object_dataset_id, s.map_phrase, s.is_identical_map,
          s.mapped_object_type, s.mapped_object_sub_type, s.mapped_object_name, s.mapped_object_dataset_id,
          s.description, s.last_modified
        FROM stg_cfg_object_name_map s LEFT JOIN cfg_object_name_map c
          ON s.object_dataset_id is not null and s.object_dataset_id = c.object_dataset_id
          and CASE WHEN s.mapped_object_dataset_id is not null
                THEN s.mapped_object_dataset_id = c.mapped_object_dataset_id
                ELSE s.mapped_object_name = c.mapped_object_name
              END
        WHERE c.object_name is null;
        """.format(source_file=self.input_dependency_file)

        # didn't load into final table for now
        self.executeCommands(load_cmd)
        self.logger.info("Load dataset dependencies.")

    def executeCommands(self, commands):
        """Execute a multi-statement SQL string and commit once at the end.

        NOTE(review): statements are separated by a naive split on ";",
        which is safe only while no string literal in the generated SQL
        contains a semicolon -- true for the commands built in this class.
        """
        for cmd in commands.split(";"):
            self.logger.debug(cmd)
            self.conn_cursor.execute(cmd)
        self.conn_mysql.commit()
if __name__ == "__main__":
    # The ETL launcher passes a job-properties mapping as the first argument.
    # The module-level name `args` must keep this exact name: HiveLoad.__init__
    # reads it as a global.
    args = sys.argv[1]

    loader = HiveLoad(args[Constant.WH_EXEC_ID_KEY])
    try:
        loader.load_metadata()
        loader.load_dataset_instance()
        loader.load_dataset_dependencies()
        loader.load_field()
    except Exception as e:
        loader.logger.error(str(e))
    finally:
        loader.conn_mysql.close()
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Widget bar models.
We have multiple models for the widget bar since each object can have it's
own rules what is mappable to it"""
from lib import base
from lib import factory
from lib.element import widget_bar
from lib.page.widget import admin_widget
from lib.constants import locator
from lib.constants import element
class _WidgetBar(base.Component):
  """Root class for all widget bars.

  The previous `__init__` only delegated to base.Component with the same
  argument, so it has been removed; Python falls back to the parent
  initializer with identical behavior.
  """

  def get_active_widget_name(self):
    """In general multiple tabs are open. Here we get the name of the
    active one.

    Returns:
        str: title text of the active tab.
    """
    active_widget = base.Button(self._driver, locator.WidgetBar.TAB_WIDGET)
    return active_widget.text
class _ObjectWidgetBar(_WidgetBar):
  """Model for a generic object widget bar (e.g. each info widget is
  object specific)."""

  def __init__(self, driver):
    # `driver` is the selenium webdriver shared by all page elements.
    super(_ObjectWidgetBar, self).__init__(driver)
    self.button_add_widget = base.Dropdown(driver,
                                           locator.WidgetBar.BUTTON_ADD)
    self.tab_info = base.Tab(self._driver, locator.WidgetBar.INFO)

  def _get_widget(self, widget_name):
    """Adds an attribute ("tab_" + widget_name), clicks on the relevant tab
    and returns the relevant widget model."""
    attr_name = "tab_" + widget_name
    setattr(
        self,
        attr_name,
        widget_bar.Tab(self._driver, factory.get_locator_widget(widget_name)))
    getattr(self, attr_name).click()
    return factory.get_cls_widget(widget_name)(self._driver)

  def add_widget(self):
    """Open the "add widget" dropdown.

    Returns:
        lib.element.widget_bar.AddWidget
    """
    self.button_add_widget.click()
    return widget_bar.AddWidget(self._driver)

  def select_info(self):
    """Selects the info widget/tab. Note that each object has a different
    info page."""
    self.tab_info.click()
    return factory.get_cls_widget(
        self.__class__.__name__.lower(), is_info=True)(self._driver)

  def select_controls(self):
    """Select the controls tab.

    Returns:
        lib.page.widget.generic_widget.Controls
    """
    return self._get_widget(element.WidgetBar.CONTROLS)

  def select_issues(self):
    """Select the issues tab.

    Returns:
        lib.page.widget.generic_widget.Issues
    """
    return self._get_widget(element.WidgetBar.ISSUES)

  def select_processes(self):
    """Select the processes tab.

    Returns:
        lib.page.widget.generic_widget.Processes
    """
    return self._get_widget(element.WidgetBar.PROCESSES)

  def select_data_assets(self):
    """Select the data assets tab.

    Returns:
        lib.page.widget.generic_widget.DataAssets
    """
    return self._get_widget(element.WidgetBar.DATA_ASSETS)

  def select_systems(self):
    """Select the systems tab.

    Returns:
        lib.page.widget.generic_widget.Systems
    """
    return self._get_widget(element.WidgetBar.SYSTEMS)

  def select_products(self):
    """Select the products tab.

    Returns:
        lib.page.widget.generic_widget.Products
    """
    return self._get_widget(element.WidgetBar.PRODUCTS)

  def select_projects(self):
    """Select the projects tab.

    Returns:
        lib.page.widget.generic_widget.Projects
    """
    return self._get_widget(element.WidgetBar.PROJECTS)

  def select_programs(self):
    """Select the programs tab.

    Returns:
        lib.page.widget.generic_widget.Programs
    """
    return self._get_widget(element.WidgetBar.PROGRAMS)
class AdminDashboard(_WidgetBar):
  """A model representing widget bar as seen only on admin dashboard."""

  def __init__(self, driver):
    super(AdminDashboard, self).__init__(driver)
    self.tab_people = widget_bar.Tab(self._driver,
                                     locator.WidgetBar.ADMIN_PEOPLE)
    self.tab_roles = widget_bar.Tab(self._driver,
                                    locator.WidgetBar.ADMIN_ROLES)
    self.tab_events = widget_bar.Tab(self._driver,
                                     locator.WidgetBar.ADMIN_EVENTS)
    self.tab_custom_attributes = widget_bar.Tab(
        self._driver, locator.WidgetBar.ADMIN_CUSTOM_ATTRIBUTE)

  def select_people(self):
    """Select the people tab.

    Returns:
        lib.page.widget.admin_widget.People
    """
    self.tab_people.click()
    return admin_widget.People(self._driver)

  def select_roles(self):
    """Select the roles tab.

    Returns:
        lib.page.widget.admin_widget.Roles
    """
    self.tab_roles.click()
    return admin_widget.Roles(self._driver)

  def select_events(self):
    """Select the events tab.

    Returns:
        lib.page.widget.admin_widget.Events
    """
    self.tab_events.click()
    return admin_widget.Events(self._driver)

  def select_custom_attributes(self):
    """Select the custom attributes tab.

    Returns:
        lib.page.widget.admin_widget.CustomAttributes
    """
    self.tab_custom_attributes.click()
    return admin_widget.CustomAttributes(self._driver)
class Dashboard(_ObjectWidgetBar):
  """A model representing widget bar on user's dashboard"""


class Programs(_ObjectWidgetBar):
  """A model representing widget bar of the program object"""


class Workflows(_ObjectWidgetBar):
  """A model representing widget bar of the workflow object"""


class Audits(_ObjectWidgetBar):
  """A model representing widget bar of the audit object"""


class Assessments(_ObjectWidgetBar):
  """A model representing widget bar of the assessment object"""


class Requests(_ObjectWidgetBar):
  """A model representing widget bar of the request object"""


class Issues(_ObjectWidgetBar):
  """A model representing widget bar of the issue object"""


class Regulations(_ObjectWidgetBar):
  """A model representing widget bar of the regulation object"""


class Policies(_ObjectWidgetBar):
  """A model representing widget bar of the policy object"""


class Standards(_ObjectWidgetBar):
  """A model representing widget bar of the standard object"""


class Contracts(_ObjectWidgetBar):
  """A model representing widget bar of the contract object"""


class Clauses(_ObjectWidgetBar):
  """A model representing widget bar of the clause object"""


class Sections(_ObjectWidgetBar):
  """A model representing widget bar of the section object"""


class Controls(_ObjectWidgetBar):
  """A model representing widget bar of the control object"""


class Objectives(_ObjectWidgetBar):
  """A model representing widget bar of the objective object"""


class People(_ObjectWidgetBar):
  """A model representing widget bar of the people object"""


class OrgGroups(_ObjectWidgetBar):
  """A model representing widget bar of the org group object"""


class Vendors(_ObjectWidgetBar):
  """A model representing widget bar of the vendor object"""


class AccessGroups(_ObjectWidgetBar):
  """A model representing widget bar of the access group object"""


class Systems(_ObjectWidgetBar):
  """A model representing widget bar of the system object"""


class Processes(_ObjectWidgetBar):
  """A model representing widget bar of the process object"""


class DataAssets(_ObjectWidgetBar):
  """A model representing widget bar of the data asset object"""


class Products(_ObjectWidgetBar):
  """A model representing widget bar of the product object"""


class Projects(_ObjectWidgetBar):
  """A model representing widget bar of the project object"""


class Facilities(_ObjectWidgetBar):
  """A model representing widget bar of the facility object"""


class Markets(_ObjectWidgetBar):
  """A model representing widget bar of the market object"""


class Risks(_ObjectWidgetBar):
  """A model representing widget bar of the risk object"""


class Threats(_ObjectWidgetBar):
  """A model representing widget bar of the threat object"""
| |
#!/usr/bin/env python
from __future__ import division
from __future__ import with_statement
import numpy as np
#from pylab import ion
import matplotlib as mpl
#from matplotlib.path import Path
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
#import numexpr as ne
#from numba import autojit
import os
import sys
import time
import cPickle as pickle
import collections
from collections import deque
from multiprocessing import Process, Queue
from smartFormat import smartFormat
from genericUtils import wstdout, timestamp
__author__ = "J.L. Lanfranchi"
__email__ = "jll1062@phys.psu.edu"
__copyright__ = "Copyright 2014 J.L. Lanfranchi"
__credits__ = ["J.L. Lanfranchi"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
#-- Turn live-interactive plotting on (makes updated plots appear animated)
#ion()

#-- Adjust the font used on the plots (8 pt serif, applied globally via rc)
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
mpl.rc('font', **font)
def coordsFromAbsDir(absdir):
    """Convert a sequence of absolute step directions into x/y coordinates.

    Directions follow the convention used by Snake.reptate:
    0 = +x, 1 = +y, 2 = -x, 3 = -y.  The walk has len(absdir) steps, so
    the returned arrays contain len(absdir) + 1 points.

    Bug fix: the numpy helpers (zeros, ones, argwhere, cumsum) were
    referenced without the ``np.`` prefix even though numpy is imported as
    ``np``, so this function raised NameError on every call.  The unused
    local ``offset`` was removed.

    Parameters
    ----------
    absdir : array-like of int
        Absolute direction code (0-3) of each step.

    Returns
    -------
    x, y : ndarray of int
        Coordinates of every point visited, starting at (0, y0).
    """
    absdir = np.asarray(absdir)
    nsteps = len(absdir)
    xincr = np.zeros(nsteps + 1, dtype=int)
    # NOTE(review): y increments default to 1 (matching the initial
    # vertical chain layout in Snake.__init__) -- confirm this is intended
    # rather than np.zeros.
    yincr = np.ones(nsteps + 1, dtype=int)
    xincr[np.argwhere(absdir == 0) + 1] = 1
    xincr[np.argwhere(absdir == 2) + 1] = -1
    yincr[np.argwhere(absdir == 1) + 1] = 1
    yincr[np.argwhere(absdir == 3) + 1] = -1
    x = np.cumsum(xincr)
    y = np.cumsum(yincr)
    return x, y
def plotSnakeXY(x, y):
    """Plot the snake as a red line with the head marked in black.

    Bug fix: ``subplots``, ``plot`` and ``axis`` were called as bare names
    although matplotlib is only imported as ``plt``, raising NameError; all
    calls are now made through the figure's Axes.  ``itervalues`` (Python 2
    only) was replaced by ``values()``, which works on both 2 and 3.

    Parameters
    ----------
    x, y : sequence of int
        Coordinates of each chain point; (x[0], y[0]) is the head.
    """
    fig, ax = plt.subplots()
    ax.plot(x, y, 'r-o', linewidth=3, markersize=6)
    ax.plot(x[0], y[0], 'ko', markersize=10)
    ax.axis('image')
    # hide the plot frame
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.set_xlim(min(x) - 2, max(x) + 2)
    ax.set_ylim(min(y) - 2, max(y) + 2)
def plotSnakeAbsDir(absdir):
    """Plot a snake specified by absolute step directions.

    Bug fix: previously called the nonexistent ``coordsFromDir`` and passed
    the resulting (x, y) tuple as a single positional argument; the tuple is
    now unpacked into plotSnakeXY's two parameters.
    """
    plotSnakeXY(*coordsFromAbsDir(absdir))
def plotSnakeCoord(coords):
    """Plot a snake given an iterable of (x, y) coordinate pairs."""
    xs = [pt[0] for pt in coords]
    ys = [pt[1] for pt in coords]
    plotSnakeXY(xs, ys)
def newSnake1(nSteps=10):
    """Generate a random (not self-avoiding) walk of ``nSteps`` steps.

    Each step turns -1, 0 or +1 relative to the previous absolute
    direction (mod 4), starting from direction 1 (+y).

    Bug fixes: ``random``, ``mod`` and ``cumsum`` lacked the ``np.``
    prefix, the nonexistent ``coordsFromDir`` was called, and the computed
    coordinates were discarded; they are now returned.

    Returns
    -------
    x, y : ndarray of int
        Coordinates of the generated walk.
    """
    reldir = np.random.randint(-1, 2, nSteps)
    absdir = np.mod(1 + np.cumsum(reldir), 4)
    return coordsFromAbsDir(absdir)
class Snake:
    """Self-avoiding random walk evolved by reptation ("slithering") moves.

    The chain's coordinates live in a deque used as a circular buffer, so a
    successful move just appends the new head coordinate to one end and lets
    the old tail fall off the other.  When a proposed move would collide
    with the chain, the head and tail swap roles instead of the move simply
    being rejected.
    """

    def __init__(self, nsteps, validDirs=(-1,1), recordAfter=None):
        # nsteps      : number of links in the chain
        # validDirs   : relative turns a reptation may take, e.g. (-1, 0, 1)
        #               also allows straight steps
        # recordAfter : record R^2 every this-many reptations; defaults to
        #               nsteps (roughly one independent configuration)
        #-- Use a deque as a circular buffer to store the coords
        self.coords = deque(maxlen=nsteps+1)
        [ self.coords.append((0,y)) for y in range(nsteps+1) ]
        # Initial chain is a straight vertical line, so R^2 = nsteps**2
        self.R2 = [nsteps**2]
        #-- This is either -1 (points at most-recently-added element)
        #   or 0 (points at oldest element)
        self.forward = True
        self.c1 = -1          # index of the head
        self.c2 = -2          # index of the link just behind the head
        self.c_end = 0        # index of the tail
        self.validDirs = validDirs
        self.nValidDirs = len(validDirs)
        if recordAfter == None:
            self.recordAfter = nsteps
        else:
            self.recordAfter = recordAfter
        self.reptateCount = 0

    def plot(self):
        # Plot head-to-tail regardless of the current traversal direction.
        if self.forward:
            plotSnakeCoord(self.coords)
        else:
            # NOTE(review): rc aliases self.coords, so reverse() mutates the
            # chain in place and it is not restored afterwards -- confirm
            # this is intended (it flips `forward` semantics silently).
            rc = self.coords
            rc.reverse()
            plotSnakeCoord(rc)

    def meanR2(self):
        """Mean of the recorded squared end-to-end distances."""
        return np.mean(self.R2)

    def reptate(self):
        """Attempt one reptation move.

        Picks a random turn relative to the head segment's direction; if
        the new site is free the head advances (tail retracts via the
        circular buffer), otherwise head and tail swap roles.  Every
        `recordAfter` attempts the squared end-to-end distance is recorded.
        """
        # Direction of the current head segment (0=+x, 1=+y, 2=-x, 3=-y)
        dx = self.coords[self.c1][0]-self.coords[self.c2][0]
        if dx == 1:
            previousDir = 0
        elif dx == -1:
            previousDir = 2
        elif self.coords[self.c1][1]-self.coords[self.c2][1] == 1:
            previousDir = 1
        else:
            previousDir = 3

        # Propose a random relative turn from validDirs
        proposedDir = (previousDir + \
                self.validDirs[np.random.randint(0,self.nValidDirs)]) % 4
        if proposedDir == 0:
            proposedCoord = (self.coords[self.c1][0]+1,self.coords[self.c1][1])
        elif proposedDir == 1:
            proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]+1)
        elif proposedDir == 2:
            proposedCoord = (self.coords[self.c1][0]-1,self.coords[self.c1][1])
        else:
            proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]-1)

        #-- Exchange head and tail of snake...
        if proposedCoord in self.coords:
            self.forward = not self.forward
            if self.forward:
                self.c1 = -1
                self.c2 = -2
                self.c_end = 0
            else:
                self.c1 = 0
                self.c2 = 1
                self.c_end = -1
            if self.reptateCount % self.recordAfter == 0:
                # Chain shape unchanged: repeat the last R^2 sample
                self.R2.append(self.R2[-1])
                self.recordStats = False

        #-- ... or prepand / append new coord
        else:
            if self.forward:
                self.coords.append(proposedCoord)
            else:
                self.coords.appendleft(proposedCoord)
            if self.reptateCount % self.recordAfter == 0:
                self.R2.append(self.R2[-1])
                self.R2.append((self.coords[self.c1][0]
                                -self.coords[self.c_end][0])**2+
                               (self.coords[self.c1][1]
                                -self.coords[self.c_end][1])**2)
                self.recordStats = False

        self.reptateCount += 1
# Keyword arguments shared by every smartFormat() call in the latex helpers.
formatDic = {'sigFigs': 5, 'demarc': "", 'threeSpacing': False, 'rightSep':""}
def powerLaw(x, power, multFact, offset):
    """Power-law model for curve_fit: multFact * x**power + offset."""
    scaled = multFact * (x ** power)
    return scaled + offset
def powerLawLatex(power, multFact=1, offset=0, pcov=None):
    """Return a LaTeX string describing a fitted power law in N."""
    offsetStr = smartFormat(offset, alwaysShowSign=True, **formatDic)
    # Guarantee an explicit leading sign on the offset term.
    if offsetStr[0] not in ("+", "-"):
        offsetStr = "+" + offsetStr
    pieces = [
        r"$",
        smartFormat(multFact, **formatDic),
        r" \cdot N^{",
        smartFormat(power, **formatDic),
        r"} ",
        offsetStr,
        r"$",
    ]
    return "".join(pieces)
def exponential(x, expExponent, multFact=1):
    """Exponential model for curve_fit: multFact * exp(x * expExponent)."""
    xarr = np.array(x)
    return np.exp(xarr * expExponent) * multFact
def exponentialLatex(expExponent, multFact=1, pcov=None):
    """Return a LaTeX string describing a fitted exponential in N."""
    parts = (
        r"$",
        smartFormat(multFact, **formatDic),
        r"\cdot e^{",
        smartFormat(expExponent, **formatDic),
        r"\cdot N}$",
    )
    return "".join(parts)
def expPower(x, expExponent, powerLawExp, multFact):
    """Combined model: multFact * exp(x*expExponent) * x**powerLawExp."""
    xarr = np.array(x)
    growth = np.exp(xarr * expExponent)
    return multFact * growth * xarr ** powerLawExp
def expPowerLatex(expExponent, powerLawExp, multFact, pcov=None):
    """Return a LaTeX string describing a fitted exponential-times-power law."""
    parts = (
        r"$",
        smartFormat(multFact, **formatDic),
        r"\cdot e^{",
        smartFormat(expExponent, **formatDic),
        r"\cdot N}\cdot N^{",
        smartFormat(powerLawExp, **formatDic),
        r"}$",
    )
    return "".join(parts)
class SimulationData:
    """Attribute bag: every keyword argument becomes an instance attribute.

    Instances are pickled whole by Simulation.saveState, so all simulation
    state hangs off this object.
    """
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class Simulation:
def __init__(self):
self.sd = SimulationData()
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
self.stateFilename = os.path.basename(__file__) + ".pk" #"p7x28_state.pk"
print self.stateFilename
def saveState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'wb') as stateFile:
pickle.dump(self.sd, stateFile, -1)
def loadState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'rb') as stateFile:
self.sd = pickle.load(stateFile)
def runSimulation(self, targetSuccesses=10, stepsRange=(4,50),
plotting=False):
#-- Reset state variables for a new simulation run
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
timeLastSaved = time.time()
self.sd.targetSuccesses = targetSuccesses
self.sd.stepsInChains = range(stepsRange[0],stepsRange[1])
self.sd.allChainFinalCoords = []
self.sd.allMeanChainFinalCoords = []
self.sd.meanChainFinalCoords = []
self.sd.chainSquareLengthAvg = []
self.sd.successRatio = []
self.sd.timingAvg = []
if plotting:
self.fig1 = plt.figure(1)
self.fig1.clf()
self.ax1 = fig1.add_subplot(111)
line, = ax1.plot([], [], 'ko-', lw=2)
self.ax1.set_xlim(-20,20)
self.ax1.set_ylim(-20,20)
ax1.axis('image')
plt.draw()
for stepsThisChain in self.sd.stepsInChains:
startTime = time.time()
snake = Snake(stepsThisChain, validDirs=(-1,0,1))
#successfulChains = []
#chainSquareLengths = []
#chainFinalCoords = []
#meanChainFinalCoord = []
nSuccesses = 0
trialN = 0
while nSuccesses < self.sd.targetSuccesses:
trialN += 1
#-- Perform as many reptations as chain links, to
# help ensure an independent configuration
[ snake.reptate() for n in xrange(stepsThisChain) ]
nSuccesses += 1
if plotting:
snake.plot()
plt.draw()
time.sleep(0.005)
self.sd.chainSquareLengthAvg.append(snake.meanR2())
self.sd.timingAvg.append( (time.time()-startTime)/nSuccesses )
wstdout("\nstepsThisChain = " + str(stepsThisChain) + "\n")
wstdout(" time/success = " + str(self.sd.timingAvg[-1]) + "\n")
if (time.time() - timeLastSaved) > 60*5:
self.saveState()
timeLastSaved = time.time()
self.sd.allMeanChainFinalCoords = \
np.array(self.sd.allMeanChainFinalCoords)
self.sd.simulationCompleted = True
self.saveState()
def postproc(self):
"""Perform curve fitting to the data"""
#-- Update state
self.sd.postprocCompleted = False
#-- Check that simulation data is present
if not self.sd.simulationCompleted:
raise Exception("No simulation run; cannot perform curve fit!")
#-- Same x data is used for *all* the below curve fits
x = self.sd.stepsInChains
#============================================================
# Fit R_N^2 with const * power-law + const
#============================================================
y = self.sd.chainSquareLengthAvg
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt3, pcov3 = curve_fit(f=powerLaw, xdata=x, ydata=y, sigma=sigma)
self.sd.fit3 = powerLaw(x, *popt3)
self.sd.fit3eqn = powerLawLatex(*popt3)
print popt3, pcov3, "\n"
#============================================================
# Exponential * power-law fit to wall-clock time
#============================================================
y = self.sd.timingAvg
#-- Initial guess
p0 = (0.129, 0, 2.981e-3)
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt4, pcov4 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0, )
self.sd.fit4 = expPower(x, *popt4)
self.sd.fit4eqn = expPowerLatex(*popt4)
print popt4, pcov4, "\n"
#-- Update state
self.sd.postprocCompleted = True
def plotResults(self, savePlot=True):
"""Plot the data and the fit curves"""
if not self.sd.simulationCompleted:
raise Exception("No simulation has been run; cannot plot results!")
if not self.sd.postprocCompleted:
self.postproc()
self.fig2 = plt.figure(2, figsize=(7,8), dpi=80)
self.fig2.clf()
self.ax23 = self.fig2.add_subplot(211)
self.ax23.plot(self.sd.stepsInChains, self.sd.chainSquareLengthAvg,
'bo', label="data", markersize=4)
self.ax23.plot(self.sd.stepsInChains, self.sd.fit3,
'r-', label=self.sd.fit3eqn, linewidth=2, alpha=0.75)
self.ax23.set_ylabel(r"$\langle R_N^2\rangle$")
self.ax23.grid(which='major', b=True)
self.ax23.legend(loc="upper left", fancybox=True, shadow=True)
self.ax24 = self.fig2.add_subplot(212)
self.ax24.plot(self.sd.stepsInChains, self.sd.timingAvg,
'bo', label="data", markersize=4)
self.ax24.plot(self.sd.stepsInChains, self.sd.fit4,
'r-', label=self.sd.fit4eqn, linewidth=2, alpha=0.75)
self.ax24.set_xlabel(r"Nmber of steps in walk, $N$")
self.ax24.set_ylabel("Wall-clock time per successful chain (s)")
self.ax24.set_yscale('log')
self.ax24.grid(which='major', b=True)
self.ax24.legend(loc="upper left", fancybox=True, shadow=True)
self.fig2.tight_layout()
if savePlot:
self.fig2.savefig(timestamp(t=False) + "_problem7x30_plots.pdf")
self.fig2.savefig(timestamp(t=False) + "_problem7x30_plots.png", dpi=120)
plt.show()
if __name__ == "__main__":
startTime = time.time()
#-- Instantiate the Simulation object
sim = Simulation()
#-- Try to load the sim data from any previous run; if no data saved
# to disk in the default location, run a new simulation
try:
sim.loadState()
except Exception as e:
print "Error({0}: {1}".format(e.errno, e.strerror)
#sim.runSimulation(targetSuccesses=10, stepsRange=(4,101))
sim.runSimulation(targetSuccesses=100, stepsRange=(5,500))
#-- *Always* perform post-processing and plotting (allows easy modification
# of the postprocessing (curve fitting) and plotting routines
# without needing to re-run the simulation, which can take hours)
sim.postproc()
sim.plotResults()
plt.show()
| |
__author__ = 'rolandh'

# URN prefixes for the OID arcs that SAML attribute identifiers are drawn
# from. Appending the final arc number (see MAP below) to a prefix yields
# the full "urn:oid:..." attribute name.
EDUPERSON_OID = "urn:oid:1.3.6.1.4.1.5923.1.1.1."
X500ATTR_OID = "urn:oid:2.5.4."
NOREDUPERSON_OID = "urn:oid:1.3.6.1.4.1.2428.90.1."
NETSCAPE_LDAP = "urn:oid:2.16.840.1.113730.3.1."
UCL_DIR_PILOT = 'urn:oid:0.9.2342.19200300.100.1.'
PKCS_9 = "urn:oid:1.2.840.113549.1.9.1."
UMICH = "urn:oid:1.3.6.1.4.1.250.1.57."
SCHAC = "urn:oid:1.3.6.1.4.1.25178.2."
# Bidirectional SAML attribute-name map for the "uri" name format:
#   "fro": OID URN -> friendly attribute name (used when reading assertions)
#   "to":  friendly name (including common aliases, several of which map to
#          the same OID) -> OID URN (used when building assertions)
MAP = {
    "identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri",
    # OID URN -> friendly name
    "fro": {
        EDUPERSON_OID+'2': 'eduPersonNickname',
        EDUPERSON_OID+'9': 'eduPersonScopedAffiliation',
        EDUPERSON_OID+'11': 'eduPersonAssurance',
        EDUPERSON_OID+'10': 'eduPersonTargetedID',
        EDUPERSON_OID+'4': 'eduPersonOrgUnitDN',
        NOREDUPERSON_OID+'6': 'norEduOrgAcronym',
        NOREDUPERSON_OID+'7': 'norEduOrgUniqueIdentifier',
        NOREDUPERSON_OID+'4': 'norEduPersonLIN',
        EDUPERSON_OID+'1': 'eduPersonAffiliation',
        NOREDUPERSON_OID+'2': 'norEduOrgUnitUniqueNumber',
        NETSCAPE_LDAP+'40': 'userSMIMECertificate',
        NOREDUPERSON_OID+'1': 'norEduOrgUniqueNumber',
        NETSCAPE_LDAP+'241': 'displayName',
        UCL_DIR_PILOT+'37': 'associatedDomain',
        EDUPERSON_OID+'6': 'eduPersonPrincipalName',
        NOREDUPERSON_OID+'8': 'norEduOrgUnitUniqueIdentifier',
        NOREDUPERSON_OID+'9': 'federationFeideSchemaVersion',
        X500ATTR_OID+'53': 'deltaRevocationList',
        X500ATTR_OID+'52': 'supportedAlgorithms',
        X500ATTR_OID+'51': 'houseIdentifier',
        X500ATTR_OID+'50': 'uniqueMember',
        X500ATTR_OID+'19': 'physicalDeliveryOfficeName',
        X500ATTR_OID+'18': 'postOfficeBox',
        X500ATTR_OID+'17': 'postalCode',
        X500ATTR_OID+'16': 'postalAddress',
        X500ATTR_OID+'15': 'businessCategory',
        X500ATTR_OID+'14': 'searchGuide',
        EDUPERSON_OID+'5': 'eduPersonPrimaryAffiliation',
        X500ATTR_OID+'12': 'title',
        X500ATTR_OID+'11': 'ou',
        X500ATTR_OID+'10': 'o',
        X500ATTR_OID+'37': 'cACertificate',
        X500ATTR_OID+'36': 'userCertificate',
        X500ATTR_OID+'31': 'member',
        X500ATTR_OID+'30': 'supportedApplicationContext',
        X500ATTR_OID+'33': 'roleOccupant',
        X500ATTR_OID+'32': 'owner',
        NETSCAPE_LDAP+'1': 'carLicense',
        PKCS_9+'1': 'email',
        NETSCAPE_LDAP+'3': 'employeeNumber',
        NETSCAPE_LDAP+'2': 'departmentNumber',
        X500ATTR_OID+'39': 'certificateRevocationList',
        X500ATTR_OID+'38': 'authorityRevocationList',
        NETSCAPE_LDAP+'216': 'userPKCS12',
        EDUPERSON_OID+'8': 'eduPersonPrimaryOrgUnitDN',
        X500ATTR_OID+'9': 'street',
        X500ATTR_OID+'8': 'st',
        NETSCAPE_LDAP+'39': 'preferredLanguage',
        EDUPERSON_OID+'7': 'eduPersonEntitlement',
        X500ATTR_OID+'2': 'knowledgeInformation',
        X500ATTR_OID+'7': 'l',
        X500ATTR_OID+'6': 'c',
        X500ATTR_OID+'5': 'serialNumber',
        X500ATTR_OID+'4': 'sn',
        UCL_DIR_PILOT+'60': 'jpegPhoto',
        X500ATTR_OID+'65': 'pseudonym',
        NOREDUPERSON_OID+'5': 'norEduPersonNIN',
        UCL_DIR_PILOT+'3': 'mail',
        UCL_DIR_PILOT+'25': 'dc',
        X500ATTR_OID+'40': 'crossCertificatePair',
        X500ATTR_OID+'42': 'givenName',
        X500ATTR_OID+'43': 'initials',
        X500ATTR_OID+'44': 'generationQualifier',
        X500ATTR_OID+'45': 'x500UniqueIdentifier',
        X500ATTR_OID+'46': 'dnQualifier',
        X500ATTR_OID+'47': 'enhancedSearchGuide',
        X500ATTR_OID+'48': 'protocolInformation',
        X500ATTR_OID+'54': 'dmdName',
        NETSCAPE_LDAP+'4': 'employeeType',
        X500ATTR_OID+'22': 'teletexTerminalIdentifier',
        X500ATTR_OID+'23': 'facsimileTelephoneNumber',
        X500ATTR_OID+'20': 'telephoneNumber',
        X500ATTR_OID+'21': 'telexNumber',
        X500ATTR_OID+'26': 'registeredAddress',
        X500ATTR_OID+'27': 'destinationIndicator',
        X500ATTR_OID+'24': 'x121Address',
        X500ATTR_OID+'25': 'internationaliSDNNumber',
        X500ATTR_OID+'28': 'preferredDeliveryMethod',
        X500ATTR_OID+'29': 'presentationAddress',
        EDUPERSON_OID+'3': 'eduPersonOrgDN',
        NOREDUPERSON_OID+'3': 'norEduPersonBirthDate',
        UMICH+'57': 'labeledURI',
        UCL_DIR_PILOT+'1': 'uid',
        # SCHAC (SCHema for ACademia) attributes; note arc 16 is unused here.
        SCHAC+'1': 'schacMotherTongue',
        SCHAC+'2': 'schacGender',
        SCHAC+'3': 'schacDateOfBirth',
        SCHAC+'4': 'schacPlaceOfBirth',
        SCHAC+'5': 'schacCountryOfCitizenship',
        SCHAC+'6': 'schacSn1',
        SCHAC+'7': 'schacSn2',
        SCHAC+'8': 'schacPersonalTitle',
        SCHAC+'9': 'schacHomeOrganization',
        SCHAC+'10': 'schacHomeOrganizationType',
        SCHAC+'11': 'schacCountryOfResidence',
        SCHAC+'12': 'schacUserPresenceID',
        SCHAC+'13': 'schacPersonalPosition',
        SCHAC+'14': 'schacPersonalUniqueCode',
        SCHAC+'15': 'schacPersonalUniqueID',
        SCHAC+'17': 'schacExpiryDate',
        SCHAC+'18': 'schacUserPrivateAttribute',
        SCHAC+'19': 'schacUserStatus',
        SCHAC+'20': 'schacProjectMembership',
        SCHAC+'21': 'schacProjectSpecificRole',
    },
    # friendly name (and aliases) -> OID URN
    "to": {
        'roleOccupant': X500ATTR_OID+'33',
        'gn': X500ATTR_OID+'42',
        'norEduPersonNIN': NOREDUPERSON_OID+'5',
        'title': X500ATTR_OID+'12',
        'facsimileTelephoneNumber': X500ATTR_OID+'23',
        'mail': UCL_DIR_PILOT+'3',
        'postOfficeBox': X500ATTR_OID+'18',
        'fax': X500ATTR_OID+'23',
        'telephoneNumber': X500ATTR_OID+'20',
        'norEduPersonBirthDate': NOREDUPERSON_OID+'3',
        'rfc822Mailbox': UCL_DIR_PILOT+'3',
        'dc': UCL_DIR_PILOT+'25',
        'countryName': X500ATTR_OID+'6',
        'emailAddress': PKCS_9+'1',
        'employeeNumber': NETSCAPE_LDAP+'3',
        'organizationName': X500ATTR_OID+'10',
        'eduPersonAssurance': EDUPERSON_OID+'11',
        'norEduOrgAcronym': NOREDUPERSON_OID+'6',
        'registeredAddress': X500ATTR_OID+'26',
        'physicalDeliveryOfficeName': X500ATTR_OID+'19',
        'associatedDomain': UCL_DIR_PILOT+'37',
        'l': X500ATTR_OID+'7',
        'stateOrProvinceName': X500ATTR_OID+'8',
        'federationFeideSchemaVersion': NOREDUPERSON_OID+'9',
        'pkcs9email': PKCS_9+'1',
        'givenName': X500ATTR_OID+'42',
        'givenname': X500ATTR_OID+'42',
        'x500UniqueIdentifier': X500ATTR_OID+'45',
        'eduPersonNickname': EDUPERSON_OID+'2',
        'houseIdentifier': X500ATTR_OID+'51',
        'street': X500ATTR_OID+'9',
        'supportedAlgorithms': X500ATTR_OID+'52',
        'preferredLanguage': NETSCAPE_LDAP+'39',
        'postalAddress': X500ATTR_OID+'16',
        'email': PKCS_9+'1',
        'norEduOrgUnitUniqueIdentifier': NOREDUPERSON_OID+'8',
        'eduPersonPrimaryOrgUnitDN': EDUPERSON_OID+'8',
        'c': X500ATTR_OID+'6',
        'teletexTerminalIdentifier': X500ATTR_OID+'22',
        'o': X500ATTR_OID+'10',
        'cACertificate': X500ATTR_OID+'37',
        'telexNumber': X500ATTR_OID+'21',
        'ou': X500ATTR_OID+'11',
        'initials': X500ATTR_OID+'43',
        'eduPersonOrgUnitDN': EDUPERSON_OID+'4',
        'deltaRevocationList': X500ATTR_OID+'53',
        'norEduPersonLIN': NOREDUPERSON_OID+'4',
        'supportedApplicationContext': X500ATTR_OID+'30',
        'eduPersonEntitlement': EDUPERSON_OID+'7',
        'generationQualifier': X500ATTR_OID+'44',
        'eduPersonAffiliation': EDUPERSON_OID+'1',
        'edupersonaffiliation': EDUPERSON_OID+'1',
        'eduPersonPrincipalName': EDUPERSON_OID+'6',
        'edupersonprincipalname': EDUPERSON_OID+'6',
        'localityName': X500ATTR_OID+'7',
        'owner': X500ATTR_OID+'32',
        'norEduOrgUnitUniqueNumber': NOREDUPERSON_OID+'2',
        'searchGuide': X500ATTR_OID+'14',
        'certificateRevocationList': X500ATTR_OID+'39',
        'organizationalUnitName': X500ATTR_OID+'11',
        'userCertificate': X500ATTR_OID+'36',
        'preferredDeliveryMethod': X500ATTR_OID+'28',
        'internationaliSDNNumber': X500ATTR_OID+'25',
        'uniqueMember': X500ATTR_OID+'50',
        'departmentNumber': NETSCAPE_LDAP+'2',
        'enhancedSearchGuide': X500ATTR_OID+'47',
        'userPKCS12': NETSCAPE_LDAP+'216',
        'eduPersonTargetedID': EDUPERSON_OID+'10',
        'norEduOrgUniqueNumber': NOREDUPERSON_OID+'1',
        'x121Address': X500ATTR_OID+'24',
        'destinationIndicator': X500ATTR_OID+'27',
        'eduPersonPrimaryAffiliation': EDUPERSON_OID+'5',
        'surname': X500ATTR_OID+'4',
        'jpegPhoto': UCL_DIR_PILOT+'60',
        'eduPersonScopedAffiliation': EDUPERSON_OID+'9',
        'edupersonscopedaffiliation': EDUPERSON_OID+'9',
        'protocolInformation': X500ATTR_OID+'48',
        'knowledgeInformation': X500ATTR_OID+'2',
        'employeeType': NETSCAPE_LDAP+'4',
        'userSMIMECertificate': NETSCAPE_LDAP+'40',
        'member': X500ATTR_OID+'31',
        'streetAddress': X500ATTR_OID+'9',
        'dmdName': X500ATTR_OID+'54',
        'postalCode': X500ATTR_OID+'17',
        'pseudonym': X500ATTR_OID+'65',
        'dnQualifier': X500ATTR_OID+'46',
        'crossCertificatePair': X500ATTR_OID+'40',
        'eduPersonOrgDN': EDUPERSON_OID+'3',
        'authorityRevocationList': X500ATTR_OID+'38',
        'displayName': NETSCAPE_LDAP+'241',
        'businessCategory': X500ATTR_OID+'15',
        'serialNumber': X500ATTR_OID+'5',
        'norEduOrgUniqueIdentifier': NOREDUPERSON_OID+'7',
        'st': X500ATTR_OID+'8',
        'carLicense': NETSCAPE_LDAP+'1',
        'presentationAddress': X500ATTR_OID+'29',
        'sn': X500ATTR_OID+'4',
        'domainComponent': UCL_DIR_PILOT+'25',
        'labeledURI': UMICH+'57',
        'uid': UCL_DIR_PILOT+'1',
        'schacMotherTongue':SCHAC+'1',
        'schacGender': SCHAC+'2',
        'schacDateOfBirth':SCHAC+'3',
        'schacPlaceOfBirth': SCHAC+'4',
        'schacCountryOfCitizenship':SCHAC+'5',
        'schacSn1': SCHAC+'6',
        'schacSn2': SCHAC+'7',
        'schacPersonalTitle':SCHAC+'8',
        'schacHomeOrganization': SCHAC+'9',
        'schacHomeOrganizationType': SCHAC+'10',
        'schacCountryOfResidence': SCHAC+'11',
        'schacUserPresenceID': SCHAC+'12',
        'schacPersonalPosition': SCHAC+'13',
        'schacPersonalUniqueCode': SCHAC+'14',
        'schacPersonalUniqueID': SCHAC+'15',
        'schacExpiryDate': SCHAC+'17',
        'schacUserPrivateAttribute': SCHAC+'18',
        'schacUserStatus': SCHAC+'19',
        'schacProjectMembership': SCHAC+'20',
        'schacProjectSpecificRole': SCHAC+'21',
    }
}
| |
# -*- coding: utf-8 -*-
import os
import json
import posixpath
from seaserv import seafile_api
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from seahub.utils import check_filename_with_rename
from seahub.utils.file_revisions import get_all_file_revisions
from tests.common.utils import randstring
# Pro-edition-only features (file locking, per-folder permissions) are only
# exercised when running against a local pro dev environment; default off.
try:
    from seahub.settings import LOCAL_PRO_DEV_ENV
except ImportError:
    LOCAL_PRO_DEV_ENV = False
class FileViewTest(BaseTestCase):
    def create_new_repo(self):
        # Create a second library owned by the regular test user and return
        # its id; callers are responsible for removing it afterwards.
        new_repo_id = seafile_api.create_repo(name='test-repo-2', desc='',
                username=self.user.username, passwd=None)
        return new_repo_id

    def admin_create_new_repo(self):
        # Same as create_new_repo(), but the library is owned by the admin.
        new_repo_id = seafile_api.create_repo(name='test-repo-2', desc='',
                username=self.admin.username, passwd=None)
        return new_repo_id
def get_lib_file_name(self, repo_id):
url = reverse('list_lib_dir', args=[repo_id])
resp = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
json_resp = json.loads(resp.content)
if len(json_resp['dirent_list']) > 0:
for dirent in json_resp['dirent_list']:
if 'is_file' in dirent and dirent['is_file']:
return dirent['obj_name']
else:
continue
return None
    def setUp(self):
        # Cache commonly used values from the fixtures that BaseTestCase
        # creates (one regular user, one admin, one repo with a file and a
        # folder), plus the endpoint URL under test.
        self.user_name = self.user.username
        self.admin_name = self.admin.username
        self.repo_id = self.repo.id
        self.file_path = self.file
        self.file_name = os.path.basename(self.file_path)
        self.folder_path = self.folder
        self.url = reverse('api-v2.1-file-view', args=[self.repo_id])

    def tearDown(self):
        # Remove the fixture repo created in setUp/BaseTestCase.
        self.remove_repo()
    # for test http GET request
    def test_can_get_file_info(self):
        # Owner can GET the file's metadata.
        self.login_as(self.user)
        resp = self.client.get(self.url + '?p=' + self.file_path)
        self.assertEqual(200, resp.status_code)
        json_resp = json.loads(resp.content)
        assert self.file_name == json_resp['obj_name']

    def test_get_file_info_with_invalid_perm(self):
        # login as admin, then visit user's file (no share -> forbidden)
        self.login_as(self.admin)
        resp = self.client.get(self.url + '?p=' + self.file_path)
        self.assertEqual(403, resp.status_code)

    # for test http POST request
    def test_post_operation_invalid(self):
        # An unknown 'operation' value is rejected with 400.
        self.login_as(self.user)
        data = {'operation': 'invalid',}
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(400, resp.status_code)

    def test_can_create_file(self):
        self.login_as(self.user)
        # delete the old file so the library root is empty
        resp = self.client.delete(self.url + '?p=' + self.file_path,
                {}, 'application/x-www-form-urlencoded')
        assert None == self.get_lib_file_name(self.repo_id)
        new_name = randstring(6)
        new_file_path = '/' + new_name
        data = {'operation': 'create',}
        # create file
        resp = self.client.post(self.url + '?p=' + new_file_path, data)
        self.assertEqual(200, resp.status_code)
        # check new file is in the repo
        assert new_name == self.get_lib_file_name(self.repo_id)

    def test_can_create_same_name_file(self):
        # Creating a file whose name already exists auto-renames it.
        self.login_as(self.user)
        file_name = os.path.basename(self.file_path.rstrip('/'))
        new_name = check_filename_with_rename(self.repo_id, '/', file_name)
        data = {'operation': 'create',}
        # create file
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(200, resp.status_code)
        json_resp = json.loads(resp.content)
        # check the renamed file has been created
        assert new_name == json_resp['obj_name']

    def test_create_file_with_invalid_repo_perm(self):
        # login as admin, then create file in user's repo
        self.login_as(self.admin)
        new_name = randstring(6)
        new_file_path = '/' + new_name
        data = {'operation': 'create',}
        resp = self.client.post(self.url + '?p=' + new_file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_create_file_with_invalid_folder_perm(self):
        # Folder-level permissions are a pro-only feature.
        if not LOCAL_PRO_DEV_ENV:
            return
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit sub-folder with 'r' permission
        assert seafile_api.check_permission_by_path(self.repo_id,
                self.folder_path, self.admin_name) == 'r'
        # login as admin, then create file in a 'r' permission folder
        self.login_as(self.admin)
        new_name = randstring(6)
        new_file_path = posixpath.join(self.folder_path, new_name)
        data = {'operation': 'create',}
        resp = self.client.post(self.url + '?p=' + new_file_path, data)
        self.assertEqual(403, resp.status_code)
    def test_can_rename_file(self):
        self.login_as(self.user)
        new_name = randstring(6)
        # check old file exists
        assert self.file_name == self.get_lib_file_name(self.repo_id)
        data = {'operation': 'rename', 'newname': new_name}
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(200, resp.status_code)
        # check old file has been renamed to new_name
        assert new_name == self.get_lib_file_name(self.repo_id)

    def test_rename_file_with_invalid_name(self):
        # '/' is not allowed inside a file name.
        self.login_as(self.user)
        # check old file exists
        assert self.file_name == self.get_lib_file_name(self.repo_id)
        data = {'operation': 'rename', 'newname': '123/456'}
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(400, resp.status_code)

    def test_can_rename_file_with_same_name(self):
        # Renaming onto an existing name auto-renames instead of failing.
        self.login_as(self.user)
        # check old file exists
        assert self.file_name == self.get_lib_file_name(self.repo_id)
        # create a new file
        new_name = randstring(6)
        data = {'operation': 'create',}
        resp = self.client.post(self.url + '?p=/' + new_name, data)
        self.assertEqual(200, resp.status_code)
        # rename the new file with the same name as the old file
        old_file_name = self.file_name
        checked_name = check_filename_with_rename(self.repo_id,
                '/', old_file_name)
        data = {'operation': 'rename', 'newname': checked_name}
        resp = self.client.post(self.url + '?p=/' + new_name, data)
        self.assertEqual(200, resp.status_code)
        json_resp = json.loads(resp.content)
        assert checked_name == json_resp['obj_name']

    def test_rename_file_with_invalid_repo_perm(self):
        # login as admin, then rename file in user's repo
        self.login_as(self.admin)
        new_name = randstring(6)
        data = {'operation': 'rename', 'newname': new_name}
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_rename_file_with_invalid_folder_perm(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        # create a file as old file in user repo sub-folder
        old_file_name = randstring(6)
        seafile_api.post_empty_file(repo_id=self.repo_id,
                parent_dir=self.folder_path, filename=old_file_name,
                username=self.user_name)
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit old file with 'r' permission
        old_file_path = posixpath.join(self.folder_path, old_file_name)
        assert seafile_api.check_permission_by_path(self.repo_id,
                old_file_path, self.admin_name) == 'r'
        # login as admin, then rename a 'r' permission old file
        self.login_as(self.admin)
        new_name = randstring(6)
        data = {'operation': 'rename', 'newname': new_name}
        resp = self.client.post(self.url + '?p=' + old_file_path, data)
        self.assertEqual(403, resp.status_code)
    def test_can_move_file(self):
        self.login_as(self.user)
        # check old file name exists
        assert self.file_name == self.get_lib_file_name(self.repo_id)
        # move file
        dst_repo_id = self.create_new_repo()
        data = {
            'operation': 'move',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(200, resp.status_code)
        # check old file has been deleted from the source repo
        assert self.get_lib_file_name(self.repo_id) == None
        # check old file has been moved to dst repo
        assert self.file_name == self.get_lib_file_name(dst_repo_id)
        self.remove_repo(dst_repo_id)

    def test_move_file_with_invalid_src_repo_perm(self):
        # login as admin, then move file in user's repo
        self.login_as(self.admin)
        dst_repo_id = self.admin_create_new_repo()
        data = {
            'operation': 'move',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_move_file_with_invalid_src_folder_perm(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        # create a file as old file in user repo sub-folder
        old_file_name = randstring(6)
        seafile_api.post_empty_file(repo_id=self.repo_id,
                parent_dir=self.folder_path, filename=old_file_name,
                username=self.user_name)
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit old file with 'r' permission
        old_file_path = posixpath.join(self.folder_path, old_file_name)
        assert seafile_api.check_permission_by_path(self.repo_id,
                old_file_path, self.admin_name) == 'r'
        # login as admin, then move a 'r' permission file
        self.login_as(self.admin)
        dst_repo_id = self.admin_create_new_repo()
        data = {
            'operation': 'move',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + old_file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_move_file_with_invalid_dst_repo_perm(self):
        # login as user, then move file to admin's repo
        self.login_as(self.user)
        # create new repo for admin
        dst_repo_id = self.admin_create_new_repo()
        data = {
            'operation': 'move',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_move_file_with_invalid_dst_folder_perm(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit sub-folder with 'r' permission
        assert seafile_api.check_permission_by_path(self.repo_id,
                self.folder_path, self.admin_name) == 'r'
        # create a file in the admin's repo
        admin_repo_id = self.admin_create_new_repo()
        admin_file_name = randstring(6)
        seafile_api.post_empty_file(repo_id=admin_repo_id,
                parent_dir='/', filename=admin_file_name,
                username=self.admin_name)
        # login as admin, then move file to a 'r' permission folder
        self.login_as(self.admin)
        data = {
            'operation': 'move',
            'dst_repo': self.repo_id,
            'dst_dir': self.folder_path,
        }
        url = reverse('api-v2.1-file-view', args=[admin_repo_id])
        resp = self.client.post(url + '?p=/' + admin_file_name, data)
        self.assertEqual(403, resp.status_code)
    def test_can_copy_file(self):
        self.login_as(self.user)
        # check old file name exists
        assert self.file_name == self.get_lib_file_name(self.repo_id)
        # copy file
        dst_repo_id = self.create_new_repo()
        data = {
            'operation': 'copy',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(200, resp.status_code)
        # check old file is still in the old repo
        assert self.file_name == self.get_lib_file_name(self.repo_id)
        # check old file has been copied to dst repo
        assert self.file_name == self.get_lib_file_name(dst_repo_id)
        self.remove_repo(dst_repo_id)

    def test_copy_file_with_invalid_src_repo_perm(self):
        # login as admin, then copy file in user's repo
        self.login_as(self.admin)
        # copy file
        dst_repo_id = self.admin_create_new_repo()
        data = {
            'operation': 'copy',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_copy_file_with_invalid_dst_repo_perm(self):
        # login as user, then copy file to admin's repo
        self.login_as(self.user)
        # create new repo for admin
        dst_repo_id = self.admin_create_new_repo()
        data = {
            'operation': 'copy',
            'dst_repo': dst_repo_id,
            'dst_dir': '/',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_copy_file_with_invalid_dst_folder_perm(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit sub-folder with 'r' permission
        assert seafile_api.check_permission_by_path(self.repo_id,
                self.folder_path, self.admin_name) == 'r'
        # create a file in the admin's repo
        admin_repo_id = self.admin_create_new_repo()
        admin_file_name = randstring(6)
        seafile_api.post_empty_file(repo_id=admin_repo_id,
                parent_dir='/', filename=admin_file_name,
                username=self.admin_name)
        # login as admin, then copy file to a 'r' permission folder
        self.login_as(self.admin)
        data = {
            'operation': 'copy',
            'dst_repo': self.repo_id,
            'dst_dir': self.folder_path,
        }
        url = reverse('api-v2.1-file-view', args=[admin_repo_id])
        resp = self.client.post(url + '?p=/' + admin_file_name, data)
        self.assertEqual(403, resp.status_code)
    def test_can_revert_file(self):
        self.login_as(self.user)
        # first rename the file so there is a revision to revert to
        new_name = randstring(6)
        seafile_api.rename_file(self.repo_id, '/', self.file_name,
                new_name, self.user_name)
        new_file_path = '/' + new_name
        # get file revisions
        commits = get_all_file_revisions(self.repo_id, new_file_path)
        # then revert file
        data = {
            'operation': 'revert',
            'commit_id': commits[0].id
        }
        resp = self.client.post(self.url + '?p=' + new_file_path, data)
        self.assertEqual(200, resp.status_code)

    def test_revert_file_with_invalid_user_permission(self):
        # NOTE: no login here -- the request is made anonymously.
        # first rename file
        new_name = randstring(6)
        seafile_api.rename_file(self.repo_id, '/', self.file_name,
                new_name, self.user_name)
        new_file_path = '/' + new_name
        # get file revisions
        commits = get_all_file_revisions(self.repo_id, new_file_path)
        # then revert file
        data = {
            'operation': 'revert',
            'commit_id': commits[0].id
        }
        resp = self.client.post(self.url + '?p=' + new_file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_revert_file_with_r_permission(self):
        # first rename file
        new_name = randstring(6)
        seafile_api.rename_file(self.repo_id, '/', self.file_name,
                new_name, self.user_name)
        new_file_path = '/' + new_name
        # get file revisions
        commits = get_all_file_revisions(self.repo_id, new_file_path)
        self.share_repo_to_admin_with_r_permission()
        self.login_as(self.admin)
        # read-only share is not enough to revert
        data = {
            'operation': 'revert',
            'commit_id': commits[0].id
        }
        resp = self.client.post(self.url + '?p=' + new_file_path, data)
        self.assertEqual(403, resp.status_code)

    def test_revert_file_without_commit_id(self):
        # 'commit_id' is required for the revert operation.
        self.login_as(self.user)
        data = {
            'operation': 'revert',
        }
        resp = self.client.post(self.url + '?p=' + self.file_path, data)
        self.assertEqual(400, resp.status_code)
# for test http PUT request
def test_can_lock_file(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.user)
# check file NOT locked when init
return_value = seafile_api.check_file_lock(self.repo_id,
self.file_path.lstrip('/'), self.user.username)
assert return_value == 0
# lock file
data = 'operation=lock'
resp = self.client.put(self.url + '?p=' + self.file_path, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
# check file has been locked
return_value = seafile_api.check_file_lock(self.repo_id,
self.file_path.lstrip('/'), self.user.username)
assert return_value == 2
def test_lock_file_with_invalid_repo_perm(self):
if not LOCAL_PRO_DEV_ENV:
return
# login as admin, then lock file in user's repo
self.login_as(self.admin)
# lock file
data = 'operation=lock'
resp = self.client.put(self.url + '?p=' + self.file_path, data, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
def test_lock_file_with_invalid_folder_perm(self):
if not LOCAL_PRO_DEV_ENV:
return
# create a file in user repo sub-folder
file_name = randstring(6)
seafile_api.post_empty_file(repo_id=self.repo_id,
parent_dir=self.folder_path, filename=file_name,
username=self.user_name)
# share user's repo to admin with 'rw' permission
seafile_api.share_repo(self.repo_id, self.user_name,
self.admin_name, 'rw')
# set sub-folder permisson as 'r' for admin
seafile_api.add_folder_user_perm(self.repo_id,
self.folder_path, 'r', self.admin_name)
# admin can visit file with 'r' permission
file_path = posixpath.join(self.folder_path, file_name)
assert seafile_api.check_permission_by_path(self.repo_id,
file_path, self.admin_name) == 'r'
# login as admin, then lock a 'r' permission file
self.login_as(self.admin)
data = 'operation=lock'
resp = self.client.put(self.url + '?p=' + file_path,
data, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
def test_can_unlock_file(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.user)
# lock file for test
seafile_api.lock_file(self.repo_id, self.file_path.lstrip('/'),
self.user.username, -1)
# check file has been locked when init
return_value = seafile_api.check_file_lock(self.repo_id,
self.file_path.lstrip('/'), self.user.username)
assert return_value == 2
# unlock file
data = 'operation=unlock'
resp = self.client.put(self.url + '?p=' + self.file_path, data, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
# check file has been unlocked
return_value = seafile_api.check_file_lock(self.repo_id,
self.file_path.lstrip('/'), self.user.username)
assert return_value == 0
def test_unlock_file_with_invalid_repo_perm(self):
if not LOCAL_PRO_DEV_ENV:
return
# login as admin, then unlock file in user's repo
self.login_as(self.admin)
# unlock file
data = 'operation=unlock'
resp = self.client.put(self.url + '?p=' + self.file_path, data, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
    def test_unlock_file_with_invalid_folder_perm(self):
        """Unlocking a file inside a read-only sub-folder must return 403."""
        if not LOCAL_PRO_DEV_ENV:
            return
        # create a file in user repo sub-folder
        file_name = randstring(6)
        seafile_api.post_empty_file(repo_id=self.repo_id,
                parent_dir=self.folder_path, filename=file_name,
                username=self.user_name)
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin; the folder-level
        # permission overrides the repo-level 'rw' share
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit file with 'r' permission
        file_path = posixpath.join(self.folder_path, file_name)
        assert seafile_api.check_permission_by_path(self.repo_id,
                file_path, self.admin_name) == 'r'
        # login as admin, then try to unlock the 'r' permission file
        self.login_as(self.admin)
        data = 'operation=unlock'
        resp = self.client.put(self.url + '?p=' + file_path,
                data, 'application/x-www-form-urlencoded')
        self.assertEqual(403, resp.status_code)
# for test http DELETE request
def test_can_delete_file(self):
self.login_as(self.user)
# check old file name exist
assert self.file_name == self.get_lib_file_name(self.repo_id)
# delete file
resp = self.client.delete(self.url + '?p=' + self.file_path,
{}, 'application/x-www-form-urlencoded')
self.assertEqual(200, resp.status_code)
# check old file has been deleted
assert None == self.get_lib_file_name(self.repo_id)
def test_delete_file_with_invalid_repo_perm(self):
# login as admin, then delete file in user's repo
self.login_as(self.admin)
# delete file
resp = self.client.delete(self.url + '?p=' + self.file_path,
{}, 'application/x-www-form-urlencoded')
self.assertEqual(403, resp.status_code)
    def test_delete_file_with_invalid_folder_perm(self):
        """Deleting a file inside a read-only sub-folder must return 403."""
        if not LOCAL_PRO_DEV_ENV:
            return
        # create a file in user repo sub-folder
        file_name = randstring(6)
        seafile_api.post_empty_file(repo_id=self.repo_id,
                parent_dir=self.folder_path, filename=file_name,
                username=self.user_name)
        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.repo_id, self.user_name,
                self.admin_name, 'rw')
        # set sub-folder permission as 'r' for admin; the folder-level
        # permission overrides the repo-level 'rw' share
        seafile_api.add_folder_user_perm(self.repo_id,
                self.folder_path, 'r', self.admin_name)
        # admin can visit file with 'r' permission
        file_path = posixpath.join(self.folder_path, file_name)
        assert seafile_api.check_permission_by_path(self.repo_id,
                file_path, self.admin_name) == 'r'
        # login as admin, then try to delete the read-only file
        self.login_as(self.admin)
        resp = self.client.delete(self.url + '?p=' + file_path,
                {}, 'application/x-www-form-urlencoded')
        self.assertEqual(403, resp.status_code)
| |
#!/usr/bin/env python
#
# Copyright 2010 Facebook
# Copyright 2015 Mobolic
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Python client library for the Facebook Platform.
This client library is designed to support the Graph API and the
official Facebook JavaScript SDK, which is the canonical way to
implement Facebook authentication. Read more about the Graph API at
https://developers.facebook.com/docs/graph-api.
"""
import hashlib
import hmac
import binascii
import base64
import requests
import json
import re
try:
from urllib.parse import parse_qs, urlencode, urlparse
except ImportError:
from urlparse import parse_qs, urlparse
from urllib import urlencode
__version__ = "3.2.0-pre"
FACEBOOK_GRAPH_URL = "https://graph.facebook.com/"
FACEBOOK_WWW_URL = "https://www.facebook.com/"
FACEBOOK_OAUTH_DIALOG_PATH = "dialog/oauth?"
VALID_API_VERSIONS = ["2.8", "2.9", "2.10", "2.11", "2.12", "3.0", "3.1"]
VALID_SEARCH_TYPES = ["place", "placetopic"]
class GraphAPI(object):
    """A client for the Facebook Graph API.

    https://developers.facebook.com/docs/graph-api

    The Graph API is made up of the objects in Facebook (e.g., people,
    pages, events, photos) and the connections between them (e.g.,
    friends, photo tags, and event RSVPs). This client provides access
    to those primitive types in a generic way. For example, given an
    OAuth access token, this will fetch the profile of the active user
    and the list of the user's friends:

        graph = facebook.GraphAPI(access_token)
        user = graph.get_object("me")
        friends = graph.get_connections(user["id"], "friends")

    You can see a list of all of the objects and connections supported
    by the API at https://developers.facebook.com/docs/graph-api/reference/.

    You can obtain an access token via OAuth or by using the Facebook
    JavaScript SDK. See
    https://developers.facebook.com/docs/facebook-login for details.

    If you are using the JavaScript SDK, you can use the
    get_user_from_cookie() method below to get the OAuth access token
    for the active user from the cookie saved by the SDK.
    """

    def __init__(
        self,
        access_token=None,
        timeout=None,
        version=None,
        proxies=None,
        session=None,
    ):
        """Initialize the client.

        access_token -- OAuth token sent with every request (optional).
        timeout -- per-request timeout in seconds, passed to requests.
        version -- Graph API version as "#.#"; defaults to the oldest
            entry of VALID_API_VERSIONS.
        proxies -- proxy mapping passed straight to requests.
        session -- an existing requests.Session to reuse (optional).
        """
        # The default version is only used if the version kwarg does not exist.
        default_version = VALID_API_VERSIONS[0]

        self.access_token = access_token
        self.timeout = timeout
        self.proxies = proxies
        self.session = session or requests.Session()

        if version:
            # Raw string: "\d" in a plain literal is an invalid escape
            # sequence and triggers a DeprecationWarning on Python 3.6+.
            version_regex = re.compile(r"^\d\.\d{1,2}$")
            match = version_regex.search(str(version))
            if match is not None:
                if str(version) not in VALID_API_VERSIONS:
                    raise GraphAPIError(
                        "Valid API versions are "
                        + str(VALID_API_VERSIONS).strip("[]")
                    )
                else:
                    self.version = "v" + str(version)
            else:
                raise GraphAPIError(
                    "Version number should be in the"
                    " following format: #.# (e.g. 2.0)."
                )
        else:
            self.version = "v" + default_version

    def get_permissions(self, user_id):
        """Return the set of permission names granted by the given user."""
        response = self.request(
            "{0}/{1}/permissions".format(self.version, user_id), {}
        )["data"]
        return {x["permission"] for x in response if x["status"] == "granted"}

    def get_object(self, id, **args):
        """Fetches the given object from the graph."""
        return self.request("{0}/{1}".format(self.version, id), args)

    def get_objects(self, ids, **args):
        """Fetches all of the given objects from the graph.

        We return a map from ID to object. If any of the IDs are
        invalid, we raise an exception.
        """
        args["ids"] = ",".join(ids)
        return self.request(self.version + "/", args)

    def search(self, type, **args):
        """Search for places; see
        https://developers.facebook.com/docs/places/search
        """
        if type not in VALID_SEARCH_TYPES:
            raise GraphAPIError(
                "Valid types are: %s" % ", ".join(VALID_SEARCH_TYPES)
            )

        args["type"] = type
        return self.request(self.version + "/search/", args)

    def get_connections(self, id, connection_name, **args):
        """Fetches the connections for given object."""
        return self.request(
            "{0}/{1}/{2}".format(self.version, id, connection_name), args
        )

    def get_all_connections(self, id, connection_name, **args):
        """Get all pages from a get_connections call.

        This will iterate over all pages returned by a get_connections
        call and yield the individual items.
        """
        while True:
            page = self.get_connections(id, connection_name, **args)
            for post in page["data"]:
                yield post
            # Renamed from ``next`` so the builtin is not shadowed.
            next_url = page.get("paging", {}).get("next")
            if not next_url:
                return
            # Follow pagination by re-sending the query string of the
            # "next" URL (minus the token, which request() adds itself).
            args = parse_qs(urlparse(next_url).query)
            del args["access_token"]

    def put_object(self, parent_object, connection_name, **data):
        """Writes the given object to the graph, connected to the given parent.

        For example,

            graph.put_object("me", "feed", message="Hello, world")

        writes "Hello, world" to the active user's wall. Likewise, this
        will comment on the first post of the active user's feed:

            feed = graph.get_connections("me", "feed")
            post = feed["data"][0]
            graph.put_object(post["id"], "comments", message="First!")

        Certain operations require extended permissions. See
        https://developers.facebook.com/docs/facebook-login/permissions
        for details about permissions.
        """
        assert self.access_token, "Write operations require an access token"
        return self.request(
            "{0}/{1}/{2}".format(self.version, parent_object, connection_name),
            post_args=data,
            method="POST",
        )

    def put_comment(self, object_id, message):
        """Writes the given comment on the given post."""
        return self.put_object(object_id, "comments", message=message)

    def put_like(self, object_id):
        """Likes the given post."""
        return self.put_object(object_id, "likes")

    def delete_object(self, id):
        """Deletes the object with the given ID from the graph."""
        return self.request(
            "{0}/{1}".format(self.version, id), method="DELETE"
        )

    def delete_request(self, user_id, request_id):
        """Deletes the Request with the given ID for the given user."""
        return self.request(
            "{0}_{1}".format(request_id, user_id), method="DELETE"
        )

    def put_photo(self, image, album_path="me/photos", **kwargs):
        """Upload an image using multipart/form-data.

        image -- A file object representing the image to be uploaded.
        album_path -- A path representing where the image should be uploaded.
        """
        return self.request(
            "{0}/{1}".format(self.version, album_path),
            post_args=kwargs,
            files={"source": image},
            method="POST",
        )

    def get_version(self):
        """Fetches the current version number of the Graph API being used."""
        args = {"access_token": self.access_token}
        try:
            response = self.session.request(
                "GET",
                FACEBOOK_GRAPH_URL + self.version + "/me",
                params=args,
                timeout=self.timeout,
                proxies=self.proxies,
            )
        except requests.HTTPError as e:
            # requests' HTTPError has no ``read()`` (that was a urllib2
            # idiom); the response body lives on ``e.response``.
            raise GraphAPIError(e.response.json())

        try:
            headers = response.headers
            version = headers["facebook-api-version"].replace("v", "")
            return str(version)
        except Exception:
            raise GraphAPIError("API version number not available")

    def request(
        self, path, args=None, post_args=None, files=None, method=None
    ):
        """Fetches the given path in the Graph API.

        We translate args to a valid query string. If post_args is
        given, we send a POST request to the given path with the given
        arguments.
        """
        if args is None:
            args = dict()
        if post_args is not None:
            method = "POST"

        # Add `access_token` to post_args or args if it has not already been
        # included.
        if self.access_token:
            # If post_args exists, we assume that args either does not exists
            # or it does not need `access_token`.
            if post_args and "access_token" not in post_args:
                post_args["access_token"] = self.access_token
            elif "access_token" not in args:
                args["access_token"] = self.access_token

        try:
            response = self.session.request(
                method or "GET",
                FACEBOOK_GRAPH_URL + path,
                timeout=self.timeout,
                params=args,
                data=post_args,
                proxies=self.proxies,
                files=files,
            )
        except requests.HTTPError as e:
            # See get_version(): ``e.read()`` does not exist on requests
            # exceptions and used to raise AttributeError here.
            raise GraphAPIError(e.response.json())

        headers = response.headers
        if "json" in headers["content-type"]:
            result = response.json()
        elif "image/" in headers["content-type"]:
            # Binary image payload: hand back the bytes plus metadata.
            mimetype = headers["content-type"]
            result = {
                "data": response.content,
                "mime-type": mimetype,
                "url": response.url,
            }
        elif "access_token" in parse_qs(response.text):
            # Legacy token endpoints answer with a bare query string.
            query_str = parse_qs(response.text)
            if "access_token" in query_str:
                result = {"access_token": query_str["access_token"][0]}
                if "expires" in query_str:
                    result["expires"] = query_str["expires"][0]
            else:
                raise GraphAPIError(response.json())
        else:
            raise GraphAPIError("Maintype was not text, image, or querystring")

        if result and isinstance(result, dict) and result.get("error"):
            raise GraphAPIError(result)
        return result

    def get_app_access_token(self, app_id, app_secret, offline=False):
        """Get the application's access token as a string.

        If offline=True, use the concatenated app ID and secret
        instead of making an API call.
        <https://developers.facebook.com/docs/facebook-login/
        access-tokens#apptokens>
        """
        if offline:
            return "{0}|{1}".format(app_id, app_secret)
        else:
            args = {
                "grant_type": "client_credentials",
                "client_id": app_id,
                "client_secret": app_secret,
            }

            return self.request(
                "{0}/oauth/access_token".format(self.version), args=args
            )["access_token"]

    def get_access_token_from_code(
        self, code, redirect_uri, app_id, app_secret
    ):
        """Get an access token from the "code" returned from an OAuth dialog.

        Returns a dict containing the user-specific access token and its
        expiration date (if applicable).
        """
        args = {
            "code": code,
            "redirect_uri": redirect_uri,
            "client_id": app_id,
            "client_secret": app_secret,
        }

        return self.request(
            "{0}/oauth/access_token".format(self.version), args
        )

    def extend_access_token(self, app_id, app_secret):
        """Extend the expiration time of a valid OAuth access token. See
        <https://developers.facebook.com/docs/facebook-login/access-tokens/
        expiration-and-extension>
        """
        args = {
            "client_id": app_id,
            "client_secret": app_secret,
            "grant_type": "fb_exchange_token",
            "fb_exchange_token": self.access_token,
        }

        return self.request(
            "{0}/oauth/access_token".format(self.version), args=args
        )

    def debug_access_token(self, token, app_id, app_secret):
        """Get information about a user access token issued by an app. See
        <https://developers.facebook.com/docs/facebook-login/
        access-tokens/debugging-and-error-handling>

        We can generate the app access token by concatenating the app
        id and secret: <https://developers.facebook.com/docs/
        facebook-login/access-tokens#apptokens>
        """
        args = {
            "input_token": token,
            "access_token": "{0}|{1}".format(app_id, app_secret),
        }
        return self.request(self.version + "/" + "debug_token", args=args)

    def get_auth_url(self, app_id, canvas_url, perms=None, **kwargs):
        """Build a URL to create an OAuth dialog."""
        url = "{0}{1}/{2}".format(
            FACEBOOK_WWW_URL, self.version, FACEBOOK_OAUTH_DIALOG_PATH
        )

        args = {"client_id": app_id, "redirect_uri": canvas_url}
        if perms:
            args["scope"] = ",".join(perms)
        args.update(kwargs)
        return url + urlencode(args)
class GraphAPIError(Exception):
    """Raised when the Graph API returns an error payload.

    Facebook has used several error formats over the years; the
    constructor probes each known shape in turn (EAFP, since ``result``
    may also be a plain string) and extracts a human-readable message,
    an error type, and, when present, a numeric error code.
    """

    def __init__(self, result):
        self.result = result
        self.code = None

        # Top-level "error_code" (used by some legacy responses).
        try:
            self.type = result["error_code"]
        except (KeyError, TypeError):
            self.type = ""

        _unset = object()
        message = _unset

        # OAuth 2.0 Draft 10 style.
        try:
            message = result["error_description"]
        except (KeyError, TypeError):
            pass

        if message is _unset:
            # OAuth 2.0 Draft 00 style.
            try:
                error = result["error"]
                message = error["message"]
                self.code = error.get("code")
                if not self.type:
                    self.type = error.get("type", "")
            except (KeyError, TypeError):
                # REST server style.
                try:
                    message = result["error_msg"]
                except (KeyError, TypeError):
                    # Unknown shape: surface the raw result.
                    message = result

        self.message = message
        Exception.__init__(self, self.message)
def get_user_from_cookie(cookies, app_id, app_secret):
    """Parses the cookie set by the official Facebook JavaScript SDK.

    cookies should be a dictionary-like object mapping cookie names to
    cookie values.

    If the user is logged in via Facebook, we return a dictionary with
    the keys "uid" and "access_token". The former is the user's
    Facebook ID, and the latter can be used to make authenticated
    requests to the Graph API. If the user is not logged in, we
    return None.

    Read more about Facebook authentication at
    https://developers.facebook.com/docs/facebook-login.
    """
    raw_cookie = cookies.get("fbsr_" + app_id, "")
    if not raw_cookie:
        return None

    # Validate the cookie's signature and decode its payload.
    parsed = parse_signed_request(raw_cookie, app_secret)
    if not parsed:
        return None

    # Exchange the OAuth code embedded in the cookie for a token.
    try:
        result = GraphAPI().get_access_token_from_code(
            parsed["code"], "", app_id, app_secret
        )
    except GraphAPIError:
        return None

    result["uid"] = parsed["user_id"]
    return result
def parse_signed_request(signed_request, app_secret):
    """Return dictionary with signed request data.

    We return a dictionary containing the information in the
    signed_request. This includes a user_id if the user has authorised
    your application, as well as any information requested.

    If the signed_request is malformed, corrupted, or fails signature
    verification, False is returned.
    """
    try:
        encoded_sig, payload = map(str, signed_request.split(".", 1))
        sig = base64.urlsafe_b64decode(
            encoded_sig + "=" * ((4 - len(encoded_sig) % 4) % 4)
        )
        data = base64.urlsafe_b64decode(
            payload + "=" * ((4 - len(payload) % 4) % 4)
        )
    except (ValueError, IndexError, TypeError):
        # ValueError: no "." separator (the two-target unpack raises
        # ValueError, which the old IndexError-only clause missed) or
        # bad base64 (binascii.Error is a ValueError subclass).
        # IndexError/TypeError: corrupted payload types.
        return False

    data = json.loads(data.decode("ascii"))
    if data.get("algorithm", "").upper() != "HMAC-SHA256":
        return False

    # HMAC can only handle ascii (byte) strings
    # https://bugs.python.org/issue5285
    app_secret = app_secret.encode("ascii")
    payload = payload.encode("ascii")

    expected_sig = hmac.new(
        app_secret, msg=payload, digestmod=hashlib.sha256
    ).digest()

    # Constant-time comparison guards against timing attacks on the
    # signature check.
    if not hmac.compare_digest(sig, expected_sig):
        return False
    return data
| |
"""
Support for the Uber API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.uber/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['uber_rides==0.2.5']
_LOGGER = logging.getLogger(__name__)
CONF_END_LATITUDE = 'end_latitude'
CONF_END_LONGITUDE = 'end_longitude'
CONF_PRODUCT_IDS = 'product_ids'
CONF_SERVER_TOKEN = 'server_token'
CONF_START_LATITUDE = 'start_latitude'
CONF_START_LONGITUDE = 'start_longitude'
ICON = 'mdi:taxi'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SERVER_TOKEN): cv.string,
vol.Required(CONF_START_LATITUDE): cv.latitude,
vol.Required(CONF_START_LONGITUDE): cv.longitude,
vol.Optional(CONF_END_LATITUDE): cv.latitude,
vol.Optional(CONF_END_LONGITUDE): cv.longitude,
vol.Optional(CONF_PRODUCT_IDS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Uber sensor platform.

    Creates a pickup-time sensor for every (requested) product and,
    when price details are available and not metered, a price sensor
    as well.
    """
    from uber_rides.session import Session
    session = Session(server_token=config.get(CONF_SERVER_TOKEN))

    wanted_product_ids = config.get(CONF_PRODUCT_IDS)

    dev = []
    timeandpriceest = UberEstimate(session, config[CONF_START_LATITUDE],
                                   config[CONF_START_LONGITUDE],
                                   config.get(CONF_END_LATITUDE),
                                   config.get(CONF_END_LONGITUDE))
    for product_id, product in timeandpriceest.products.items():
        # CONF_PRODUCT_IDS defaults to [], which means "all products".
        # The previous ``is not None`` test skipped *every* product when
        # the option was omitted, since nothing is in an empty list.
        if wanted_product_ids and product_id not in wanted_product_ids:
            continue
        dev.append(UberSensor('time', timeandpriceest, product_id, product))
        # ``!=`` instead of ``is not``: identity comparison against a
        # string literal is implementation-defined and a SyntaxWarning.
        if (product.get('price_details') is not None) and \
                product['price_details']['estimate'] != 'Metered':
            dev.append(UberSensor(
                'price', timeandpriceest, product_id, product))
    add_devices(dev)
# pylint: disable=too-few-public-methods
class UberSensor(Entity):
    """Implementation of an Uber sensor.

    One instance represents either a pickup-time estimate ('time', in
    minutes) or a price estimate ('price', in the product's currency)
    for a single Uber product.
    """

    def __init__(self, sensorType, products, product_id, product):
        """Initialize the Uber sensor.

        sensorType -- 'time' or 'price'.
        products -- the shared UberEstimate data object.
        product_id -- key of this product in ``products.products``.
        product -- the product dict itself.
        """
        self.data = products
        self._product_id = product_id
        self._product = product
        self._sensortype = sensorType
        self._name = '{} {}'.format(self._product['display_name'],
                                    self._sensortype)
        if self._sensortype == 'time':
            self._unit_of_measurement = 'min'
            time_estimate = self._product.get('time_estimate_seconds', 0)
            self._state = int(time_estimate / 60)
        elif self._sensortype == 'price':
            if self._product.get('price_details') is not None:
                price_details = self._product['price_details']
                self._unit_of_measurement = price_details.get('currency_code')
                # Prefer 'low_estimate' and fall back to the product
                # 'minimum', mirroring update(); the previous branch was
                # inverted and chose 'minimum' exactly when
                # 'low_estimate' was available.
                if price_details.get('low_estimate') is not None:
                    statekey = 'low_estimate'
                else:
                    statekey = 'minimum'
                self._state = int(price_details.get(statekey, 0))
            else:
                self._state = 0
        self.update()

    @property
    def name(self):
        """Return the name of the sensor, prefixed with 'Uber'."""
        # Prefix lazily so a display name already containing "uber"
        # is not double-prefixed.
        if 'uber' not in self._name.lower():
            self._name = 'Uber{}'.format(self._name)
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes, omitting any None values."""
        time_estimate = self._product.get("time_estimate_seconds")
        params = {
            'Product ID': self._product['product_id'],
            'Product short description': self._product['short_description'],
            'Product display name': self._product['display_name'],
            'Product description': self._product['description'],
            'Pickup time estimate (in seconds)': time_estimate,
            'Trip duration (in seconds)': self._product.get('duration'),
            'Vehicle Capacity': self._product['capacity']
        }

        if self._product.get('price_details') is not None:
            price_details = self._product['price_details']
            dunit = price_details.get('distance_unit')
            distance_key = 'Trip distance (in {}s)'.format(dunit)
            distance_val = self._product.get('distance')
            params['Cost per minute'] = price_details.get('cost_per_minute')
            params['Distance units'] = price_details.get('distance_unit')
            params['Cancellation fee'] = price_details.get('cancellation_fee')
            cpd = price_details.get('cost_per_distance')
            params['Cost per distance'] = cpd
            params['Base price'] = price_details.get('base')
            params['Minimum price'] = price_details.get('minimum')
            params['Price estimate'] = price_details.get('estimate')
            params['Price currency code'] = price_details.get('currency_code')
            params['High price estimate'] = price_details.get('high_estimate')
            params['Low price estimate'] = price_details.get('low_estimate')
            params['Surge multiplier'] = price_details.get('surge_multiplier')
        else:
            # Without price details Uber reports distances in miles.
            distance_key = 'Trip distance (in miles)'
            distance_val = self._product.get('distance')

        params[distance_key] = distance_val

        return {k: v for k, v in params.items() if v is not None}

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    # pylint: disable=too-many-branches
    def update(self):
        """Get the latest data from the Uber API and update the states."""
        self.data.update()
        self._product = self.data.products[self._product_id]
        if self._sensortype == 'time':
            time_estimate = self._product.get('time_estimate_seconds', 0)
            self._state = int(time_estimate / 60)
        elif self._sensortype == 'price':
            price_details = self._product.get('price_details')
            if price_details is not None:
                min_price = price_details.get('minimum')
                self._state = int(price_details.get('low_estimate', min_price))
            else:
                self._state = 0
# pylint: disable=too-few-public-methods
class UberEstimate(object):
    """The class for handling the time and price estimate.

    Fetches Uber product, price, and pickup-time data for a start
    location (and optional end location) and exposes it via
    ``self.products``, a dict keyed by product_id.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, session, start_latitude, start_longitude,
                 end_latitude=None, end_longitude=None):
        """Initialize the UberEstimate object.

        session -- an authenticated uber_rides Session.
        end_latitude/end_longitude -- optional; price estimates are
        fetched only when both are supplied.
        """
        self._session = session
        self.start_latitude = start_latitude
        self.start_longitude = start_longitude
        self.end_latitude = end_latitude
        self.end_longitude = end_longitude
        # product_id -> product dict; populated by update().
        self.products = None
        self.update()

    # Throttled so repeated sensor updates don't hammer the Uber API.
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest product info and estimates from the Uber API."""
        from uber_rides.client import UberRidesClient
        client = UberRidesClient(self._session)
        self.products = {}

        products_response = client.get_products(
            self.start_latitude, self.start_longitude)

        products = products_response.json.get('products')

        for product in products:
            self.products[product['product_id']] = product

        # Price estimates require a destination.
        if self.end_latitude is not None and self.end_longitude is not None:
            price_response = client.get_price_estimates(
                self.start_latitude, self.start_longitude,
                self.end_latitude, self.end_longitude)

            prices = price_response.json.get('prices', [])

            # Merge each price estimate into its product's
            # 'price_details' dict (created if absent).
            for price in prices:
                product = self.products[price['product_id']]
                product['duration'] = price.get('duration', '0')
                product['distance'] = price.get('distance', '0')
                price_details = product.get('price_details')
                if product.get('price_details') is None:
                    price_details = {}
                price_details['estimate'] = price.get('estimate', '0')
                price_details['high_estimate'] = price.get('high_estimate',
                                                           '0')
                price_details['low_estimate'] = price.get('low_estimate', '0')
                price_details['currency_code'] = price.get('currency_code')
                surge_multiplier = price.get('surge_multiplier', '0')
                price_details['surge_multiplier'] = surge_multiplier
                product['price_details'] = price_details

        estimate_response = client.get_pickup_time_estimates(
            self.start_latitude, self.start_longitude)

        estimates = estimate_response.json.get('times')

        # Attach the pickup ETA (seconds) to each product.
        for estimate in estimates:
            self.products[estimate['product_id']][
                'time_estimate_seconds'] = estimate.get('estimate', '0')
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Slicehost Driver
"""
from libcloud.types import NodeState, Provider, InvalidCredsException
from libcloud.base import ConnectionKey, Response, NodeDriver, Node
from libcloud.base import NodeSize, NodeImage, NodeLocation
from libcloud.base import is_private_subnet
import base64
import socket
from xml.etree import ElementTree as ET
from xml.parsers.expat import ExpatError
class SlicehostResponse(Response):
    """Response parser for Slicehost's XML API."""

    def parse_body(self):
        """Parse the response body into an ElementTree element.

        Returns None for empty bodies.
        """
        # length of 1 can't be valid XML, but on destroy node, slicehost
        # returns a 1 byte response with a "Content-Type: application/xml"
        # header. booya.
        if not self.body or len(self.body) <= 1:
            return None
        return ET.XML(self.body)

    def parse_error(self):
        """Return a human-readable error string for a failed request.

        Raises InvalidCredsException on HTTP 401; otherwise joins the
        <error> elements of the XML error document, falling back to the
        raw body when it is not parseable XML.
        """
        if self.status == 401:
            raise InvalidCredsException(self.body)

        try:
            # Renamed from ``object`` so the builtin is not shadowed.
            error_doc = ET.XML(self.body)
            return "; ".join([ err.text
                               for err in
                               error_doc.findall('error') ])
        except ExpatError:
            return self.body
class SlicehostConnection(ConnectionKey):
    """
    Connection class for the Slicehost driver
    """

    host = 'api.slicehost.com'
    responseCls = SlicehostResponse

    def add_default_headers(self, headers):
        # HTTP Basic auth: the API key is the user name and the password
        # is empty, hence the trailing ":" before base64-encoding.
        # NOTE(review): b64encode on a str works on Python 2 only —
        # confirm this module still targets Python 2 before porting.
        headers['Authorization'] = ('Basic %s'
                              % (base64.b64encode('%s:' % self.key)))
        return headers
class SlicehostNodeDriver(NodeDriver):
    """
    Slicehost node driver
    """

    connectionCls = SlicehostConnection

    type = Provider.SLICEHOST
    name = 'Slicehost'

    features = {"create_node": ["generates_password"]}

    NODE_STATE_MAP = { 'active': NodeState.RUNNING,
                       'build': NodeState.PENDING,
                       'reboot': NodeState.REBOOTING,
                       'hard_reboot': NodeState.REBOOTING,
                       'terminated': NodeState.TERMINATED }

    def list_nodes(self):
        """Return all slices in the account as Node objects."""
        return self._to_nodes(self.connection.request('/slices.xml').object)

    def list_sizes(self, location=None):
        """Return the available flavors as NodeSize objects."""
        return self._to_sizes(self.connection.request('/flavors.xml').object)

    def list_images(self, location=None):
        """Return the available OS images as NodeImage objects."""
        return self._to_images(self.connection.request('/images.xml').object)

    def list_locations(self):
        """Return Slicehost's (static) data-center locations."""
        return [
            NodeLocation(0, 'Slicehost St. Louis (STL-A)', 'US', self),
            NodeLocation(0, 'Slicehost St. Louis (STL-B)', 'US', self),
            NodeLocation(0, 'Slicehost Dallas-Fort Worth (DFW-1)', 'US', self)
        ]

    def create_node(self, **kwargs):
        """Create a new slice from the given name, image, and size."""
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']
        uri = '/slices.xml'

        # Build the <slice> XML document the API expects.
        root = ET.Element('slice')

        el_name = ET.SubElement(root, 'name')
        el_name.text = name

        flavor_id = ET.SubElement(root, 'flavor-id')
        flavor_id.text = str(size.id)

        image_id = ET.SubElement(root, 'image-id')
        image_id.text = str(image.id)

        xml = ET.tostring(root)

        node = self._to_nodes(
            self.connection.request(
                uri,
                method='POST',
                data=xml,
                headers={'Content-Type': 'application/xml'}
            ).object
        )[0]
        return node

    def reboot_node(self, node):
        """Reboot the node by passing in the node object"""

        # 'hard' could bubble up as kwarg depending on how reboot_node
        # turns out. Defaulting to soft reboot.
        #hard = False
        #reboot = self.api.hard_reboot if hard else self.api.reboot
        #expected_status = 'hard_reboot' if hard else 'reboot'

        uri = '/slices/%s/reboot.xml' % (node.id)
        node = self._to_nodes(
            self.connection.request(uri, method='PUT').object
        )[0]
        return node.state == NodeState.REBOOTING

    def destroy_node(self, node):
        """Destroys the node

        Requires 'Allow Slices to be deleted or rebuilt from the API' to be
        ticked at https://manage.slicehost.com/api, otherwise returns::
            <errors>
              <error>You must enable slice deletes in the SliceManager</error>
              <error>Permission denied</error>
            </errors>
        """
        uri = '/slices/%s/destroy.xml' % (node.id)
        self.connection.request(uri, method='PUT')
        return True

    def _to_nodes(self, doc):
        """Convert a <slice> or <slices> document into Node objects."""
        # Parameter renamed from ``object``: don't shadow the builtin.
        if doc.tag == 'slice':
            return [ self._to_node(doc) ]
        node_elements = doc.findall('slice')
        return [ self._to_node(el) for el in node_elements ]

    def _to_node(self, element):
        """Convert one <slice> element into a Node."""

        attrs = [ 'name', 'image-id', 'progress', 'id', 'bw-out', 'bw-in',
                  'flavor-id', 'status', 'ip-address', 'root-password' ]

        node_attrs = {}
        for attr in attrs:
            node_attrs[attr] = element.findtext(attr)

        # slicehost does not determine between public and private, so we
        # have to figure it out
        public_ip = element.findtext('ip-address')
        private_ip = None
        for addr in element.findall('addresses/address'):
            ip = addr.text
            try:
                socket.inet_aton(ip)
            except socket.error:
                # not a valid ip
                continue
            if is_private_subnet(ip):
                private_ip = ip
            else:
                public_ip = ip

        # Only a KeyError (unknown status) should fall back to UNKNOWN;
        # the previous bare ``except:`` also swallowed e.g. KeyboardInterrupt.
        try:
            state = self.NODE_STATE_MAP[element.findtext('status')]
        except KeyError:
            state = NodeState.UNKNOWN

        # for consistency with other drivers, we put this in two places.
        node_attrs['password'] = node_attrs['root-password']
        extra = {}
        for k in node_attrs.keys():
            ek = k.replace("-", "_")
            extra[ek] = node_attrs[k]
        n = Node(id=element.findtext('id'),
                 name=element.findtext('name'),
                 state=state,
                 public_ip=[public_ip],
                 private_ip=[private_ip],
                 driver=self.connection.driver,
                 extra=extra)
        return n

    def _to_sizes(self, doc):
        """Convert a <flavor> or <flavors> document into NodeSize objects."""
        if doc.tag == 'flavor':
            return [ self._to_size(doc) ]
        elements = doc.findall('flavor')
        return [ self._to_size(el) for el in elements ]

    def _to_size(self, element):
        """Convert one <flavor> element into a NodeSize."""
        # Price is given per month in cents; normalize to $/hour.
        s = NodeSize(id=int(element.findtext('id')),
                     name=str(element.findtext('name')),
                     ram=int(element.findtext('ram')),
                     disk=None, # XXX: needs hardcode
                     bandwidth=None, # XXX: needs hardcode
                     price=float(element.findtext('price'))/(100*24*30),
                     driver=self.connection.driver)
        return s

    def _to_images(self, doc):
        """Convert an <image> or <images> document into NodeImage objects."""
        if doc.tag == 'image':
            return [ self._to_image(doc) ]
        elements = doc.findall('image')
        return [ self._to_image(el) for el in elements ]

    def _to_image(self, element):
        """Convert one <image> element into a NodeImage."""
        i = NodeImage(id=int(element.findtext('id')),
                      name=str(element.findtext('name')),
                      driver=self.connection.driver)
        return i
| |
# Copyright (c) 2016 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from yubikit.core import TRANSPORT
from yubikit.core.otp import OtpConnection
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SmartCardConnection
from yubikit.management import CAPABILITY, USB_INTERFACE
from yubikit.yubiotp import YubiOtpSession
from yubikit.oath import OathSession
from .util import cli_fail
from ..device import is_fips_version, get_name, connect_to_device
from ..otp import is_in_fips_mode as otp_in_fips_mode
from ..oath import is_in_fips_mode as oath_in_fips_mode
from ..fido import is_in_fips_mode as ctap_in_fips_mode
from typing import List
import click
import logging
logger = logging.getLogger(__name__)
# Every known capability is listed in the status table (no filtering).
SHOWN_CAPABILITIES = set(CAPABILITY)
def print_app_status_table(supported_apps, enabled_apps):
    """Print an aligned table of application status per transport.

    One row per capability, with an USB column and — only when the key
    supports NFC at all — an NFC column. Each cell is one of "Enabled",
    "Disabled" or "Not available".
    """
    usb_supported = supported_apps.get(TRANSPORT.USB, 0)
    usb_enabled = enabled_apps.get(TRANSPORT.USB, 0)
    nfc_supported = supported_apps.get(TRANSPORT.NFC, 0)
    nfc_enabled = enabled_apps.get(TRANSPORT.NFC, 0)

    def _status(app, supported, enabled):
        # "Not available" wins over enabled/disabled when unsupported.
        if not (app & supported):
            return "Not available"
        return "Enabled" if app & enabled else "Disabled"

    rows = []
    for app in SHOWN_CAPABILITIES:
        row = [str(app), _status(app, usb_supported, usb_enabled)]
        if nfc_supported:
            row.append(_status(app, nfc_supported, nfc_enabled))
        rows.append(row)

    # Compute the widest cell per column for left-justified padding.
    widths: List[int] = []
    for row in rows:
        for idx, cell in enumerate(row):
            if idx < len(widths):
                widths[idx] = max(widths[idx], len(cell))
            else:
                widths.append(len(cell))

    header = "Applications".ljust(widths[0])
    if nfc_supported:
        header += "\t" + "USB".ljust(widths[1]) + "\t" + "NFC".ljust(widths[2])
    click.echo(header)

    body = "".join(
        "".join(cell.ljust(widths[idx]) + "\t" for idx, cell in enumerate(row))
        + "\n"
        for row in rows
    )
    click.echo(body, nl=False)
def get_overall_fips_status(pid, info):
    """Query each FIPS-relevant application for its FIPS-mode state.

    Opens a fresh connection per application (OTP, OATH, FIDO U2F) and
    returns a dict mapping application name to a bool. Applications not
    enabled over USB are reported as False without being contacted.
    """
    usb_enabled = info.config.enabled_capabilities[TRANSPORT.USB]
    statuses = {"OTP": False, "OATH": False, "FIDO U2F": False}
    if usb_enabled & CAPABILITY.OTP:
        with connect_to_device(info.serial, [OtpConnection])[0] as conn:
            statuses["OTP"] = otp_in_fips_mode(YubiOtpSession(conn))
    if usb_enabled & CAPABILITY.OATH:
        with connect_to_device(info.serial, [SmartCardConnection])[0] as conn:
            statuses["OATH"] = oath_in_fips_mode(OathSession(conn))
    if usb_enabled & CAPABILITY.U2F:
        with connect_to_device(info.serial, [FidoConnection])[0] as conn:
            statuses["FIDO U2F"] = ctap_in_fips_mode(conn)
    return statuses
def _check_fips_status(pid, info):
    """Print the overall FIPS-approved status plus a per-application list."""
    fips_status = get_overall_fips_status(pid, info)
    click.echo()
    # Overall approval requires every individual application to be in FIPS mode.
    overall = all(fips_status.values())
    click.echo(f"FIPS Approved Mode: {'Yes' if overall else 'No'}")
    for status_key in sorted(fips_status):
        click.echo(f"  {status_key}: {'Yes' if fips_status[status_key] else 'No'}")
# NOTE(review): @click.option sits *above* @click.command, so (decorators
# applying bottom-up) the option is attached to an already-built Command
# object. This appears to rely on click appending params to Command
# instances — confirm; the conventional order is @click.command on top.
@click.option(
    "-c",
    "--check-fips",
    help="Check if YubiKey is in FIPS Approved mode (available on YubiKey 4 FIPS "
    "only).",
    is_flag=True,
)
@click.command()
@click.pass_context
def info(ctx, check_fips):
    """
    Show general information.

    Displays information about the attached YubiKey such as serial number,
    firmware version, capabilities, etc.
    """
    # Device info and the USB PID were resolved by the parent command group.
    info = ctx.obj["info"]  # local name shadows the command function; harmless
    pid = ctx.obj["pid"]
    if pid is None:
        # PID unknown (e.g. not connected over USB): interfaces and key
        # type cannot be derived.
        interfaces = None
        key_type = None
    else:
        interfaces = pid.get_interfaces()
        key_type = pid.get_type()
    device_name = get_name(info, key_type)
    click.echo(f"Device type: {device_name}")
    if info.serial:
        click.echo(f"Serial number: {info.serial}")
    if info.version:
        f_version = ".".join(str(x) for x in info.version)
        click.echo(f"Firmware version: {f_version}")
    else:
        # Version can be ambiguous when several keys share the bus.
        click.echo(
            "Firmware version: Uncertain, re-run with only one YubiKey connected"
        )
    if info.form_factor:
        click.echo(f"Form factor: {info.form_factor!s}")
    if interfaces:
        f_interfaces = ", ".join(
            t.name for t in USB_INTERFACE if t in USB_INTERFACE(interfaces)
        )
        click.echo(f"Enabled USB interfaces: {f_interfaces}")
    if TRANSPORT.NFC in info.supported_capabilities:
        f_nfc = (
            "enabled"
            if info.config.enabled_capabilities.get(TRANSPORT.NFC)
            else "disabled"
        )
        click.echo(f"NFC transport is {f_nfc}.")
    if info.is_locked:
        click.echo("Configured capabilities are protected by a lock code.")
    click.echo()
    print_app_status_table(
        info.supported_capabilities, info.config.enabled_capabilities
    )
    if check_fips:
        if is_fips_version(info.version):
            # The FIPS checks open their own connections; release ours first.
            ctx.obj["conn"].close()
            _check_fips_status(pid, info)
        else:
            cli_fail("Unable to check FIPS Approved mode - Not a YubiKey 4 FIPS")
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for downloading media from Google APIs."""
import urllib3.response
from google._async_resumable_media import _download
from google._async_resumable_media import _helpers
from google._async_resumable_media.requests import _request_helpers
from google.resumable_media import common
from google.resumable_media import _helpers as sync_helpers
from google.resumable_media.requests import download
# Re-export the sync package's mismatch message template so both code paths
# raise identically-worded DataCorruption errors.
_CHECKSUM_MISMATCH = download._CHECKSUM_MISMATCH
class Download(_request_helpers.RequestsMixin, _download.Download):
    """Helper to manage downloading a resource from a Google API.

    "Slices" of the resource can be retrieved by specifying a range
    with ``start`` and / or ``end``. However, in typical usage, neither
    ``start`` nor ``end`` is expected to be provided.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded. If not
            provided, but ``end`` is provided, will download from the
            beginning to ``end`` of the media.
        end (int): The last byte in a range to be downloaded. If not
            provided, but ``start`` is provided, will download from the
            ``start`` to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The response headers must contain
            a checksum of the requested type. If the headers lack an
            appropriate checksum (for instance in the case of transcoded or
            ranged downloads where the remote service does not know the
            correct checksum) an INFO-level log will be emitted. Supported
            values are "md5", "crc32c" and None. The default is "md5".

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
    """

    async def _write_to_stream(self, response):
        """Write response body to a write-able stream.

        .. note:
            This method assumes that the ``_stream`` attribute is set on the
            current download.

        Args:
            response (~requests.Response): The HTTP response object.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
        """
        # `_get_expected_checksum()` may return None even if a checksum was
        # requested, in which case it will emit an info log _MISSING_CHECKSUM.
        # If an invalid checksum type is specified, this will raise ValueError.
        expected_checksum, checksum_object = sync_helpers._get_expected_checksum(
            response, self._get_headers, self.media_url, checksum_type=self.checksum
        )
        # For gzip-encoded responses the server checksum covers the
        # *compressed* bytes, so the hashing is moved into the decoder and a
        # no-op hash is updated in the loop below instead.
        local_checksum_object = _add_decoder(response, checksum_object)
        async for chunk in response.content.iter_chunked(
            _request_helpers._SINGLE_GET_CHUNK_SIZE
        ):
            self._stream.write(chunk)
            local_checksum_object.update(chunk)
        if expected_checksum is not None:
            # Compare against `checksum_object` (not the possibly-no-op local
            # wrapper), which saw the bytes the server hashed.
            actual_checksum = sync_helpers.prepare_checksum_digest(
                checksum_object.digest()
            )
            if actual_checksum != expected_checksum:
                msg = _CHECKSUM_MISMATCH.format(
                    self.media_url,
                    expected_checksum,
                    actual_checksum,
                    checksum_type=self.checksum.upper(),
                )
                raise common.DataCorruption(response, msg)

    async def consume(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
        """Consume the resource to be downloaded.

        If a ``stream`` is attached to this download, then the downloaded
        resource will be written to the stream.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
            ValueError: If the current :class:`Download` has already
                finished.
        """
        method, url, payload, headers = self._prepare_request()
        # NOTE: We assume "payload is None" but pass it along anyway.
        request_kwargs = {
            "data": payload,
            "headers": headers,
            "retry_strategy": self._retry_strategy,
            "timeout": timeout,
        }
        if self._stream is not None:
            # Stream the body instead of buffering it when a sink is attached.
            request_kwargs["stream"] = True
        result = await _request_helpers.http_request(
            transport, method, url, **request_kwargs
        )
        self._process_response(result)
        if self._stream is not None:
            await self._write_to_stream(result)
        return result
class RawDownload(_request_helpers.RawRequestsMixin, _download.Download):
    """Helper to manage downloading a raw resource from a Google API.

    "Slices" of the resource can be retrieved by specifying a range
    with ``start`` and / or ``end``. However, in typical usage, neither
    ``start`` nor ``end`` is expected to be provided.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded. If not
            provided, but ``end`` is provided, will download from the
            beginning to ``end`` of the media.
        end (int): The last byte in a range to be downloaded. If not
            provided, but ``start`` is provided, will download from the
            ``start`` to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The response headers must contain
            a checksum of the requested type. If the headers lack an
            appropriate checksum (for instance in the case of transcoded or
            ranged downloads where the remote service does not know the
            correct checksum) an INFO-level log will be emitted. Supported
            values are "md5", "crc32c" and None. The default is "md5".

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
    """

    async def _write_to_stream(self, response):
        """Write response body to a write-able stream.

        .. note:
            This method assumes that the ``_stream`` attribute is set on the
            current download.

        Args:
            response (~requests.Response): The HTTP response object.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
        """
        # `_get_expected_checksum()` may return None even if a checksum was
        # requested, in which case it will emit an info log _MISSING_CHECKSUM.
        # If an invalid checksum type is specified, this will raise ValueError.
        expected_checksum, checksum_object = sync_helpers._get_expected_checksum(
            response, self._get_headers, self.media_url, checksum_type=self.checksum
        )
        # Unlike `Download`, raw responses are not transparently decoded, so
        # the checksum is fed directly from the bytes written to the stream.
        async for chunk in response.content.iter_chunked(
            _request_helpers._SINGLE_GET_CHUNK_SIZE
        ):
            self._stream.write(chunk)
            checksum_object.update(chunk)
        if expected_checksum is not None:
            actual_checksum = sync_helpers.prepare_checksum_digest(
                checksum_object.digest()
            )
            if actual_checksum != expected_checksum:
                msg = _CHECKSUM_MISMATCH.format(
                    self.media_url,
                    expected_checksum,
                    actual_checksum,
                    checksum_type=self.checksum.upper(),
                )
                raise common.DataCorruption(response, msg)

    async def consume(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
        """Consume the resource to be downloaded.

        If a ``stream`` is attached to this download, then the downloaded
        resource will be written to the stream.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
            ValueError: If the current :class:`Download` has already
                finished.
        """
        method, url, payload, headers = self._prepare_request()
        # NOTE: We assume "payload is None" but pass it along anyway.
        result = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            # BUG FIX: the caller-supplied ``timeout`` was accepted (and
            # documented) but never forwarded, so requests always used the
            # transport default. Forward it, matching Download.consume().
            timeout=timeout,
        )
        self._process_response(result)
        if self._stream is not None:
            await self._write_to_stream(result)
        return result
class ChunkedDownload(_request_helpers.RequestsMixin, _download.ChunkedDownload):
    """Download a resource in chunks from a Google API.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each
            request.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            will be used to concatenate chunks of the resource as they are
            downloaded.
        start (int): The first byte in a range to be downloaded. If not
            provided, defaults to ``0``.
        end (int): The last byte in a range to be downloaded. If not
            provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. headers for data encryption
            key headers.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each request.

    Raises:
        ValueError: If ``start`` is negative.
    """

    async def consume_next_chunk(
        self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
    ):
        """Retrieve (and process) the next chunk of the resource.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ValueError: If the current download has finished.
        """
        method, url, payload, headers = self._prepare_request()
        # Downloads carry no request body (payload is expected to be None),
        # but it is forwarded regardless for symmetry with uploads.
        request_kwargs = {
            "data": payload,
            "headers": headers,
            "retry_strategy": self._retry_strategy,
            "timeout": timeout,
        }
        response = await _request_helpers.http_request(
            transport, method, url, **request_kwargs
        )
        await self._process_response(response)
        return response
class RawChunkedDownload(_request_helpers.RawRequestsMixin, _download.ChunkedDownload):
    """Download a raw resource in chunks from a Google API.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each
            request.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            will be used to concatenate chunks of the resource as they are
            downloaded.
        start (int): The first byte in a range to be downloaded. If not
            provided, defaults to ``0``.
        end (int): The last byte in a range to be downloaded. If not
            provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. headers for data encryption
            key headers.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each request.

    Raises:
        ValueError: If ``start`` is negative.
    """

    async def consume_next_chunk(
        self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
    ):
        """Retrieve (and process) the next raw chunk of the resource.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ValueError: If the current download has finished.
        """
        # payload is expected to be None for downloads; forwarded anyway.
        method, url, payload, headers = self._prepare_request()
        response = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        await self._process_response(response)
        return response
def _add_decoder(response_raw, checksum):
    """Patch the ``_decoder`` on a ``urllib3`` response.

    This is so that we can intercept the compressed bytes before they are
    decoded. Only patches if the content encoding is ``gzip``.

    Args:
        response_raw (urllib3.response.HTTPResponse): The raw response for
            an HTTP request.
        checksum (object):
            A checksum which will be updated with compressed bytes.

    Returns:
        object: Either the original ``checksum`` if ``_decoder`` is not
        patched, or a ``_DoNothingHash`` if the decoder is patched, since the
        caller will no longer need to hash to decoded bytes.
    """
    if response_raw.headers.get("content-encoding", "").lower() == "gzip":
        # Hashing moves into the decoder; hand the caller a no-op hash so
        # the decoded bytes are not hashed a second time.
        response_raw._decoder = _GzipDecoder(checksum)
        return _helpers._DoNothingHash()
    return checksum
class _GzipDecoder(urllib3.response.GzipDecoder):
    """Custom subclass of ``urllib3`` decoder for ``gzip``-ed bytes.

    Allows a checksum function to see the compressed bytes before they are
    decoded. This way the checksum of the compressed value can be computed.

    Args:
        checksum (object):
            A checksum which will be updated with compressed bytes.
    """

    def __init__(self, checksum):
        super().__init__()
        self._checksum = checksum

    def decompress(self, data):
        """Decompress ``data``, feeding the compressed bytes to the checksum.

        Args:
            data (bytes): The compressed bytes to be decompressed.

        Returns:
            bytes: The decompressed bytes from ``data``.
        """
        # Hash *before* decompression so the digest matches the on-the-wire
        # (compressed) representation the server checksummed.
        self._checksum.update(data)
        return super().decompress(data)
| |
'''Metrics to compare disaggregation performance against ground truth
data.
All metrics functions have the same interface. Each function takes
`predictions` and `ground_truth` parameters. Both of which are
nilmtk.MeterGroup objects. Each function returns one of two types:
either a pd.Series or a single float. Most functions return a
pd.Series where each index element is a meter instance int or a tuple
of ints for MeterGroups.
Notation
--------
Below is the notation used to mathematically define each metric.
:math:`T` - number of time slices.
:math:`t` - a time slice.
:math:`N` - number of appliances.
:math:`n` - an appliance.
:math:`y^{(n)}_t` - ground truth power of appliance :math:`n` in time slice :math:`t`.
:math:`\\hat{y}^{(n)}_t` - estimated power of appliance :math:`n` in time slice :math:`t`.
:math:`x^{(n)}_t` - ground truth state of appliance :math:`n` in time slice :math:`t`.
:math:`\\hat{x}^{(n)}_t` - estimated state of appliance :math:`n` in time slice :math:`t`.
Functions
---------
'''
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from warnings import warn
from .metergroup import MeterGroup
from .metergroup import iterate_through_submeters_of_two_metergroups
from .electric import align_two_meters
def error_in_assigned_energy(predictions, ground_truth):
    """Compute error in assigned energy.

    .. math::
        error^{(n)} =
        \\left | \\sum_t y^{(n)}_t - \\sum_t \\hat{y}^{(n)}_t \\right |

    Parameters
    ----------
    predictions, ground_truth : nilmtk.MeterGroup

    Returns
    -------
    errors : pd.Series
        Each index is an meter instance int (or tuple for MeterGroups).
        Each value is the absolute error in assigned energy for that appliance,
        in kWh.
    """
    errors = {}
    meter_pairs = iterate_through_submeters_of_two_metergroups(
        predictions, ground_truth)
    for pred_meter, gt_meter in meter_pairs:
        # Compare only over the prediction's good sections so both totals
        # cover the same time span.
        good_sections = pred_meter.good_sections()
        gt_energy = gt_meter.total_energy(sections=good_sections)
        pred_energy = pred_meter.total_energy(sections=good_sections)
        errors[pred_meter.instance()] = np.abs(gt_energy - pred_energy)
    return pd.Series(errors)
def fraction_energy_assigned_correctly(predictions, ground_truth):
    '''Compute fraction of energy assigned correctly

    .. math::
        fraction =
        \\sum_n min \\left (
        \\frac{\\sum_n y}{\\sum_{n,t} y},
        \\frac{\\sum_n \\hat{y}}{\\sum_{n,t} \\hat{y}}
        \\right )

    Ignores distinction between different AC types, instead if there are
    multiple AC types for each meter then we just take the max value across
    the AC types.

    Parameters
    ----------
    predictions, ground_truth : nilmtk.MeterGroup

    Returns
    -------
    fraction : float in the range [0,1]
        Fraction of Energy Correctly Assigned.
    '''
    predictions_submeters = MeterGroup(meters=predictions.submeters().meters)
    ground_truth_submeters = MeterGroup(meters=ground_truth.submeters().meters)
    fraction_per_meter_predictions = predictions_submeters.fraction_per_meter()
    fraction_per_meter_ground_truth = ground_truth_submeters.fraction_per_meter()
    # Re-key both Series by meter instance so they can be looked up by the
    # same integer labels below.
    # BUG FIX: `instance` is a method (it is called as `.instance()`
    # everywhere else in this module); without the call the index would hold
    # bound methods and the integer lookups below would raise KeyError.
    fraction_per_meter_ground_truth.index = fraction_per_meter_ground_truth.index.map(
        lambda meter: meter.instance())
    fraction_per_meter_predictions.index = fraction_per_meter_predictions.index.map(
        lambda meter: meter.instance())
    # For each appliance, credit the smaller of the two energy fractions.
    fraction = 0
    for meter_instance in predictions_submeters.instance():
        fraction += min(fraction_per_meter_ground_truth[meter_instance],
                        fraction_per_meter_predictions[meter_instance])
    return fraction
def mean_normalized_error_power(predictions, ground_truth):
    '''Compute mean normalized error in assigned power

    .. math::
        error^{(n)} =
        \\frac
        { \\sum_t {\\left | y_t^{(n)} - \\hat{y}_t^{(n)} \\right |} }
        { \\sum_t y_t^{(n)} }

    Parameters
    ----------
    predictions, ground_truth : nilmtk.MeterGroup

    Returns
    -------
    mne : pd.Series
        Each index is an meter instance int (or tuple for MeterGroups).
        Each value is the MNE for that appliance.
    '''
    mne = {}
    both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
        predictions, ground_truth)
    for pred_meter, ground_truth_meter in both_sets_of_meters:
        total_abs_diff = 0.0
        sum_of_ground_truth_power = 0.0
        # Accumulate over chunks: column 0 = prediction, column 1 = ground truth.
        for aligned_meters_chunk in align_two_meters(pred_meter,
                                                     ground_truth_meter):
            # FIX: DataFrame.icol() was removed from pandas; .iloc[:, i] is
            # the supported equivalent.
            diff = (aligned_meters_chunk.iloc[:, 0]
                    - aligned_meters_chunk.iloc[:, 1])
            total_abs_diff += diff.dropna().abs().sum()
            sum_of_ground_truth_power += aligned_meters_chunk.iloc[:, 1].sum()
        mne[pred_meter.instance()] = total_abs_diff / sum_of_ground_truth_power
    return pd.Series(mne)
def rms_error_power(predictions, ground_truth):
    '''Compute RMS error in assigned power

    .. math::
        error^{(n)} = \\sqrt{ \\frac{1}{T} \\sum_t{ \\left ( y_t - \\hat{y}_t \\right )^2 } }

    Parameters
    ----------
    predictions, ground_truth : nilmtk.MeterGroup

    Returns
    -------
    error : pd.Series
        Each index is an meter instance int (or tuple for MeterGroups).
        Each value is the RMS error in predicted power for that appliance.
    '''
    error = {}
    both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
        predictions, ground_truth)
    for pred_meter, ground_truth_meter in both_sets_of_meters:
        sum_of_squared_diff = 0.0
        n_samples = 0
        for aligned_meters_chunk in align_two_meters(pred_meter,
                                                     ground_truth_meter):
            # FIX: DataFrame.icol() was removed from pandas; .iloc[:, i] is
            # the supported equivalent.
            diff = (aligned_meters_chunk.iloc[:, 0]
                    - aligned_meters_chunk.iloc[:, 1])
            # NaNs mark timestamps present in only one meter; exclude them
            # from both the sum and the sample count.
            diff.dropna(inplace=True)
            sum_of_squared_diff += (diff ** 2).sum()
            n_samples += len(diff)
        error[pred_meter.instance()] = math.sqrt(sum_of_squared_diff / n_samples)
    return pd.Series(error)
def f1_score(predictions, ground_truth):
    '''Compute F1 scores.

    .. math::
        F_{score}^{(n)} = \\frac
        {2 * Precision * Recall}
        {Precision + Recall}

    Parameters
    ----------
    predictions, ground_truth : nilmtk.MeterGroup

    Returns
    -------
    f1_scores : pd.Series
        Each index is an meter instance int (or tuple for MeterGroups).
        Each value is the F1 score for that appliance. If there are multiple
        chunks then the value is the weighted mean of the F1 score for
        each chunk.
    '''
    # If we import sklearn at top of file then sphinx breaks.
    from sklearn.metrics import f1_score as sklearn_f1_score

    # sklearn produces lots of DepreciationWarnings with PyTables
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    f1_scores = {}
    both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
        predictions, ground_truth)
    for pred_meter, ground_truth_meter in both_sets_of_meters:
        # FIX: accumulate per-chunk scores in plain lists instead of
        # DataFrame.append (removed in pandas 2.0).
        chunk_scores = []
        chunk_sizes = []
        aligned_meters = align_two_meters(
            pred_meter, ground_truth_meter, 'when_on')
        for aligned_states_chunk in aligned_meters:
            aligned_states_chunk.dropna(inplace=True)
            aligned_states_chunk = aligned_states_chunk.astype(int)
            # FIX: DataFrame.icol() was removed from pandas; .iloc[:, i] is
            # the supported equivalent.
            score = sklearn_f1_score(aligned_states_chunk.iloc[:, 0],
                                     aligned_states_chunk.iloc[:, 1])
            chunk_scores.append(score)
            chunk_sizes.append(len(aligned_states_chunk))

        # Weighted mean of chunk scores, weighted by chunk sample count.
        num_samples = sum(chunk_sizes)
        if num_samples > 0:
            avg_score = sum(
                score * size for score, size in zip(chunk_scores, chunk_sizes)
            ) / num_samples
        else:
            warn("No aligned samples when calculating F1-score for prediction"
                 " meter {} and ground truth meter {}."
                 .format(pred_meter, ground_truth_meter))
            # FIX: np.NaN alias was removed in NumPy 2.0; use np.nan.
            avg_score = np.nan
        f1_scores[pred_meter.instance()] = avg_score
    return pd.Series(f1_scores)
##### FUNCTIONS BELOW THIS LINE HAVE NOT YET BEEN CONVERTED TO NILMTK v0.2 #####
"""
def confusion_matrices(predicted_states, ground_truth_states):
'''Compute confusion matrix between appliance states for each appliance
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
dict of type {appliance : confusion matrix}
'''
re = {}
for appliance in predicted_states:
matrix = np.zeros([np.max(ground_truth_states[appliance]) + 1,
np.max(ground_truth_states[appliance]) + 1])
for time in predicted_states[appliance]:
matrix[predicted_states.values[time, appliance],
ground_truth_states.values[time, appliance]] += 1
re[appliance] = matrix
return re
def tp_fp_fn_tn(predicted_states, ground_truth_states):
'''Compute counts of True Positives, False Positives, False Negatives, True Negatives
.. math::
TP^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = on \\right )
FP^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = on \\right )
FN^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = off \\right )
TN^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = off \\right )
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [TP, FP, FN, TN]
'''
# assumes state 0 = off, all other states = on
predicted_states_on = predicted_states > 0
ground_truth_states_on = ground_truth_states > 0
tp = np.sum(np.logical_and(predicted_states_on.values == True,
ground_truth_states_on.values == True), axis=0)
fp = np.sum(np.logical_and(predicted_states_on.values == True,
ground_truth_states_on.values == False), axis=0)
fn = np.sum(np.logical_and(predicted_states_on.values == False,
ground_truth_states_on.values == True), axis=0)
tn = np.sum(np.logical_and(predicted_states_on.values == False,
ground_truth_states_on.values == False), axis=0)
return np.array([tp, fp, fn, tn]).astype(float)
def tpr_fpr(predicted_states, ground_truth_states):
'''Compute True Positive Rate and False Negative Rate
.. math::
TPR^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )}
FPR^{(n)} = \\frac{FP}{\\left ( FP + TN \\right )}
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [TPR, FPR]
'''
tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states)
tpr = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :])
fpr = tfpn[1, :] / (tfpn[1, :] + tfpn[3, :])
return np.array([tpr, fpr])
def precision_recall(predicted_states, ground_truth_states):
'''Compute Precision and Recall
.. math::
Precision^{(n)} = \\frac{TP}{\\left ( TP + FP \\right )}
Recall^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )}
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [Precision, Recall]
'''
tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states)
prec = tfpn[0, :] / (tfpn[0, :] + tfpn[1, :])
rec = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :])
return np.array([prec, rec])
def hamming_loss(predicted_state, ground_truth_state):
'''Compute Hamming loss
.. math::
HammingLoss =
\\frac{1}{T} \\sum_{t}
\\frac{1}{N} \\sum_{n}
xor \\left ( x^{(n)}_t, \\hat{x}^{(n)}_t \\right )
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
float of hamming_loss
'''
num_appliances = np.size(ground_truth_state.values, axis=1)
xors = np.sum((predicted_state.values != ground_truth_state.values),
axis=1) / num_appliances
return np.mean(xors)
"""
| |
from subprocess import call
import re
from common import *
class _AnyNumber(int):
def __eq__(self, other):
return True
ANY = _AnyNumber(1)
class PathTester(object):
    def __init__(self, test, root):
        """Remember the owning test case and take an initial snapshot.

        :param test: the unittest.TestCase whose assert* methods we delegate to.
        :param root: directory tree to watch for new paths.
        """
        self.test = test
        self.root = root
        self.paths = []      # paths seen but not yet matched by an assertion
        self.matched = set() # paths consumed by successful assertMatches calls
        self.scan()
    def scan(self):
        """Walk ``self.root`` and record paths not already matched.

        Paths are stored relative to ``root`` with a leading '/'; directories
        additionally get a trailing '/'.
        """
        paths = []
        for dir_name, dir_names, file_names in os.walk(self.root):
            for name in dir_names:
                path = os.path.join(dir_name, name)[len(self.root):] + '/'
                paths.append(path)
            for name in file_names:
                path = os.path.join(dir_name, name)[len(self.root):]
                paths.append(path)
        # Filter out OS/editor junk (AppleDouble files, .DS_Store, *.pyc).
        # NOTE(review): each path starts with '/', so the first two patterns
        # (anchored at position 0 by re.match) can never match a rooted path
        # — confirm whether they should be r'.*/\._' style instead.
        for pattern in (r'\._', r'\.DS_Store$', r'.*\.pyc$'):
            paths = [path for path in paths if not re.match(pattern, path)]
        # Anything already consumed by a previous assertion stays consumed.
        paths = [x for x in paths if x not in self.matched]
        self.paths.extend(paths)
        print 'PathTester found', len(paths), 'new items'
        # if paths:
        #     print '\n'.join('\t' + x for x in sorted(paths))
def __enter__(self):
self.scan()
    def __exit__(self, *args):
        """On block exit, fail the test if any scanned path was never matched."""
        self.assertMatchedAll()
def assertMatches(self, count, pattern, mode=None, msg=None):
if not pattern:
self.fail('no pattern specified')
full_pattern = pattern + r'$'
if full_pattern[0] != '/':
full_pattern = r'(?:/[^/]*)*?/' + full_pattern
paths = self.paths
self.paths = []
for path in paths:
if re.match(full_pattern, path):
if mode is None:
test_mode = 0777 if path.endswith('/') else 0666
else:
test_mode = mode
stat = os.stat(os.path.join(self.root, path.strip('/')))
self.test.assertEqual(stat.st_mode & 0777, test_mode, 'permissions differ on %r; %o != %o' % (path, stat.st_mode & 0777, test_mode))
self.matched.add(path)
else:
self.paths.append(path)
self.test.assertEqual(
count,
len(paths) - len(self.paths),
msg or ('found %d, expected %d via %r; %d remain:\n\t' % (len(paths) - len(self.paths), count, pattern, len(self.paths))) + '\n\t'.join(sorted(self.paths))
)
def assertMatchedAll(self, msg=None):
self.test.assertFalse(self.paths, msg or ('%d paths remain:\n\t' % len(self.paths)) + '\n\t'.join(sorted(self.paths)))
def assertProject(self):
self.assertMatches(1, r'/Assets/')
self.assertMatches(1, r'/SEQ/')
self.assertMatches(1, r'/\.sgfs/')
self.assertMatches(1, r'/\.sgfs/cache\.sqlite')
self.assertMatches(1, r'/\.sgfs\.yml')
def assertAssetType(self, count):
self.assertMatches(count, r'/Assets/(Character|Vehicle)/')
def assertAsset(self, count):
self.assertMatches(count, r'/Assets/(Character|Vehicle)/(\1_\d+)/')
self.assertMatches(count, r'/Assets/(Character|Vehicle)/(\1_\d+)/\.sgfs\.yml')
def assertAssetTask(self, count, type_, **kwargs):
self._assertTask(count, r'/Assets/(Character|Vehicle)/(\1_\d+)', type_, **kwargs)
def _assertTask(self, count, base, type_):
self.assertMatches(count, base + r'/%s/' % type_)
self.assertMatches(ANY, base + r'/%s/\.sgfs\.yml' % type_)
self.assertMatches(ANY, base + r'/%s/dailies/' % type_)
if type_ in ('Anm', 'Model', 'Light'):
self.assertMatches(count, base + r'/%s/maya/' % type_)
self.assertMatches(count, base + r'/%s/maya/images/' % type_)
self.assertMatches(count, base + r'/%s/maya/published/' % type_)
self.assertMatches(count, base + r'/%s/maya/scenes/' % type_)
self.assertMatches(count, base + r'/%s/maya/sourceimages/' % type_)
self.assertMatches(count, base + r'/%s/maya/workspace.mel' % type_)
if type_ in ('Comp', 'Light'):
self.assertMatches(count, base + r'/%s/nuke/' % type_)
self.assertMatches(count, base + r'/%s/nuke/published/' % type_)
self.assertMatches(count, base + r'/%s/nuke/renders/' % type_)
self.assertMatches(count, base + r'/%s/nuke/scripts/' % type_)
if type_ in ('Comp', ):
self.assertMatches(count, base + r'/%s/nuke/renders/cleanplates/' % type_)
self.assertMatches(count, base + r'/%s/nuke/renders/elements/' % type_)
self.assertMatches(count, base + r'/%s/nuke/renders/mattes/' % type_)
self.assertMatches(count, base + r'/%s/nuke/scripts/comp/' % type_)
self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/' % type_)
self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/cleanplate/' % type_)
self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/elements/' % type_)
self.assertMatches(count, base + r'/%s/nuke/scripts/precomp/roto/' % type_)
def assertSequence(self, count):
self.assertMatches(count, r'/SEQ/(\w{2})/')
self.assertMatches(count, r'/SEQ/(\w{2})/\.sgfs\.yml')
def assertShot(self, count):
self.assertMatches(count, r'/SEQ/(\w{2})/\1_\d{3}/')
self.assertMatches(count, r'/SEQ/(\w{2})/\1_\d{3}/\.sgfs\.yml')
self.assertMatches(count * 3, r'/SEQ/(\w{2})/\1_\d{3}/(Audio|Plates|Ref)/')
def assertShotTask(self, count, type_, **kwargs):
self._assertTask(count, r'/SEQ/(\w{2})/\1_\d{3}', type_, **kwargs)
def assertFullStructure(self):
self.assertProject()
self.assertAssetType(2)
self.assertAsset(4)
self.assertAssetTask(4, 'Anm')
self.assertAssetTask(4, 'Comp')
self.assertAssetTask(4, 'Light')
self.assertAssetTask(4, 'Model')
self.assertSequence(2)
self.assertShot(4)
self.assertShotTask(4, 'Anm')
self.assertShotTask(4, 'Comp')
self.assertShotTask(4, 'Light')
self.assertShotTask(4, 'Model')
self.assertMatchedAll()
class Base(TestCase):
    """Shared fixture: builds a small Shotgun project (2 sequences, 4
    shots, 4 assets, 4 pipeline steps and their tasks) plus an SGFS
    rooted at the sandbox."""

    def setUp(self):
        sg = Shotgun()
        self.sg = self.fix = fix = Fixture(sg)
        self.proj_name = 'Test Project ' + mini_uuid()
        proj = fix.Project(self.proj_name)
        seqs = [proj.Sequence(code, project=proj) for code in ('AA', 'BB')]
        shots = [seq.Shot('%s_%03d' % (seq['code'], i), project=proj) for seq in seqs for i in range(1, 3)]
        steps = [fix.find_or_create('Step', code=code, short_name=code) for code in ('Anm', 'Comp', 'Light', 'Model')]
        assets = [proj.Asset(sg_asset_type=type_, code="%s %d" % (type_, i)) for type_ in ('Character', 'Vehicle') for i in range(1, 3)]
        # The last step is deliberately duplicated (`steps + steps[-1:]`)
        # so every entity gets *two* Model tasks — exercised by the
        # two-tags-on-Model assertion in TestIncrementalStructure.
        tasks = [entity.Task(step['code'] + ' something', step=step, entity=entity, project=proj) for step in (steps + steps[-1:]) for entity in (shots + assets)]
        # Store only minimal entity dicts so tests exercise re-fetching.
        self.proj = minimal(proj)
        self.seqs = map(minimal, seqs)
        self.shots = map(minimal, shots)
        self.steps = map(minimal, steps)
        self.tasks = map(minimal, tasks)
        self.session = Session(self.sg)
        self.sgfs = SGFS(root=self.sandbox, session=self.session, schema_name='testing')
        # NOTE: removed dead `self = None` — it only rebound the local
        # name and had no effect.

    def pathTester(self):
        """Return a PathTester rooted at this test's project directory."""
        return PathTester(self, os.path.join(self.sandbox, self.proj_name.replace(' ', '_')))

    def create(self, entities, *args, **kwargs):
        """Shorthand for ``self.sgfs.create_structure``."""
        self.sgfs.create_structure(entities, *args, **kwargs)
class TestFullStructure(Base):

    def test_full_structure(self):
        """Creating all tasks and assets in one call yields the complete
        on-disk layout."""
        everything = self.tasks + self.assets
        self.create(everything, allow_project=True)
        self.pathTester().assertFullStructure()
class TestIncrementalStructure(Base):

    def test_incremental_structure(self):
        """Creating entities one at a time should add exactly each
        entity's own directories, and entity tags should accumulate."""
        paths = self.pathTester()
        # Re-fetch the project through the session so it carries 'name'.
        proj = self.session.merge(self.proj)
        proj.fetch('name')
        self.create([proj], allow_project=True)
        with paths:
            paths.assertProject()
        # One new sequence directory per sequence.
        for seq in self.seqs:
            self.create([seq])
            with paths:
                paths.assertSequence(1)
        # One new asset per call; the asset-type directory may or may not
        # be new (shared between assets), hence ANY.
        for asset in self.assets:
            self.create([asset])
            with paths:
                paths.assertAssetType(ANY)
                paths.assertAsset(1)
        for shot in self.shots:
            self.create([shot])
            with paths:
                paths.assertShot(1)
        # All tasks at once: every entity gains every step directory.
        self.create(self.tasks)
        with paths:
            paths.assertAssetTask(len(self.assets), 'Anm')
            paths.assertAssetTask(len(self.assets), 'Comp')
            paths.assertAssetTask(len(self.assets), 'Light')
            paths.assertAssetTask(len(self.assets), 'Model')
            paths.assertShotTask(len(self.shots), 'Anm')
            paths.assertShotTask(len(self.shots), 'Comp')
            paths.assertShotTask(len(self.shots), 'Light')
            paths.assertShotTask(len(self.shots), 'Model')
        root = os.path.join(self.sandbox, self.proj_name.replace(' ', '_'))
        self.assertEqual(1, len(self.sgfs.get_directory_entity_tags(root)))
        self.assertEqual(1, len(self.sgfs.get_directory_entity_tags(root + '/SEQ/AA/AA_001/Anm')))
        # Model has two tags because Base duplicates the last step's tasks.
        self.assertEqual(2, len(self.sgfs.get_directory_entity_tags(root + '/SEQ/AA/AA_001/Model')))
class TestMutatedStructure(Base):

    def test_mutated_structure(self):
        """After directories are renamed on disk and the cache rebuilt,
        create_structure should place new shot directories inside the
        *moved* locations instead of recreating the old names."""
        root = os.path.join(self.sandbox, self.proj_name.replace(' ', '_'))
        paths = self.pathTester()
        proj = self.session.merge(self.proj)
        proj.fetch('name')
        self.create([proj], allow_project=True)
        with paths:
            paths.assertProject()
        for seq in self.seqs:
            self.create([seq])
            with paths:
                paths.assertSequence(1)
        # Mutate the sequences, and rebuild the cache.
        call(['mv', root + '/SEQ/AA', root + '/SEQ/XX'])
        call(['mv', root + '/SEQ/BB', root + '/SEQ_BB'])
        print '==== MUTATION ===='
        print 'Rebuilding cache...'
        self.sgfs.rebuild_cache(root, recurse=True)
        print 'Recreating structure...'
        self.create(self.shots)
        print 'Scanning for changes...'
        paths.scan()
        # Shots must land in the moved sequence directories.
        paths.assertMatches(2, r'SEQ/XX/AA_\d+/')
        paths.assertMatches(2, r'SEQ/XX/AA_\d+/\.sgfs\.yml')
        paths.assertMatches(2, r'SEQ_BB/BB_\d+/')
        paths.assertMatches(2, r'SEQ_BB/BB_\d+/\.sgfs\.yml')
        # And the tags inside them identify the correct shot entities.
        tags = self.sgfs.get_directory_entity_tags(root + '/SEQ/XX/AA_001')
        self.assertEqual(1, len(tags))
        self.assertSameEntity(tags[0]['entity'], self.shots[0])
        tags = self.sgfs.get_directory_entity_tags(root + '/SEQ_BB/BB_001')
        self.assertEqual(1, len(tags))
        self.assertSameEntity(tags[0]['entity'], self.shots[3])
class TestDryRun(Base):

    def test_dry_run(self):
        """A dry run must not touch the filesystem at all."""
        self.create(self.tasks + self.assets, dry_run=True)
        self.pathTester().assertMatchedAll()
class TestDisallowProject(TestCase):
    """Creating structure for a Project entity must raise unless it is
    explicitly allowed."""

    def setUp(self):
        shotgun = Shotgun()
        fixture = Fixture(shotgun)
        self.sg = self.fix = fixture
        self.proj_name = 'Test Project ' + mini_uuid()
        self.proj = fixture.Project(self.proj_name)
        self.sgfs = SGFS(root=self.sandbox, shotgun=fixture, schema_name='testing')

    def tearDown(self):
        self.fix.delete_all()

    def test_disallow_project(self):
        project_dir = os.path.join(self.sandbox, self.proj_name.replace(' ', '_'))
        os.makedirs(project_dir)
        self.assertRaises(ValueError, self.sgfs.create_structure, [self.proj])
| |
# SoLoud C-Api Code Generator (c)2013-2015 Jari Komppa http://iki.fi/sol/
# Warning: This file is generated. Any manual changes will be overwritten.
# Data for SoLoud glue code generation
# Enumerations
soloud_enum = {
    # Backend identifiers (SOLOUD_AUTO .. SOLOUD_BACKEND_MAX).
    'SOLOUD_AUTO': 0,
    'SOLOUD_SDL': 1,
    'SOLOUD_SDL2': 2,
    'SOLOUD_PORTAUDIO': 3,
    'SOLOUD_WINMM': 4,
    'SOLOUD_XAUDIO2': 5,
    'SOLOUD_WASAPI': 6,
    'SOLOUD_ALSA': 7,
    'SOLOUD_OSS': 8,
    'SOLOUD_OPENAL': 9,
    'SOLOUD_COREAUDIO': 10,
    'SOLOUD_OPENSLES': 11,
    'SOLOUD_NULLDRIVER': 12,
    'SOLOUD_BACKEND_MAX': 13,
    # Init flags (bitmask values: 1, 2, 4).
    'SOLOUD_CLIP_ROUNDOFF': 1,
    'SOLOUD_ENABLE_VISUALIZATION': 2,
    'SOLOUD_LEFT_HANDED_3D': 4,
    # BiquadResonantFilter filter types.
    'BIQUADRESONANTFILTER_NONE': 0,
    'BIQUADRESONANTFILTER_LOWPASS': 1,
    'BIQUADRESONANTFILTER_HIGHPASS': 2,
    'BIQUADRESONANTFILTER_BANDPASS': 3,
    # BiquadResonantFilter parameter indices.
    'BIQUADRESONANTFILTER_WET': 0,
    'BIQUADRESONANTFILTER_SAMPLERATE': 1,
    'BIQUADRESONANTFILTER_FREQUENCY': 2,
    'BIQUADRESONANTFILTER_RESONANCE': 3,
    # LofiFilter parameter indices.
    'LOFIFILTER_WET': 0,
    'LOFIFILTER_SAMPLERATE': 1,
    'LOFIFILTER_BITDEPTH': 2,
    # BassboostFilter parameter indices.
    'BASSBOOSTFILTER_WET': 0,
    'BASSBOOSTFILTER_BOOST': 1,
    # Sfxr sound-effect presets.
    'SFXR_COIN': 0,
    'SFXR_LASER': 1,
    'SFXR_EXPLOSION': 2,
    'SFXR_POWERUP': 3,
    'SFXR_HURT': 4,
    'SFXR_JUMP': 5,
    'SFXR_BLIP': 6,
    # FlangerFilter parameter indices.
    'FLANGERFILTER_WET': 0,
    'FLANGERFILTER_DELAY': 1,
    'FLANGERFILTER_FREQ': 2,
    # Monotone waveform selectors.
    'MONOTONE_SQUARE': 0,
    'MONOTONE_SAW': 1,
    'MONOTONE_SIN': 2,
    'MONOTONE_SAWSIN': 3
}
# Handle types
# C++ class names exposed as opaque handle types in the generated C API.
soloud_type = [
    'AlignedFloatBuffer',
    'Soloud',
    'AudioCollider',
    'AudioAttenuator',
    'AudioSource',
    'BiquadResonantFilter',
    'LofiFilter',
    'Bus',
    'EchoFilter',
    'Fader',
    'FFTFilter',
    'BassboostFilter',
    'Filter',
    'Speech',
    'Wav',
    'WavStream',
    'Prg',
    'Sfxr',
    'FlangerFilter',
    'DCRemovalFilter',
    'Modplug',
    'Monotone',
    'TedSid'
]
# Functions
# [return type, function name, [[param type, param name], ...], ...]
soloud_func = [
['void', 'Soloud_destroy', [['Soloud *', 'aSoloud']]],
['Soloud *', 'Soloud_create', [[]]],
['int', 'Soloud_init', [['Soloud *', 'aSoloud']]],
['int', 'Soloud_initEx', [['Soloud *', 'aSoloud'], ['unsigned int', 'aFlags', 'Soloud::CLIP_ROUNDOFF'], ['unsigned int', 'aBackend', 'Soloud::AUTO'], ['unsigned int', 'aSamplerate', 'Soloud::AUTO'], ['unsigned int', 'aBufferSize', 'Soloud::AUTO'], ['unsigned int', 'aChannels', '2']]],
['void', 'Soloud_deinit', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getVersion', [['Soloud *', 'aSoloud']]],
['const char *', 'Soloud_getErrorString', [['Soloud *', 'aSoloud'], ['int', 'aErrorCode']]],
['unsigned int', 'Soloud_getBackendId', [['Soloud *', 'aSoloud']]],
['const char *', 'Soloud_getBackendString', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getBackendChannels', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getBackendSamplerate', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getBackendBufferSize', [['Soloud *', 'aSoloud']]],
['int', 'Soloud_setSpeakerPosition', [['Soloud *', 'aSoloud'], ['unsigned int', 'aChannel'], ['float', 'aX'], ['float', 'aY'], ['float', 'aZ']]],
['unsigned int', 'Soloud_play', [['Soloud *', 'aSoloud'], ['AudioSource *', 'aSound']]],
['unsigned int', 'Soloud_playEx', [['Soloud *', 'aSoloud'], ['AudioSource *', 'aSound'], ['float', 'aVolume', '-1.0f'], ['float', 'aPan', '0.0f'], ['int', 'aPaused', '0'], ['unsigned int', 'aBus', '0']]],
['unsigned int', 'Soloud_playClocked', [['Soloud *', 'aSoloud'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound']]],
['unsigned int', 'Soloud_playClockedEx', [['Soloud *', 'aSoloud'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound'], ['float', 'aVolume', '-1.0f'], ['float', 'aPan', '0.0f'], ['unsigned int', 'aBus', '0']]],
['unsigned int', 'Soloud_play3d', [['Soloud *', 'aSoloud'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['unsigned int', 'Soloud_play3dEx', [['Soloud *', 'aSoloud'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aVelX', '0.0f'], ['float', 'aVelY', '0.0f'], ['float', 'aVelZ', '0.0f'], ['float', 'aVolume', '1.0f'], ['int', 'aPaused', '0'], ['unsigned int', 'aBus', '0']]],
['unsigned int', 'Soloud_play3dClocked', [['Soloud *', 'aSoloud'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['unsigned int', 'Soloud_play3dClockedEx', [['Soloud *', 'aSoloud'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aVelX', '0.0f'], ['float', 'aVelY', '0.0f'], ['float', 'aVelZ', '0.0f'], ['float', 'aVolume', '1.0f'], ['unsigned int', 'aBus', '0']]],
['void', 'Soloud_seek', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['double', 'aSeconds']]],
['void', 'Soloud_stop', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['void', 'Soloud_stopAll', [['Soloud *', 'aSoloud']]],
['void', 'Soloud_stopAudioSource', [['Soloud *', 'aSoloud'], ['AudioSource *', 'aSound']]],
['void', 'Soloud_setFilterParameter', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aFilterId'], ['unsigned int', 'aAttributeId'], ['float', 'aValue']]],
['float', 'Soloud_getFilterParameter', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aFilterId'], ['unsigned int', 'aAttributeId']]],
['void', 'Soloud_fadeFilterParameter', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aFilterId'], ['unsigned int', 'aAttributeId'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_oscillateFilterParameter', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aFilterId'], ['unsigned int', 'aAttributeId'], ['float', 'aFrom'], ['float', 'aTo'], ['double', 'aTime']]],
['double', 'Soloud_getStreamTime', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['int', 'Soloud_getPause', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getVolume', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getOverallVolume', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getPan', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getSamplerate', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['int', 'Soloud_getProtectVoice', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['unsigned int', 'Soloud_getActiveVoiceCount', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getVoiceCount', [['Soloud *', 'aSoloud']]],
['int', 'Soloud_isValidVoiceHandle', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getRelativePlaySpeed', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getPostClipScaler', [['Soloud *', 'aSoloud']]],
['float', 'Soloud_getGlobalVolume', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getMaxActiveVoiceCount', [['Soloud *', 'aSoloud']]],
['int', 'Soloud_getLooping', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['void', 'Soloud_setLooping', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['int', 'aLooping']]],
['int', 'Soloud_setMaxActiveVoiceCount', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceCount']]],
['void', 'Soloud_setInaudibleBehavior', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Soloud_setGlobalVolume', [['Soloud *', 'aSoloud'], ['float', 'aVolume']]],
['void', 'Soloud_setPostClipScaler', [['Soloud *', 'aSoloud'], ['float', 'aScaler']]],
['void', 'Soloud_setPause', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['int', 'aPause']]],
['void', 'Soloud_setPauseAll', [['Soloud *', 'aSoloud'], ['int', 'aPause']]],
['int', 'Soloud_setRelativePlaySpeed', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aSpeed']]],
['void', 'Soloud_setProtectVoice', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['int', 'aProtect']]],
['void', 'Soloud_setSamplerate', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aSamplerate']]],
['void', 'Soloud_setPan', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aPan']]],
['void', 'Soloud_setPanAbsolute', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aLVolume'], ['float', 'aRVolume']]],
['void', 'Soloud_setPanAbsoluteEx', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aLVolume'], ['float', 'aRVolume'], ['float', 'aLBVolume', '0'], ['float', 'aRBVolume', '0'], ['float', 'aCVolume', '0'], ['float', 'aSVolume', '0']]],
['void', 'Soloud_setVolume', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aVolume']]],
['void', 'Soloud_setDelaySamples', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aSamples']]],
['void', 'Soloud_fadeVolume', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_fadePan', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_fadeRelativePlaySpeed', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_fadeGlobalVolume', [['Soloud *', 'aSoloud'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_schedulePause', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['double', 'aTime']]],
['void', 'Soloud_scheduleStop', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['double', 'aTime']]],
['void', 'Soloud_oscillateVolume', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aFrom'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_oscillatePan', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aFrom'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_oscillateRelativePlaySpeed', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aFrom'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_oscillateGlobalVolume', [['Soloud *', 'aSoloud'], ['float', 'aFrom'], ['float', 'aTo'], ['double', 'aTime']]],
['void', 'Soloud_setGlobalFilter', [['Soloud *', 'aSoloud'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'Soloud_setVisualizationEnable', [['Soloud *', 'aSoloud'], ['int', 'aEnable']]],
['float *', 'Soloud_calcFFT', [['Soloud *', 'aSoloud']]],
['float *', 'Soloud_getWave', [['Soloud *', 'aSoloud']]],
['unsigned int', 'Soloud_getLoopCount', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle']]],
['float', 'Soloud_getInfo', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aInfoKey']]],
['unsigned int', 'Soloud_createVoiceGroup', [['Soloud *', 'aSoloud']]],
['int', 'Soloud_destroyVoiceGroup', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceGroupHandle']]],
['int', 'Soloud_addVoiceToGroup', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceGroupHandle'], ['unsigned int', 'aVoiceHandle']]],
['int', 'Soloud_isVoiceGroup', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceGroupHandle']]],
['int', 'Soloud_isVoiceGroupEmpty', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceGroupHandle']]],
['void', 'Soloud_update3dAudio', [['Soloud *', 'aSoloud']]],
['int', 'Soloud_set3dSoundSpeed', [['Soloud *', 'aSoloud'], ['float', 'aSpeed']]],
['float', 'Soloud_get3dSoundSpeed', [['Soloud *', 'aSoloud']]],
['void', 'Soloud_set3dListenerParameters', [['Soloud *', 'aSoloud'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aAtX'], ['float', 'aAtY'], ['float', 'aAtZ'], ['float', 'aUpX'], ['float', 'aUpY'], ['float', 'aUpZ']]],
['void', 'Soloud_set3dListenerParametersEx', [['Soloud *', 'aSoloud'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aAtX'], ['float', 'aAtY'], ['float', 'aAtZ'], ['float', 'aUpX'], ['float', 'aUpY'], ['float', 'aUpZ'], ['float', 'aVelocityX', '0.0f'], ['float', 'aVelocityY', '0.0f'], ['float', 'aVelocityZ', '0.0f']]],
['void', 'Soloud_set3dListenerPosition', [['Soloud *', 'aSoloud'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['void', 'Soloud_set3dListenerAt', [['Soloud *', 'aSoloud'], ['float', 'aAtX'], ['float', 'aAtY'], ['float', 'aAtZ']]],
['void', 'Soloud_set3dListenerUp', [['Soloud *', 'aSoloud'], ['float', 'aUpX'], ['float', 'aUpY'], ['float', 'aUpZ']]],
['void', 'Soloud_set3dListenerVelocity', [['Soloud *', 'aSoloud'], ['float', 'aVelocityX'], ['float', 'aVelocityY'], ['float', 'aVelocityZ']]],
['void', 'Soloud_set3dSourceParameters', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['void', 'Soloud_set3dSourceParametersEx', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aVelocityX', '0.0f'], ['float', 'aVelocityY', '0.0f'], ['float', 'aVelocityZ', '0.0f']]],
['void', 'Soloud_set3dSourcePosition', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['void', 'Soloud_set3dSourceVelocity', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aVelocityX'], ['float', 'aVelocityY'], ['float', 'aVelocityZ']]],
['void', 'Soloud_set3dSourceMinMaxDistance', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Soloud_set3dSourceAttenuation', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Soloud_set3dSourceDopplerFactor', [['Soloud *', 'aSoloud'], ['unsigned int', 'aVoiceHandle'], ['float', 'aDopplerFactor']]],
['void', 'Soloud_mix', [['Soloud *', 'aSoloud'], ['float *', 'aBuffer'], ['unsigned int', 'aSamples']]],
['void', 'Soloud_mixSigned16', [['Soloud *', 'aSoloud'], ['short *', 'aBuffer'], ['unsigned int', 'aSamples']]],
['void', 'AudioAttenuator_destroy', [['AudioAttenuator *', 'aAudioAttenuator']]],
['float', 'AudioAttenuator_attenuate', [['AudioAttenuator *', 'aAudioAttenuator'], ['float', 'aDistance'], ['float', 'aMinDistance'], ['float', 'aMaxDistance'], ['float', 'aRolloffFactor']]],
['void', 'BiquadResonantFilter_destroy', [['BiquadResonantFilter *', 'aBiquadResonantFilter']]],
['BiquadResonantFilter *', 'BiquadResonantFilter_create', [[]]],
['int', 'BiquadResonantFilter_setParams', [['BiquadResonantFilter *', 'aBiquadResonantFilter'], ['int', 'aType'], ['float', 'aSampleRate'], ['float', 'aFrequency'], ['float', 'aResonance']]],
['void', 'LofiFilter_destroy', [['LofiFilter *', 'aLofiFilter']]],
['LofiFilter *', 'LofiFilter_create', [[]]],
['int', 'LofiFilter_setParams', [['LofiFilter *', 'aLofiFilter'], ['float', 'aSampleRate'], ['float', 'aBitdepth']]],
['void', 'Bus_destroy', [['Bus *', 'aBus']]],
['Bus *', 'Bus_create', [[]]],
['void', 'Bus_setFilter', [['Bus *', 'aBus'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['unsigned int', 'Bus_play', [['Bus *', 'aBus'], ['AudioSource *', 'aSound']]],
['unsigned int', 'Bus_playEx', [['Bus *', 'aBus'], ['AudioSource *', 'aSound'], ['float', 'aVolume', '1.0f'], ['float', 'aPan', '0.0f'], ['int', 'aPaused', '0']]],
['unsigned int', 'Bus_playClocked', [['Bus *', 'aBus'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound']]],
['unsigned int', 'Bus_playClockedEx', [['Bus *', 'aBus'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound'], ['float', 'aVolume', '1.0f'], ['float', 'aPan', '0.0f']]],
['unsigned int', 'Bus_play3d', [['Bus *', 'aBus'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['unsigned int', 'Bus_play3dEx', [['Bus *', 'aBus'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aVelX', '0.0f'], ['float', 'aVelY', '0.0f'], ['float', 'aVelZ', '0.0f'], ['float', 'aVolume', '1.0f'], ['int', 'aPaused', '0']]],
['unsigned int', 'Bus_play3dClocked', [['Bus *', 'aBus'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ']]],
['unsigned int', 'Bus_play3dClockedEx', [['Bus *', 'aBus'], ['double', 'aSoundTime'], ['AudioSource *', 'aSound'], ['float', 'aPosX'], ['float', 'aPosY'], ['float', 'aPosZ'], ['float', 'aVelX', '0.0f'], ['float', 'aVelY', '0.0f'], ['float', 'aVelZ', '0.0f'], ['float', 'aVolume', '1.0f']]],
['int', 'Bus_setChannels', [['Bus *', 'aBus'], ['unsigned int', 'aChannels']]],
['void', 'Bus_setVisualizationEnable', [['Bus *', 'aBus'], ['int', 'aEnable']]],
['float *', 'Bus_calcFFT', [['Bus *', 'aBus']]],
['float *', 'Bus_getWave', [['Bus *', 'aBus']]],
['void', 'Bus_setVolume', [['Bus *', 'aBus'], ['float', 'aVolume']]],
['void', 'Bus_setLooping', [['Bus *', 'aBus'], ['int', 'aLoop']]],
['void', 'Bus_set3dMinMaxDistance', [['Bus *', 'aBus'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Bus_set3dAttenuation', [['Bus *', 'aBus'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Bus_set3dDopplerFactor', [['Bus *', 'aBus'], ['float', 'aDopplerFactor']]],
['void', 'Bus_set3dProcessing', [['Bus *', 'aBus'], ['int', 'aDo3dProcessing']]],
['void', 'Bus_set3dListenerRelative', [['Bus *', 'aBus'], ['int', 'aListenerRelative']]],
['void', 'Bus_set3dDistanceDelay', [['Bus *', 'aBus'], ['int', 'aDistanceDelay']]],
['void', 'Bus_set3dCollider', [['Bus *', 'aBus'], ['AudioCollider *', 'aCollider']]],
['void', 'Bus_set3dColliderEx', [['Bus *', 'aBus'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'Bus_set3dAttenuator', [['Bus *', 'aBus'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'Bus_setInaudibleBehavior', [['Bus *', 'aBus'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Bus_stop', [['Bus *', 'aBus']]],
['void', 'EchoFilter_destroy', [['EchoFilter *', 'aEchoFilter']]],
['EchoFilter *', 'EchoFilter_create', [[]]],
['int', 'EchoFilter_setParams', [['EchoFilter *', 'aEchoFilter'], ['float', 'aDelay']]],
['int', 'EchoFilter_setParamsEx', [['EchoFilter *', 'aEchoFilter'], ['float', 'aDelay'], ['float', 'aDecay', '0.7f'], ['float', 'aFilter', '0.0f']]],
['void', 'FFTFilter_destroy', [['FFTFilter *', 'aFFTFilter']]],
['FFTFilter *', 'FFTFilter_create', [[]]],
['void', 'BassboostFilter_destroy', [['BassboostFilter *', 'aBassboostFilter']]],
['int', 'BassboostFilter_setParams', [['BassboostFilter *', 'aBassboostFilter'], ['float', 'aBoost']]],
['BassboostFilter *', 'BassboostFilter_create', [[]]],
['void', 'Speech_destroy', [['Speech *', 'aSpeech']]],
['Speech *', 'Speech_create', [[]]],
['int', 'Speech_setText', [['Speech *', 'aSpeech'], ['const char *', 'aText']]],
['void', 'Speech_setVolume', [['Speech *', 'aSpeech'], ['float', 'aVolume']]],
['void', 'Speech_setLooping', [['Speech *', 'aSpeech'], ['int', 'aLoop']]],
['void', 'Speech_set3dMinMaxDistance', [['Speech *', 'aSpeech'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Speech_set3dAttenuation', [['Speech *', 'aSpeech'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Speech_set3dDopplerFactor', [['Speech *', 'aSpeech'], ['float', 'aDopplerFactor']]],
['void', 'Speech_set3dProcessing', [['Speech *', 'aSpeech'], ['int', 'aDo3dProcessing']]],
['void', 'Speech_set3dListenerRelative', [['Speech *', 'aSpeech'], ['int', 'aListenerRelative']]],
['void', 'Speech_set3dDistanceDelay', [['Speech *', 'aSpeech'], ['int', 'aDistanceDelay']]],
['void', 'Speech_set3dCollider', [['Speech *', 'aSpeech'], ['AudioCollider *', 'aCollider']]],
['void', 'Speech_set3dColliderEx', [['Speech *', 'aSpeech'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'Speech_set3dAttenuator', [['Speech *', 'aSpeech'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'Speech_setInaudibleBehavior', [['Speech *', 'aSpeech'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Speech_setFilter', [['Speech *', 'aSpeech'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'Speech_stop', [['Speech *', 'aSpeech']]],
['void', 'Wav_destroy', [['Wav *', 'aWav']]],
['Wav *', 'Wav_create', [[]]],
['int', 'Wav_load', [['Wav *', 'aWav'], ['const char *', 'aFilename']]],
['int', 'Wav_loadMem', [['Wav *', 'aWav'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength']]],
['int', 'Wav_loadMemEx', [['Wav *', 'aWav'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength'], ['int', 'aCopy', 'false'], ['int', 'aTakeOwnership', 'true']]],
['int', 'Wav_loadFile', [['Wav *', 'aWav'], ['File *', 'aFile']]],
['double', 'Wav_getLength', [['Wav *', 'aWav']]],
['void', 'Wav_setVolume', [['Wav *', 'aWav'], ['float', 'aVolume']]],
['void', 'Wav_setLooping', [['Wav *', 'aWav'], ['int', 'aLoop']]],
['void', 'Wav_set3dMinMaxDistance', [['Wav *', 'aWav'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Wav_set3dAttenuation', [['Wav *', 'aWav'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Wav_set3dDopplerFactor', [['Wav *', 'aWav'], ['float', 'aDopplerFactor']]],
['void', 'Wav_set3dProcessing', [['Wav *', 'aWav'], ['int', 'aDo3dProcessing']]],
['void', 'Wav_set3dListenerRelative', [['Wav *', 'aWav'], ['int', 'aListenerRelative']]],
['void', 'Wav_set3dDistanceDelay', [['Wav *', 'aWav'], ['int', 'aDistanceDelay']]],
['void', 'Wav_set3dCollider', [['Wav *', 'aWav'], ['AudioCollider *', 'aCollider']]],
['void', 'Wav_set3dColliderEx', [['Wav *', 'aWav'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'Wav_set3dAttenuator', [['Wav *', 'aWav'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'Wav_setInaudibleBehavior', [['Wav *', 'aWav'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Wav_setFilter', [['Wav *', 'aWav'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'Wav_stop', [['Wav *', 'aWav']]],
['void', 'WavStream_destroy', [['WavStream *', 'aWavStream']]],
['WavStream *', 'WavStream_create', [[]]],
['int', 'WavStream_load', [['WavStream *', 'aWavStream'], ['const char *', 'aFilename']]],
['int', 'WavStream_loadMem', [['WavStream *', 'aWavStream'], ['unsigned char *', 'aData'], ['unsigned int', 'aDataLen']]],
['int', 'WavStream_loadMemEx', [['WavStream *', 'aWavStream'], ['unsigned char *', 'aData'], ['unsigned int', 'aDataLen'], ['int', 'aCopy', 'false'], ['int', 'aTakeOwnership', 'true']]],
['int', 'WavStream_loadToMem', [['WavStream *', 'aWavStream'], ['const char *', 'aFilename']]],
['int', 'WavStream_loadFile', [['WavStream *', 'aWavStream'], ['File *', 'aFile']]],
['int', 'WavStream_loadFileToMem', [['WavStream *', 'aWavStream'], ['File *', 'aFile']]],
['double', 'WavStream_getLength', [['WavStream *', 'aWavStream']]],
['void', 'WavStream_setVolume', [['WavStream *', 'aWavStream'], ['float', 'aVolume']]],
['void', 'WavStream_setLooping', [['WavStream *', 'aWavStream'], ['int', 'aLoop']]],
['void', 'WavStream_set3dMinMaxDistance', [['WavStream *', 'aWavStream'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'WavStream_set3dAttenuation', [['WavStream *', 'aWavStream'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'WavStream_set3dDopplerFactor', [['WavStream *', 'aWavStream'], ['float', 'aDopplerFactor']]],
['void', 'WavStream_set3dProcessing', [['WavStream *', 'aWavStream'], ['int', 'aDo3dProcessing']]],
['void', 'WavStream_set3dListenerRelative', [['WavStream *', 'aWavStream'], ['int', 'aListenerRelative']]],
['void', 'WavStream_set3dDistanceDelay', [['WavStream *', 'aWavStream'], ['int', 'aDistanceDelay']]],
['void', 'WavStream_set3dCollider', [['WavStream *', 'aWavStream'], ['AudioCollider *', 'aCollider']]],
['void', 'WavStream_set3dColliderEx', [['WavStream *', 'aWavStream'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'WavStream_set3dAttenuator', [['WavStream *', 'aWavStream'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'WavStream_setInaudibleBehavior', [['WavStream *', 'aWavStream'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'WavStream_setFilter', [['WavStream *', 'aWavStream'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'WavStream_stop', [['WavStream *', 'aWavStream']]],
['void', 'Prg_destroy', [['Prg *', 'aPrg']]],
['Prg *', 'Prg_create', [[]]],
['unsigned int', 'Prg_rand', [['Prg *', 'aPrg']]],
['void', 'Prg_srand', [['Prg *', 'aPrg'], ['int', 'aSeed']]],
['void', 'Sfxr_destroy', [['Sfxr *', 'aSfxr']]],
['Sfxr *', 'Sfxr_create', [[]]],
['void', 'Sfxr_resetParams', [['Sfxr *', 'aSfxr']]],
['int', 'Sfxr_loadParams', [['Sfxr *', 'aSfxr'], ['const char *', 'aFilename']]],
['int', 'Sfxr_loadParamsMem', [['Sfxr *', 'aSfxr'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength']]],
['int', 'Sfxr_loadParamsMemEx', [['Sfxr *', 'aSfxr'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength'], ['int', 'aCopy', 'false'], ['int', 'aTakeOwnership', 'true']]],
['int', 'Sfxr_loadParamsFile', [['Sfxr *', 'aSfxr'], ['File *', 'aFile']]],
['int', 'Sfxr_loadPreset', [['Sfxr *', 'aSfxr'], ['int', 'aPresetNo'], ['int', 'aRandSeed']]],
['void', 'Sfxr_setVolume', [['Sfxr *', 'aSfxr'], ['float', 'aVolume']]],
['void', 'Sfxr_setLooping', [['Sfxr *', 'aSfxr'], ['int', 'aLoop']]],
['void', 'Sfxr_set3dMinMaxDistance', [['Sfxr *', 'aSfxr'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Sfxr_set3dAttenuation', [['Sfxr *', 'aSfxr'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Sfxr_set3dDopplerFactor', [['Sfxr *', 'aSfxr'], ['float', 'aDopplerFactor']]],
['void', 'Sfxr_set3dProcessing', [['Sfxr *', 'aSfxr'], ['int', 'aDo3dProcessing']]],
['void', 'Sfxr_set3dListenerRelative', [['Sfxr *', 'aSfxr'], ['int', 'aListenerRelative']]],
['void', 'Sfxr_set3dDistanceDelay', [['Sfxr *', 'aSfxr'], ['int', 'aDistanceDelay']]],
['void', 'Sfxr_set3dCollider', [['Sfxr *', 'aSfxr'], ['AudioCollider *', 'aCollider']]],
['void', 'Sfxr_set3dColliderEx', [['Sfxr *', 'aSfxr'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'Sfxr_set3dAttenuator', [['Sfxr *', 'aSfxr'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'Sfxr_setInaudibleBehavior', [['Sfxr *', 'aSfxr'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Sfxr_setFilter', [['Sfxr *', 'aSfxr'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'Sfxr_stop', [['Sfxr *', 'aSfxr']]],
['void', 'FlangerFilter_destroy', [['FlangerFilter *', 'aFlangerFilter']]],
['FlangerFilter *', 'FlangerFilter_create', [[]]],
['int', 'FlangerFilter_setParams', [['FlangerFilter *', 'aFlangerFilter'], ['float', 'aDelay'], ['float', 'aFreq']]],
['void', 'DCRemovalFilter_destroy', [['DCRemovalFilter *', 'aDCRemovalFilter']]],
['DCRemovalFilter *', 'DCRemovalFilter_create', [[]]],
['int', 'DCRemovalFilter_setParams', [['DCRemovalFilter *', 'aDCRemovalFilter']]],
['int', 'DCRemovalFilter_setParamsEx', [['DCRemovalFilter *', 'aDCRemovalFilter'], ['float', 'aLength', '0.1f']]],
['void', 'Modplug_destroy', [['Modplug *', 'aModplug']]],
['Modplug *', 'Modplug_create', [[]]],
['int', 'Modplug_load', [['Modplug *', 'aModplug'], ['const char *', 'aFilename']]],
['int', 'Modplug_loadMem', [['Modplug *', 'aModplug'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength']]],
['int', 'Modplug_loadMemEx', [['Modplug *', 'aModplug'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength'], ['int', 'aCopy', 'false'], ['int', 'aTakeOwnership', 'true']]],
['int', 'Modplug_loadFile', [['Modplug *', 'aModplug'], ['File *', 'aFile']]],
['void', 'Modplug_setVolume', [['Modplug *', 'aModplug'], ['float', 'aVolume']]],
['void', 'Modplug_setLooping', [['Modplug *', 'aModplug'], ['int', 'aLoop']]],
['void', 'Modplug_set3dMinMaxDistance', [['Modplug *', 'aModplug'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Modplug_set3dAttenuation', [['Modplug *', 'aModplug'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Modplug_set3dDopplerFactor', [['Modplug *', 'aModplug'], ['float', 'aDopplerFactor']]],
['void', 'Modplug_set3dProcessing', [['Modplug *', 'aModplug'], ['int', 'aDo3dProcessing']]],
['void', 'Modplug_set3dListenerRelative', [['Modplug *', 'aModplug'], ['int', 'aListenerRelative']]],
['void', 'Modplug_set3dDistanceDelay', [['Modplug *', 'aModplug'], ['int', 'aDistanceDelay']]],
['void', 'Modplug_set3dCollider', [['Modplug *', 'aModplug'], ['AudioCollider *', 'aCollider']]],
['void', 'Modplug_set3dColliderEx', [['Modplug *', 'aModplug'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'Modplug_set3dAttenuator', [['Modplug *', 'aModplug'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'Modplug_setInaudibleBehavior', [['Modplug *', 'aModplug'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Modplug_setFilter', [['Modplug *', 'aModplug'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'Modplug_stop', [['Modplug *', 'aModplug']]],
['void', 'Monotone_destroy', [['Monotone *', 'aMonotone']]],
['Monotone *', 'Monotone_create', [[]]],
['int', 'Monotone_setParams', [['Monotone *', 'aMonotone'], ['int', 'aHardwareChannels']]],
['int', 'Monotone_setParamsEx', [['Monotone *', 'aMonotone'], ['int', 'aHardwareChannels'], ['int', 'aWaveform', 'SQUARE']]],
['int', 'Monotone_load', [['Monotone *', 'aMonotone'], ['const char *', 'aFilename']]],
['int', 'Monotone_loadMem', [['Monotone *', 'aMonotone'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength']]],
['int', 'Monotone_loadMemEx', [['Monotone *', 'aMonotone'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength'], ['int', 'aCopy', 'false'], ['int', 'aTakeOwnership', 'true']]],
['int', 'Monotone_loadFile', [['Monotone *', 'aMonotone'], ['File *', 'aFile']]],
['void', 'Monotone_setVolume', [['Monotone *', 'aMonotone'], ['float', 'aVolume']]],
['void', 'Monotone_setLooping', [['Monotone *', 'aMonotone'], ['int', 'aLoop']]],
['void', 'Monotone_set3dMinMaxDistance', [['Monotone *', 'aMonotone'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'Monotone_set3dAttenuation', [['Monotone *', 'aMonotone'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'Monotone_set3dDopplerFactor', [['Monotone *', 'aMonotone'], ['float', 'aDopplerFactor']]],
['void', 'Monotone_set3dProcessing', [['Monotone *', 'aMonotone'], ['int', 'aDo3dProcessing']]],
['void', 'Monotone_set3dListenerRelative', [['Monotone *', 'aMonotone'], ['int', 'aListenerRelative']]],
['void', 'Monotone_set3dDistanceDelay', [['Monotone *', 'aMonotone'], ['int', 'aDistanceDelay']]],
['void', 'Monotone_set3dCollider', [['Monotone *', 'aMonotone'], ['AudioCollider *', 'aCollider']]],
['void', 'Monotone_set3dColliderEx', [['Monotone *', 'aMonotone'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'Monotone_set3dAttenuator', [['Monotone *', 'aMonotone'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'Monotone_setInaudibleBehavior', [['Monotone *', 'aMonotone'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'Monotone_setFilter', [['Monotone *', 'aMonotone'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'Monotone_stop', [['Monotone *', 'aMonotone']]],
['void', 'TedSid_destroy', [['TedSid *', 'aTedSid']]],
['TedSid *', 'TedSid_create', [[]]],
['int', 'TedSid_load', [['TedSid *', 'aTedSid'], ['const char *', 'aFilename']]],
['int', 'TedSid_loadToMem', [['TedSid *', 'aTedSid'], ['const char *', 'aFilename']]],
['int', 'TedSid_loadMem', [['TedSid *', 'aTedSid'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength']]],
['int', 'TedSid_loadMemEx', [['TedSid *', 'aTedSid'], ['unsigned char *', 'aMem'], ['unsigned int', 'aLength'], ['int', 'aCopy', 'false'], ['int', 'aTakeOwnership', 'true']]],
['int', 'TedSid_loadFileToMem', [['TedSid *', 'aTedSid'], ['File *', 'aFile']]],
['int', 'TedSid_loadFile', [['TedSid *', 'aTedSid'], ['File *', 'aFile']]],
['void', 'TedSid_setVolume', [['TedSid *', 'aTedSid'], ['float', 'aVolume']]],
['void', 'TedSid_setLooping', [['TedSid *', 'aTedSid'], ['int', 'aLoop']]],
['void', 'TedSid_set3dMinMaxDistance', [['TedSid *', 'aTedSid'], ['float', 'aMinDistance'], ['float', 'aMaxDistance']]],
['void', 'TedSid_set3dAttenuation', [['TedSid *', 'aTedSid'], ['unsigned int', 'aAttenuationModel'], ['float', 'aAttenuationRolloffFactor']]],
['void', 'TedSid_set3dDopplerFactor', [['TedSid *', 'aTedSid'], ['float', 'aDopplerFactor']]],
['void', 'TedSid_set3dProcessing', [['TedSid *', 'aTedSid'], ['int', 'aDo3dProcessing']]],
['void', 'TedSid_set3dListenerRelative', [['TedSid *', 'aTedSid'], ['int', 'aListenerRelative']]],
['void', 'TedSid_set3dDistanceDelay', [['TedSid *', 'aTedSid'], ['int', 'aDistanceDelay']]],
['void', 'TedSid_set3dCollider', [['TedSid *', 'aTedSid'], ['AudioCollider *', 'aCollider']]],
['void', 'TedSid_set3dColliderEx', [['TedSid *', 'aTedSid'], ['AudioCollider *', 'aCollider'], ['int', 'aUserData', '0']]],
['void', 'TedSid_set3dAttenuator', [['TedSid *', 'aTedSid'], ['AudioAttenuator *', 'aAttenuator']]],
['void', 'TedSid_setInaudibleBehavior', [['TedSid *', 'aTedSid'], ['int', 'aMustTick'], ['int', 'aKill']]],
['void', 'TedSid_setFilter', [['TedSid *', 'aTedSid'], ['unsigned int', 'aFilterId'], ['Filter *', 'aFilter']]],
['void', 'TedSid_stop', [['TedSid *', 'aTedSid']]]
]
| |
import datetime
import os
from nose.tools import eq_, ok_
from django.contrib.auth.models import User, Group, Permission
from django.utils import timezone
from django.core import mail
from funfactory.urlresolvers import reverse
from airmozilla.main.models import (
Event,
Channel,
Tag,
SuggestedEvent,
Location,
SuggestedEventComment,
Template,
LocationDefaultEnvironment,
Approval
)
from airmozilla.comments.models import (
Discussion,
SuggestedDiscussion
)
from .base import ManageTestCase
class TestSuggestions(ManageTestCase):
    """Tests for the management pages that review user-suggested events.

    Covers the suggestion list view (``manage:suggestions``) and the
    per-event review view (``manage:suggestion_review``): approving,
    rejecting, commenting, and the various submission states.  Each test
    builds its own ``SuggestedEvent`` fixtures against ``Location`` id=1,
    which is assumed to exist in the base fixtures (provided by
    ``ManageTestCase`` — not visible here).
    """

    # Placeholder image uploaded once per test in setUp(); suggested events
    # reference it by bare filename (`placeholder`).
    placeholder_path = 'airmozilla/manage/tests/firefox.png'
    placeholder = os.path.basename(placeholder_path)

    def setUp(self):
        """Upload the placeholder image so fixtures can reference it."""
        super(TestSuggestions, self).setUp()
        self._upload_media(self.placeholder_path)

    def test_suggestions_page(self):
        """The suggestions list shows submitted events, hides very old ones
        unless ``include_old`` is passed, and labels popcorn submissions."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        location = Location.objects.get(id=1)
        SuggestedEvent.objects.create(
            user=bob,
            title='TITLE1',
            slug='SLUG1',
            short_description='SHORT DESCRIPTION1',
            description='DESCRIPTION1',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            upcoming=True,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            submitted=now,
            first_submitted=now
        )
        SuggestedEvent.objects.create(
            user=bob,
            title='TITLE2',
            slug='SLUG2',
            short_description='SHORT DESCRIPTION2',
            description='DESCRIPTION2',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            upcoming=False,
            submitted=now - datetime.timedelta(days=1),
            first_submitted=now - datetime.timedelta(days=1),
        )
        # This one has a popcorn_url, so the page should mention "popcorn".
        event3 = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE3',
            slug='SLUG3',
            short_description='SHORT DESCRIPTION3',
            description='DESCRIPTION3',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            submitted=now - datetime.timedelta(days=1),
            first_submitted=now - datetime.timedelta(days=1),
            upcoming=False,
            popcorn_url='https://webmaker.org/1234'
        )
        url = reverse('manage:suggestions')
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE1' in response.content)
        ok_('TITLE2' in response.content)
        ok_('TITLE3' in response.content)
        ok_('popcorn' in response.content)
        # Age event3 far into the past; the default listing should then
        # exclude it (the exact cutoff is defined by the view, not here).
        event3.first_submitted -= datetime.timedelta(days=300)
        event3.save()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE1' in response.content)
        ok_('TITLE2' in response.content)
        ok_('TITLE3' not in response.content)
        # With include_old the aged event reappears.
        response = self.client.get(url, {'include_old': 1})
        eq_(response.status_code, 200)
        ok_('TITLE1' in response.content)
        ok_('TITLE2' in response.content)
        ok_('TITLE3' in response.content)

    def test_suggestions_page_states(self):
        """The list labels each suggestion by its lifecycle state:
        Submitted, Resubmitted, Bounced (rejected)."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        location = Location.objects.get(id=1)
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE',
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            submitted=now,
            first_submitted=now
        )
        url = reverse('manage:suggestions')
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Submitted' in response.content)
        # Re-submitting later than first_submitted flags it "Resubmitted".
        event.submitted += datetime.timedelta(days=1)
        event.status = SuggestedEvent.STATUS_RESUBMITTED
        event.save()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('Resubmitted' in response.content)
        # A rejected (bounced) suggestion has review comments and no
        # `submitted` timestamp.
        event.review_comments = "Not good"
        event.submitted = None
        event.status = SuggestedEvent.STATUS_REJECTED
        event.save()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('Resubmitted' not in response.content)
        ok_('Bounced' in response.content)
        # Submitting again after a bounce shows "Resubmitted" once more.
        event.submitted = now + datetime.timedelta(seconds=10)
        event.status = SuggestedEvent.STATUS_RESUBMITTED
        event.save()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('First submission' not in response.content)
        ok_('Resubmitted' in response.content)
        ok_('Bounced' not in response.content)

    def test_approve_suggested_event_basic(self):
        """Approving (plain POST) copies every field, tag and channel onto a
        real Event, blanks an unusable popcorn_url, and emails the user."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        tag1 = Tag.objects.create(name='TAG1')
        tag2 = Tag.objects.create(name='TAG2')
        channel = Channel.objects.create(name='CHANNEL')
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE' * 10,
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            additional_links='ADDITIONAL LINKS',
            remote_presenters='RICHARD & ZANDR',
            submitted=now,
            first_submitted=now,
            popcorn_url='https://',
        )
        event.tags.add(tag1)
        event.tags.add(tag2)
        event.channels.add(channel)
        url = reverse('manage:suggestion_review', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('SLUG' in response.content)
        ok_('SHORT DESCRIPTION' in response.content)
        ok_('DESCRIPTION' in response.content)
        ok_('ADDITIONAL LINKS' in response.content)
        ok_('RICHARD & ZANDR' in response.content)
        ok_(os.path.basename(self.placeholder) in response.content)
        ok_(location.name in response.content)
        ok_(event.get_privacy_display() in response.content)
        # A POST with no data means "approve".
        response = self.client.post(url)
        eq_(response.status_code, 302)
        # re-load it
        event = SuggestedEvent.objects.get(pk=event.pk)
        real = event.accepted
        assert real
        eq_(real.title, event.title)
        eq_(real.slug, event.slug)
        eq_(real.short_description, event.short_description)
        eq_(real.description, event.description)
        eq_(real.placeholder_img, event.placeholder_img)
        eq_(real.location, location)
        eq_(real.start_time, event.start_time)
        eq_(real.privacy, event.privacy)
        eq_(real.additional_links, event.additional_links)
        eq_(real.remote_presenters, event.remote_presenters)
        # 'https://' is not a usable popcorn URL so it gets dropped.
        eq_(real.popcorn_url, '')
        assert real.tags.all()
        eq_([x.name for x in real.tags.all()],
            [x.name for x in event.tags.all()])
        assert real.channels.all()
        eq_([x.name for x in real.channels.all()],
            [x.name for x in event.channels.all()])
        # it should have sent an email back
        email_sent = mail.outbox[-1]
        eq_(email_sent.recipients(), ['bob@mozilla.com'])
        ok_('accepted' in email_sent.subject)
        ok_('TITLE' in email_sent.subject)
        ok_('TITLE' in email_sent.body)
        # expect the link to the summary is in there
        summary_url = reverse('suggest:summary', args=(event.pk,))
        ok_(summary_url in email_sent.body)

    def test_approve_suggested_event_pre_recorded(self):
        """A non-upcoming (pre-recorded) suggestion becomes a PENDING event
        and the reviewer is redirected to the events list."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        channel = Channel.objects.create(name='CHANNEL')
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE' * 10,
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            additional_links='ADDITIONAL LINKS',
            remote_presenters='RICHARD & ZANDR',
            submitted=now,
            first_submitted=now,
            popcorn_url='https://',
            upcoming=False,
        )
        event.channels.add(channel)
        url = reverse('manage:suggestion_review', args=(event.pk,))
        response = self.client.post(url)
        eq_(response.status_code, 302)
        self.assertRedirects(
            response,
            reverse('manage:events')
        )
        real = Event.objects.get(title=event.title)
        eq_(real.status, Event.STATUS_PENDING)

    def test_approve_suggested_event_with_default_template_environment(self):
        """When the location has a LocationDefaultEnvironment matching the
        event's privacy, approval applies that template + environment."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        template = Template.objects.create(name='My Template')
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        channel = Channel.objects.create(name='CHANNEL')
        LocationDefaultEnvironment.objects.create(
            location=location,
            privacy=Event.PRIVACY_COMPANY,
            template=template,
            template_environment={'pri': 'vate'}
        )
        # and another one to make it slightly more challenging; only the
        # privacy-matching one must be picked.
        LocationDefaultEnvironment.objects.create(
            location=location,
            privacy=Event.PRIVACY_PUBLIC,
            template=template,
            template_environment={'pub': 'lic'}
        )
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE' * 10,
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_COMPANY,
            additional_links='ADDITIONAL LINKS',
            remote_presenters='RICHARD & ZANDR',
            submitted=now,
            first_submitted=now,
            popcorn_url='https://',
            upcoming=True,
        )
        event.channels.add(channel)
        url = reverse('manage:suggestion_review', args=(event.pk,))
        response = self.client.post(url)
        eq_(response.status_code, 302)
        real = Event.objects.get(title=event.title)
        # Upcoming events redirect to the edit page of the new event.
        self.assertRedirects(
            response,
            reverse('manage:event_edit', args=(real.pk,))
        )
        real = Event.objects.get(title=event.title)
        eq_(real.template, template)
        eq_(real.template_environment, {'pri': 'vate'})

    def test_approved_suggested_popcorn_event(self):
        """A valid popcorn submission is scheduled immediately with the
        default popcorn template, archived at start_time, and — even though
        approvers exist — creates no Approval instance."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        channel = Channel.objects.create(name='CHANNEL')
        # we need a group that can approve events
        group = Group.objects.get(name='testapprover')
        permission = Permission.objects.get(codename='change_approval')
        group.permissions.add(permission)
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE' * 10,
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_PUBLIC,
            additional_links='ADDITIONAL LINKS',
            remote_presenters='RICHARD & ZANDR',
            upcoming=False,
            popcorn_url='https://goodurl.com/',
            submitted=now,
            first_submitted=now,
        )
        event.channels.add(channel)
        popcorn_template = Template.objects.create(
            name='Popcorn template',
            content='Bla bla',
            default_popcorn_template=True
        )
        url = reverse('manage:suggestion_review', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('https://goodurl.com' in response.content)
        response = self.client.post(url)
        eq_(response.status_code, 302)
        # re-load it
        event = SuggestedEvent.objects.get(pk=event.pk)
        real = event.accepted
        assert real
        eq_(real.popcorn_url, event.popcorn_url)
        eq_(real.start_time, real.archive_time)
        eq_(real.template, popcorn_template)
        eq_(real.status, Event.STATUS_SCHEDULED)
        # that should NOT have created an Approval instance
        ok_(not Approval.objects.filter(event=real))

    def test_approved_suggested_event_with_discussion(self):
        """Approval copies a SuggestedDiscussion (flags + moderators) into a
        real Discussion on the created event."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        channel = Channel.objects.create(name='CHANNEL')
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE' * 10,
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            additional_links='ADDITIONAL LINKS',
            remote_presenters='RICHARD & ZANDR',
            upcoming=False,
            popcorn_url='https://goodurl.com/',
            submitted=now,
            first_submitted=now,
        )
        event.channels.add(channel)
        richard = User.objects.create(email='richard@mozilla.com')
        discussion = SuggestedDiscussion.objects.create(
            event=event,
            moderate_all=True,
            notify_all=True,
            enabled=True
        )
        discussion.moderators.add(bob)
        discussion.moderators.add(richard)
        url = reverse('manage:suggestion_review', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('Enabled' in response.content)
        ok_('bob@mozilla.com' in response.content)
        ok_('richard@mozilla.com' in response.content)
        response = self.client.post(url)
        eq_(response.status_code, 302)
        # re-load it
        event = SuggestedEvent.objects.get(pk=event.pk)
        real = event.accepted
        assert real
        eq_(real.popcorn_url, event.popcorn_url)
        eq_(real.start_time, real.archive_time)
        # that should now also have created a discussion
        real_discussion = Discussion.objects.get(event=real)
        ok_(real_discussion.enabled)
        ok_(real_discussion.moderate_all)
        ok_(real_discussion.notify_all)
        ok_(richard in real_discussion.moderators.all())
        ok_(bob in real_discussion.moderators.all())

    def test_reject_suggested_event(self):
        """Rejecting requires review_comments; on success the suggestion is
        un-submitted, marked REJECTED and the user is emailed the reason."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        tag1 = Tag.objects.create(name='TAG1')
        tag2 = Tag.objects.create(name='TAG2')
        channel = Channel.objects.create(name='CHANNEL')
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE',
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            submitted=now,
            first_submitted=now,
        )
        event.tags.add(tag1)
        event.tags.add(tag2)
        event.channels.add(channel)
        url = reverse('manage:suggestion_review', args=(event.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('TITLE' in response.content)
        ok_('SLUG' in response.content)
        ok_('SHORT DESCRIPTION' in response.content)
        ok_('DESCRIPTION' in response.content)
        ok_(os.path.basename(self.placeholder) in response.content)
        ok_(location.name in response.content)
        ok_(event.get_privacy_display() in response.content)
        # Rejecting without review comments re-renders the form (200).
        data = {'reject': 'true'}
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        data['review_comments'] = 'You suck!'
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        # re-load it
        event = SuggestedEvent.objects.get(pk=event.pk)
        ok_(not event.accepted)
        ok_(not event.submitted)
        # still though
        ok_(event.first_submitted)
        eq_(event.status, SuggestedEvent.STATUS_REJECTED)
        # it should have sent an email back
        email_sent = mail.outbox[-1]
        # NOTE(review): ok_ with two args only truth-tests the first;
        # this was probably meant to be eq_ — confirm before relying on it.
        ok_(email_sent.recipients(), ['bob@mozilla.com'])
        ok_('not accepted' in email_sent.subject)
        ok_('TITLE' in email_sent.body)
        ok_('You suck!' in email_sent.body)
        summary_url = reverse('suggest:summary', args=(event.pk,))
        ok_(summary_url in email_sent.body)

    def test_comment_suggested_event(self):
        """Posting a comment requires text, stores it stripped, and emails
        the suggester (comment body included unescaped in the email)."""
        bob = User.objects.create_user('bob', email='bob@mozilla.com')
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        tag1 = Tag.objects.create(name='TAG1')
        tag2 = Tag.objects.create(name='TAG2')
        channel = Channel.objects.create(name='CHANNEL')
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE',
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            submitted=now,
            first_submitted=now,
        )
        event.tags.add(tag1)
        event.tags.add(tag2)
        event.channels.add(channel)
        url = reverse('manage:suggestion_review', args=(event.pk,))
        data = {
            'save_comment': 1,
            'comment': ''
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('This field is required' in response.content)
        assert not SuggestedEventComment.objects.all()
        data['comment'] = """
        Hi!
        <script>alert("xss")</script>
        """
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        comment, = SuggestedEventComment.objects.all()
        eq_(comment.comment, data['comment'].strip())
        eq_(comment.user, self.user)  # who I'm logged in as
        # this should have sent an email to bob
        email_sent = mail.outbox[-1]
        # NOTE(review): ok_ with two args only truth-tests the first;
        # probably meant eq_(email_sent.recipients(), [bob.email]).
        ok_(email_sent.recipients(), [bob.email])
        ok_('New comment' in email_sent.subject)
        ok_(event.title in email_sent.subject)
        ok_('<script>alert("xss")</script>' in email_sent.body)
        ok_(reverse('suggest:summary', args=(event.pk,)) in email_sent.body)

    def test_retracted_comments_still_visible_in_management(self):
        """A retracted (never-submitted) suggestion still lists in management
        but cannot be approved/rejected (400), and the owner sees a
        'no longer submitted' notice on the public summary."""
        bob = User.objects.create_user(
            'bob',
            email='bob@mozilla.com',
            password='secret'
        )
        location = Location.objects.get(id=1)
        now = timezone.now()
        tomorrow = now + datetime.timedelta(days=1)
        tag1 = Tag.objects.create(name='TAG1')
        tag2 = Tag.objects.create(name='TAG2')
        channel = Channel.objects.create(name='CHANNEL')
        # create a suggested event that has everything filled in
        event = SuggestedEvent.objects.create(
            user=bob,
            title='TITLE',
            slug='SLUG',
            short_description='SHORT DESCRIPTION',
            description='DESCRIPTION',
            start_time=tomorrow,
            location=location,
            placeholder_img=self.placeholder,
            privacy=Event.PRIVACY_CONTRIBUTORS,
            first_submitted=now,
            # Note! No `submitted=now` here
        )
        assert os.path.isfile(event.placeholder_img.path)
        event.tags.add(tag1)
        event.tags.add(tag2)
        event.channels.add(channel)
        url = reverse('manage:suggestions')
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_(event.title in response.content)
        event_url = reverse('manage:suggestion_review', args=(event.pk,))
        ok_(event_url in response.content)
        response = self.client.get(event_url)
        eq_(response.status_code, 200)
        ok_('Event is currently not submitted' in response.content)
        # You can't reject or approve it at this stage
        data = {'reject': 'true', 'review_comments': 'Bla'}
        response = self.client.post(event_url, data)
        eq_(response.status_code, 400)
        response = self.client.post(event_url, {})
        eq_(response.status_code, 400)
        # log in as bob
        assert self.client.login(username='bob', password='secret')
        summary_url = reverse('suggest:summary', args=(event.pk,))
        response = self.client.get(summary_url)
        eq_(response.status_code, 200)
        ok_('Your event is no longer submitted' in response.content)
| |
# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import TransformerMixin, BaseEstimator, clone
from ..externals import six
from ..utils import safe_mask, check_array, deprecated
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
def _get_feature_importances(estimator):
"""Retrieve or aggregate feature importances from estimator"""
if hasattr(estimator, "feature_importances_"):
importances = estimator.feature_importances_
elif hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = np.sum(np.abs(estimator.coef_), axis=0)
else:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
    """Turn a user-supplied *threshold* spec into a concrete cutoff.

    ``None`` selects an estimator-dependent default: 1e-5 for l1-penalized
    models (explicit ``penalty == "l1"`` or a "Lasso" class name, which
    produce exact zero coefficients), otherwise the string ``"mean"``.
    String specs may be ``"mean"``, ``"median"``, or a scaled form such as
    ``"1.25*mean"``.  Anything else is coerced with ``float``.
    """
    if threshold is None:
        # determine default from estimator
        est_name = estimator.__class__.__name__
        l1_penalised = (hasattr(estimator, "penalty") and
                        estimator.penalty == "l1")
        if l1_penalised or "Lasso" in est_name:
            threshold = 1e-5
        else:
            threshold = "mean"

    if not isinstance(threshold, six.string_types):
        return float(threshold)

    if "*" in threshold:
        # "<scale>*<reference>" form, e.g. "1.25*mean".
        scale, reference = threshold.split("*")
        scale = float(scale.strip())
        reference = reference.strip()
        if reference == "median":
            reference = np.median(importances)
        elif reference == "mean":
            reference = np.mean(importances)
        else:
            raise ValueError("Unknown reference: " + reference)
        return scale * reference

    if threshold == "median":
        return np.median(importances)
    if threshold == "mean":
        return np.mean(importances)
    raise ValueError("Expected threshold='mean' or threshold='median' "
                     "got %s" % threshold)
class _LearntSelectorMixin(TransformerMixin):
    # Note because of the extra threshold parameter in transform, this does
    # not naturally extend from SelectorMixin
    """Transformer mixin selecting features based on importance weights.

    This implementation can be mixin on any estimator that exposes a
    ``feature_importances_`` or ``coef_`` attribute to evaluate the relative
    importance of individual features for feature selection.

    Deprecated in favour of :class:`SelectFromModel` (see the
    ``@deprecated`` decorator on ``transform``).
    """
    @deprecated('Support to use estimators as feature selectors will be '
                'removed in version 0.19. Use SelectFromModel instead.')
    def transform(self, X, threshold=None):
        """Reduce X to its most important features.

        Uses ``coef_`` or ``feature_importances_`` to determine the most
        important features. For models with a ``coef_`` for each class, the
        absolute sum over the classes is used.

        Parameters
        ----------
        X : array or scipy sparse matrix of shape [n_samples, n_features]
            The input samples.

        threshold : string, float or None, optional (default=None)
            The threshold value to use for feature selection. Features whose
            importance is greater or equal are kept while the others are
            discarded. If "median" (resp. "mean"), then the threshold value is
            the median (resp. the mean) of the feature importances. A scaling
            factor (e.g., "1.25*mean") may also be used. If None and if
            available, the object attribute ``threshold`` is used. Otherwise,
            "mean" is used by default.

        Returns
        -------
        X_r : array of shape [n_samples, n_selected_features]
            The input samples with only the selected features.

        Raises
        ------
        ValueError
            If the estimator is not fitted, the feature count differs from
            fit time, or the threshold discards every feature.
        """
        # `any` because either coef_ OR feature_importances_ suffices.
        check_is_fitted(self, ('coef_', 'feature_importances_'),
                        all_or_any=any)

        X = check_array(X, 'csc')
        importances = _get_feature_importances(self)

        if len(importances) != X.shape[1]:
            raise ValueError("X has different number of features than"
                             " during model fitting.")

        # Explicit argument wins; otherwise fall back to an (optional)
        # `threshold` attribute on the estimator itself.
        if threshold is None:
            threshold = getattr(self, 'threshold', None)
        threshold = _calculate_threshold(self, importances, threshold)

        # Selection
        try:
            mask = importances >= threshold
        except TypeError:
            # Fails in Python 3.x when threshold is str;
            # result is array of True
            raise ValueError("Invalid threshold: all features are discarded.")

        if np.any(mask):
            # safe_mask adapts the boolean mask for sparse matrices.
            mask = safe_mask(X, mask)
            return X[:, mask]
        else:
            raise ValueError("Invalid threshold: all features are discarded.")
class SelectFromModel(BaseEstimator, SelectorMixin):
    """Meta-transformer for selecting features based on importance weights.

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.
        This can be both a fitted (if ``prefit`` is set to True)
        or a non-fitted estimator.

    threshold : string, float, optional default None
        The threshold value to use for feature selection. Features whose
        importance is greater or equal are kept while the others are
        discarded. If "median" (resp. "mean"), then the ``threshold`` value is
        the median (resp. the mean) of the feature importances. A scaling
        factor (e.g., "1.25*mean") may also be used. If None and if the
        estimator has a parameter penalty set to l1, either explicitly
        or implicitly (e.g., Lasso), the threshold used is 1e-5.
        Otherwise, "mean" is used by default.

    prefit : bool, default False
        Whether a prefit model is expected to be passed into the constructor
        directly or not. If True, ``transform`` must be called directly
        and SelectFromModel cannot be used with ``cross_val_score``,
        ``GridSearchCV`` and similar utilities that clone the estimator.
        Otherwise train the model using ``fit`` and then ``transform`` to do
        feature selection.

    Attributes
    ----------
    `estimator_`: an estimator
        The base estimator from which the transformer is built.
        This is stored only when a non-fitted estimator is passed to the
        ``SelectFromModel``, i.e when prefit is False.

    `threshold_`: float
        The threshold value used for feature selection.
    """
    def __init__(self, estimator, threshold=None, prefit=False):
        # Per scikit-learn convention, __init__ only stores the parameters;
        # validation and cloning are deferred to fit.
        self.estimator = estimator
        self.threshold = threshold
        self.prefit = prefit

    def _get_support_mask(self):
        # SelectFromModel can directly call on transform. Use the
        # user-supplied estimator when prefit, otherwise the fitted clone.
        if self.prefit:
            estimator = self.estimator
        elif hasattr(self, 'estimator_'):
            estimator = self.estimator_
        else:
            raise ValueError(
                'Either fit the model before transform or set "prefit=True"'
                ' while passing the fitted estimator to the constructor.')
        # Boolean mask of features whose importance reaches the threshold.
        scores = _get_feature_importances(estimator)
        self.threshold_ = _calculate_threshold(estimator, scores,
                                               self.threshold)
        return scores >= self.threshold_

    def fit(self, X, y=None, **fit_params):
        """Fit the SelectFromModel meta-transformer.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        **fit_params : Other estimator specific parameters

        Returns
        -------
        self : object
            Returns self.
        """
        if self.prefit:
            raise NotFittedError(
                "Since 'prefit=True', call transform directly")
        # Clone so the user-supplied estimator is left untouched.
        if not hasattr(self, "estimator_"):
            self.estimator_ = clone(self.estimator)
        self.estimator_.fit(X, y, **fit_params)
        return self

    def partial_fit(self, X, y=None, **fit_params):
        """Fit the SelectFromModel meta-transformer only once.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        **fit_params : Other estimator specific parameters

        Returns
        -------
        self : object
            Returns self.
        """
        if self.prefit:
            raise NotFittedError(
                "Since 'prefit=True', call transform directly")
        # Clone only on the first call; subsequent calls keep updating the
        # same clone incrementally via partial_fit.
        if not hasattr(self, "estimator_"):
            self.estimator_ = clone(self.estimator)
        self.estimator_.partial_fit(X, y, **fit_params)
        return self
| |
import argparse
import csv
import os
import platform
import re
from collections import defaultdict, UserDict
import pytablewriter
class Result:
    """Outcome of benchmarking one module on one Python version."""

    def __init__(self, timing, parsed_value, exception, matched_expected):
        self.timing = timing
        self.parsed_value = parsed_value
        self.exception = exception
        self.matched_expected = matched_expected

    def formatted_timing(self):
        """Human-readable duration, or the empty string when no timing exists."""
        if self.timing is None:
            return ""
        return format_duration(self.timing)

    def __str__(self):
        # Exceptions and wrong answers take priority over the timing.
        if self.exception:
            return f"Raised ``{self.exception}`` Exception"
        if not self.matched_expected:
            return f"**Incorrect Result** (``{self.parsed_value}``)"
        return self.formatted_timing()
class ModuleResults(UserDict):
    """Maps ``(major, minor)`` Python versions to ``Result`` objects for one module."""

    def most_modern_result(self):
        """Return the result for the newest Python version that did not raise."""
        candidates = [
            (python_version, result)
            for python_version, result in self.data.items()
            if result.exception is None
        ]
        candidates.sort(key=lambda pair: pair[0], reverse=True)
        return candidates[0][1]
# Filenames produced by the benchmarking runs, keyed by Python major/minor.
FILENAME_REGEX_RAW = r"benchmark_timings_python(\d)(\d\d?).csv"
FILENAME_REGEX = re.compile(FILENAME_REGEX_RAW)

MODULE_VERSION_FILENAME_REGEX_RAW = r"module_versions_python(\d)(\d\d?).csv"
MODULE_VERSION_FILENAME_REGEX = re.compile(MODULE_VERSION_FILENAME_REGEX_RAW)

# Unit name -> size in seconds; SCALES is the same data ordered largest first.
UNITS = {"nsec": 1e-9, "usec": 1e-6, "msec": 1e-3, "sec": 1.0}
SCALES = sorted([(scale, unit) for unit, scale in UNITS.items()], reverse=True)

NOT_APPLICABLE = "N/A"


def format_duration(duration):
    """Render a duration (seconds) using the largest unit that fits.

    Mirrors the formatting of cPython's `timeit` CLI.
    """
    for scale, unit in SCALES:
        if duration >= scale:
            break
    else:
        # Smaller than every unit (e.g. 0): fall back to the smallest one.
        scale, unit = SCALES[-1]
    precision = 3
    return "%.*g %s" % (precision, duration / scale, unit)
def format_relative(duration1, duration2):
    """Express duration1 as a multiple of duration2, e.g. ``"1.5x"``.

    Returns ``NOT_APPLICABLE`` when either duration is missing.
    """
    if duration1 is None or duration2 is None:
        return NOT_APPLICABLE
    ratio = duration1 / duration2
    precision = 1
    return "%.*fx" % (precision, ratio)
def format_used_module_versions(module_versions_used):
    """Produce one requirements-style line per module, sorted case-insensitively.

    When a module was benchmarked at several versions, each version is
    annotated with the Python versions it was used on.
    """
    lines = []
    for module, versions in sorted(module_versions_used.items(),
                                   key=lambda item: item[0].lower()):
        if len(versions) == 1:
            only_version = next(iter(versions.keys()))
            lines.append(f"{module}=={only_version}")
        else:
            annotated = [
                f"{module}=={version} (on Python {', '.join(sorted(py_versions))})"
                for version, py_versions in versions.items()
            ]
            lines.append(", ".join(annotated))
    return lines
def relative_slowdown(subject, comparison):
    """Compare two modules' timings on the newest Python version they share."""
    shared_versions = set(subject.keys()).intersection(set(comparison))
    newest_shared = next(iter(sorted(shared_versions, reverse=True)), None)
    if not newest_shared:
        raise ValueError("No common Python version found")
    return format_relative(subject[newest_shared].timing,
                           comparison[newest_shared].timing)
def filepaths(directory, condition):
    """Recursively collect paths under *directory* whose basename satisfies *condition*."""
    matches = []
    for parent, _dirs, files in os.walk(directory):
        for name in files:
            if condition(name):
                matches.append(os.path.join(parent, name))
    return matches
def load_benchmarking_results(results_directory):
    """Parse every benchmark CSV found under *results_directory*.

    Returns a 3-tuple of:
      * ``results``: module name -> ``ModuleResults`` (which maps
        ``(major, minor)`` Python versions to ``Result`` objects),
      * ``python_versions_by_modernity``: all seen versions, newest first,
      * ``calling_code``: module name -> formatted code snippet.

    Raises NotImplementedError when CSVs from more than one benchmarking
    run (distinct timestamps) are mixed in the directory.
    """
    calling_code = {}
    timestamps = set()
    python_versions = set()
    results = defaultdict(ModuleResults)
    files_to_process = filepaths(results_directory, FILENAME_REGEX.match)
    for csv_file in files_to_process:
        try:
            with open(csv_file, "r") as fin:
                reader = csv.reader(fin, delimiter=",", quotechar='"')
                # First row is a header: Python major, minor and run timestamp.
                major, minor, timestamp = next(reader)
                major = int(major)
                minor = int(minor)
                timestamps.add(timestamp)
                for module, _setup, stmt, parse_result, count, time_taken, matched, exception in reader:
                    # Timing is per-iteration; it is absent when the parse raised.
                    timing = float(time_taken) / int(count) if exception == "" else None
                    exception = exception if exception != "" else None
                    results[module][(major, minor)] = Result(
                        timing,
                        parse_result,
                        exception,
                        matched == "True"
                    )
                    python_versions.add((major, minor))
                    calling_code[module] = f"``{stmt.format(timestamp=timestamp)}``"
        except Exception:
            # Name the offending file before propagating the original error.
            print(f"Problem while parsing `{csv_file}`")
            raise
    if len(timestamps) > 1:
        raise NotImplementedError(f"Found a mix of files in the results directory. Found files that represent the parsing of {timestamps}. Support for handling multiple timestamps is not implemented.")
    python_versions_by_modernity = sorted(python_versions, reverse=True)
    return results, python_versions_by_modernity, calling_code
def write_benchmarking_results(results_directory, output_file, baseline_module, include_call):
    """Write the benchmark comparison table (reStructuredText) to *output_file*.

    Modules are ordered fastest-first by their timing on the most modern
    Python version. A closing sentence compares *baseline_module* against
    the fastest (or next fastest) module. When *include_call* is true, a
    "Call" column with each module's invocation snippet is included.
    """
    results, python_versions_by_modernity, calling_code = load_benchmarking_results(results_directory)
    # Fastest module (on the newest Python) first.
    modules_by_modern_speed = [module for module, module_results in sorted([*results.items()], key=lambda kvp: kvp[1].most_modern_result().timing)]

    writer = pytablewriter.RstGridTableWriter()
    formatted_python_versions = [f"Python {major}.{minor}" for major, minor in python_versions_by_modernity]
    writer.header_list = ["Module"] + (["Call"] if include_call else []) + formatted_python_versions + [f"Relative Slowdown (versus {baseline_module}, latest Python)"]
    writer.type_hint_list = [pytablewriter.String] * len(writer.header_list)
    calling_codes = [calling_code[module] for module in modules_by_modern_speed]
    performance_results = [[results[module].get(python_version, NOT_APPLICABLE) for python_version in python_versions_by_modernity] for module in modules_by_modern_speed]
    relative_slowdowns = [relative_slowdown(results[module], results[baseline_module]) if module != baseline_module else NOT_APPLICABLE for module in modules_by_modern_speed]
    # BUG FIX: the original wrote `calling_code[module]` inside this
    # comprehension, but its loop variable shadowed the `calling_code` dict
    # with the per-module snippet string from `calling_codes`, so indexing it
    # with a str raised TypeError whenever include_call was True. The zipped
    # string is already the value to emit.
    writer.value_matrix = [
        [module] + ([code] if include_call else []) + performance_by_version + [slowdown]
        for module, code, performance_by_version, slowdown
        in zip(modules_by_modern_speed, calling_codes, performance_results, relative_slowdowns)
    ]

    with open(output_file, "w") as fout:
        writer.stream = fout
        writer.write_table()
        fout.write("\n")

        # Summarize the baseline's standing relative to the (next) fastest module.
        if len(modules_by_modern_speed) > 1:
            baseline_module_timing = results[baseline_module].most_modern_result().formatted_timing()
            fastest_module, next_fastest_module = modules_by_modern_speed[0:2]
            if fastest_module == baseline_module:
                fout.write(f"{baseline_module} takes {baseline_module_timing}, which is **{relative_slowdown(results[next_fastest_module], results[baseline_module])} faster than {next_fastest_module}**, the next fastest ISO 8601 parser in this comparison.\n")
            else:
                fout.write(f"{baseline_module} takes {baseline_module_timing}, which is **{relative_slowdown(results[baseline_module], results[fastest_module])} slower than {fastest_module}**, the fastest ISO 8601 parser in this comparison.\n")
def load_module_version_info(results_directory):
    """Collect module version usage from the per-Python-version CSV files.

    Returns ``{module: {version: {python_version_string, ...}}}``.
    """
    module_versions_used = defaultdict(dict)
    for csv_file in filepaths(results_directory, MODULE_VERSION_FILENAME_REGEX.match):
        with open(csv_file, "r") as fin:
            reader = csv.reader(fin, delimiter=",", quotechar='"')
            # Header row carries the Python version this file was produced on.
            major, minor = next(reader)
            python_version = ".".join((major, minor))
            for module, version in reader:
                module_versions_used[module].setdefault(version, set()).add(python_version)
    return module_versions_used
def write_module_version_info(results_directory, output_file):
    """Write a reStructuredText snippet listing the module versions benchmarked."""
    with open(output_file, "w") as fout:
        preamble = [
            f"Tested on {platform.system()} {platform.release()} using the following modules:\n",
            "\n",
            ".. code:: python\n",
            "\n",
        ]
        fout.writelines(preamble)
        for module_version_line in format_used_module_versions(load_module_version_info(results_directory)):
            fout.write(f"    {module_version_line}\n")
def main(results_directory, output_file, baseline_module, include_call, module_version_output):
    """Produce the benchmark table and the accompanying module-version list."""
    write_benchmarking_results(results_directory, output_file, baseline_module, include_call)
    # The module-version file lives alongside the main output file.
    module_version_path = os.path.join(os.path.dirname(output_file), module_version_output)
    write_module_version_info(results_directory, module_version_path)
if __name__ == "__main__":
    OUTPUT_FILE_HELP = "The filepath to use when outputting the reStructuredText results."
    RESULTS_DIR_HELP = f"Which directory the script should look in to find benchmarking results. Will process any file that match the regexes '{FILENAME_REGEX_RAW}' and '{MODULE_VERSION_FILENAME_REGEX_RAW}'."

    BASELINE_LIBRARY_DEFAULT = "ciso8601"
    BASELINE_LIBRARY_HELP = f'The module to make all relative calculations relative to (default: "{BASELINE_LIBRARY_DEFAULT}").'

    INCLUDE_CALL_DEFAULT = False
    INCLUDE_CALL_HELP = f"Whether or not to include a column showing the actual code call (default: {INCLUDE_CALL_DEFAULT})."

    MODULE_VERSION_OUTPUT_FILE_DEFAULT = "benchmark_module_versions.rst"
    MODULE_VERSION_OUTPUT_FILE_HELP = "The filename to use when outputting the reStructuredText list of module versions. Written to the same directory as `OUTPUT`"

    def _parse_bool(value):
        """Interpret a command-line string as a boolean.

        BUG FIX: the original used ``type=bool``; since ``bool("False")`` is
        True, any non-empty value (including "False") enabled the option.
        """
        return value.lower() in ("1", "true", "t", "yes", "y")

    parser = argparse.ArgumentParser("Formats the benchmarking results into a nicely formatted block of reStructuredText for use in the README.")
    parser.add_argument("RESULTS", help=RESULTS_DIR_HELP)
    parser.add_argument("OUTPUT", help=OUTPUT_FILE_HELP)
    parser.add_argument("--baseline-module", required=False, default=BASELINE_LIBRARY_DEFAULT, help=BASELINE_LIBRARY_HELP)
    parser.add_argument("--include-call", required=False, type=_parse_bool, default=INCLUDE_CALL_DEFAULT, help=INCLUDE_CALL_HELP)
    parser.add_argument("--module-version-output", required=False, default=MODULE_VERSION_OUTPUT_FILE_DEFAULT, help=MODULE_VERSION_OUTPUT_FILE_HELP)

    args = parser.parse_args()

    if not os.path.exists(args.RESULTS):
        raise ValueError(f'Results directory "{args.RESULTS}" does not exist.')

    main(args.RESULTS, args.OUTPUT, args.baseline_module, args.include_call, args.module_version_output)
| |
#!/usr/bin/env python3
# Author: Gabriel Bordeaux (gabfl)
# Github: https://github.com/gabfl/mysql-batch
# Compatible with python 2.7 & 3
import sys
import time
import pymysql.cursors
import pymysql.constants.CLIENT
import argparse
def update_batch(ids, table, set_, sleep=0, primary_key='id'):
    """
    Update a batch of rows
    """
    global confirmed_write

    # Nothing to do for an empty batch
    if not ids:
        return None

    # Build the UPDATE statement covering this batch of primary keys.
    # NOTE(review): table/set_/ids are interpolated into raw SQL; inputs
    # come from the CLI and are assumed trusted.
    print('* Updating %i rows...' % len(ids))
    id_csv = ', '.join([str(x) for x in ids])
    sql = ("UPDATE " + table + " SET " + set_ +
           " WHERE {0} IN (".format(primary_key) + id_csv + ")")
    print("   query: " + sql)

    if confirmed_write or query_yes_no("* Start updating?"):
        # Remember the confirmation so later batches do not prompt again
        confirmed_write = True
        run_query(sql, sleep)
    else:
        # answered "no"
        print("Error: Update declined.")
        sys.exit()

    return True
def delete_batch(ids, table, sleep=0, primary_key='id'):
    """
    Delete a batch of rows
    """
    global confirmed_write

    # Nothing to do for an empty batch
    if not ids:
        return None

    # Build the DELETE statement covering this batch of primary keys.
    # NOTE(review): table/ids are interpolated into raw SQL; inputs come
    # from the CLI and are assumed trusted.
    print('* Deleting %i rows...' % len(ids))
    id_csv = ', '.join([str(x) for x in ids])
    sql = ("DELETE FROM " + table +
           " WHERE {0} IN (".format(primary_key) + id_csv + ")")
    print("   query: " + sql)

    if confirmed_write or query_yes_no("* Start deleting?"):
        # Remember the confirmation so later batches do not prompt again
        confirmed_write = True
        run_query(sql, sleep)
    else:
        # answered "no"
        print("Error: Delete declined.")
        sys.exit()

    return True
def run_query(sql, sleep=0):
    """Execute a write query"""
    # Run the statement on the module-level connection and commit immediately
    # so each batch is durable on its own.
    with connection.cursor() as write_cursor:
        write_cursor.execute(sql)
        connection.commit()

    # Optional throttle between batches
    if sleep > 0:
        time.sleep(sleep)

    return True
def get_input():
    """
    Get user input
    """
    # raw_input() was renamed to input() in Python 3; keep 2.7 compatibility.
    if sys.version_info < (3, 0):
        # Python 2.7 retro-compatibility
        return raw_input().lower()
    # Python 3
    return input().lower()
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".
    (thanks https://code.activestate.com/recipes/577058/)
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}

    # Pick the prompt decoration matching the default answer.
    if default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    elif default is None:
        prompt = " [y/n] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    # Keep asking until we get an interpretable answer.
    while True:
        sys.stdout.write(question + prompt)
        choice = get_input()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def connect(host, user, port, password, database):
    """
    Connect to a MySQL database
    """
    # Gather the connection options in one place for readability.
    connection_options = dict(
        host=host,
        user=user,
        port=port,
        password=password,
        db=database,
        charset='utf8mb4',
        client_flag=pymysql.constants.CLIENT.MULTI_STATEMENTS,
        cursorclass=pymysql.cursors.DictCursor,
    )
    try:
        return pymysql.connect(**connection_options)
    except Exception:
        raise RuntimeError('Error: MySQL connection failed.')
def execute(host, user, port, password, database, action, table, where, set_=None, no_confirm=False, primary_key='id', read_batch_size=10000, write_batch_size=50, sleep=0):
    """
    Execute batch update or delete

    Repeatedly selects up to ``read_batch_size`` matching primary keys above
    the last processed id, then updates/deletes them in chunks of
    ``write_batch_size``. Loops until a SELECT returns no rows, at which
    point ``sys.exit()`` breaks out (caught below as SystemExit).
    Sets the module-level ``connection`` and ``confirmed_write`` globals
    used by the batch helpers.
    """
    global confirmed_write, connection

    # Make sure we have a SET clause for updates
    if action == 'update' and set_ is None:
        raise RuntimeError('Error: argument -s/--set is required for updates.')

    # Connect to the database
    connection = connect(host, user, port, password, database)

    try:
        # confirmed_write default value; --no_confirm pre-approves all writes
        confirmed_write = False
        if no_confirm:
            confirmed_write = True

        with connection.cursor() as cursor:
            # Default vars; min_id tracks the highest primary key seen so far
            min_id = 0

            while 1:  # Infinite loop, will be broken by sys.exit()
                # Get rows to modify
                print("* Selecting data...")
                sql = "SELECT {0} as id FROM ".format(primary_key) + table + " WHERE " + where + \
                    " AND {0} > %s ORDER BY {1} LIMIT %s".format(
                        primary_key, primary_key)
                print("   query: " + sql % (min_id, read_batch_size))
                cursor.execute(sql, (min_id, read_batch_size))

                # Row count
                count = cursor.rowcount

                # No more rows: sys.exit() is the loop's only exit path
                if count == 0:
                    print("* No more rows to modify!")
                    sys.exit()

                # Loop thru rows
                print("* Preparing to modify %s rows..." % count)
                ids = []
                for result in cursor:
                    # Append ID to batch
                    ids.append(result.get('id'))
                    # print(result)

                    # Minimum ID for future select (rows are ordered by pk)
                    min_id = result.get('id')

                    # Process write when batch size if reached
                    if len(ids) >= write_batch_size:
                        if action == 'delete':
                            # Process delete
                            delete_batch(ids, table, sleep, primary_key)
                        else:
                            # Process update
                            update_batch(ids, table, set_, sleep, primary_key)

                        # Reset ids
                        ids = []

                # Process final (partial) batch
                # NOTE(review): `len(ids) >= 0` is always true; `if ids`
                # alone decides whether a final batch exists.
                if ids and len(ids) >= 0:
                    if action == 'delete':
                        # Process delete
                        delete_batch(ids, table, sleep, primary_key)
                    else:
                        # Process update
                        update_batch(ids, table, set_, sleep, primary_key)
    except SystemExit:
        # Raised both on completion and on a declined confirmation
        print("* Program exited")
    # except:
    #     print("Unexpected error:", sys.exc_info()[0])
    finally:
        connection.close()

    return True
def main():
    """Parse the command line and launch the batch update/delete."""
    arg_parser = argparse.ArgumentParser()
    add = arg_parser.add_argument
    add("-H", "--host", default="127.0.0.1", help="MySQL server host")
    add("-P", "--port", type=int, default=3306, help="MySQL server port")
    add("-U", "--user", required=True, help="MySQL user")
    add("-p", "--password", default='', help="MySQL password")
    add("-d", "--database", required=True, help="MySQL database name")
    add("-t", "--table", required=True, help="MySQL table")
    add("-id", "--primary_key", default='id', help="Name of the primary key column")
    add("-w", "--where", required=True, help="Select WHERE clause")
    add("-s", "--set", help="Update SET clause")
    add("-rbz", "--read_batch_size", type=int, default=10000, help="Select batch size")
    add("-wbz", "--write_batch_size", type=int, default=50, help="Update/delete batch size")
    add("-S", "--sleep", type=float, default=0.00, help="Sleep after each batch")
    add("-a", "--action", default='update', choices=['update', 'delete'], help="Action ('update' or 'delete')")
    add("-n", "--no_confirm", action='store_true', help="Don't ask for confirmation before to run the write queries")
    options = arg_parser.parse_args()

    execute(host=options.host,
            user=options.user,
            port=options.port,
            password=options.password,
            database=options.database,
            action=options.action,
            table=options.table,
            where=options.where,
            set_=options.set,
            no_confirm=options.no_confirm,
            primary_key=options.primary_key,
            read_batch_size=options.read_batch_size,
            write_batch_size=options.write_batch_size,
            sleep=options.sleep
            )


if __name__ == '__main__':
    main()
| |
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Mark a string for translation without translating it (identity).

    Defined here as a do-nothing function because django.utils.translation
    cannot be imported: that module depends on the settings. Using ``def``
    instead of a lambda assignment (PEP 8 E731) also gives the callable a
    proper name in tracebacks.
    """
    return s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = []
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
# 'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
]
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
TEMPLATES = []
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ['/home/html', '/var/www']
ALLOWED_INCLUDE_ROOTS = []
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
#     re.compile(r'^/favicon.ico$'),
#     re.compile(r'^/robots.txt$'),
#     re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y',             # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y',             # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y',             # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y',             # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
    '%H:%M:%S',     # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
    '%H:%M',        # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
]
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for sync component.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
import re
# Some definitions don't follow all the conventions we want to enforce.
# It's either difficult or impossible to fix this, so we ignore the problem(s).
EXCEPTION_MODEL_TYPES = [
    # Grandfathered types:
    'UNSPECIFIED', # Doesn't have a root tag or notification type.
    'TOP_LEVEL_FOLDER', # Doesn't have a root tag or notification type.
    'AUTOFILL_WALLET_DATA', # Root tag and model type string lack DATA suffix.
    'APP_SETTINGS', # Model type string has inconsistent capitalization.
    'EXTENSION_SETTINGS', # Model type string has inconsistent capitalization.
    'PROXY_TABS', # Doesn't have a root tag or notification type.
    'NIGORI', # Model type string is 'encryption keys'.
    'SUPERVISED_USER_SETTINGS', # Root tag and model type string replace
                                # 'Supervised' with 'Managed'
    'SUPERVISED_USER_WHITELISTS', # See previous.
    # Deprecated types:
    'DEPRECATED_EXPERIMENTS',
    'DEPRECATED_FAVICON_IMAGES',
    'DEPRECATED_FAVICON_TRACKING']
# Root tags are used as prefixes when creating storage keys, so certain strings
# are blacklisted in order to prevent prefix collision.
BLACKLISTED_ROOT_TAGS = [
  '_mts_schema_descriptor'
]
# Number of distinct fields in a map entry; used to create
# sets that check for uniqueness.
MAP_ENTRY_FIELD_COUNT = 6
# String that precedes the ModelType when referencing the
# proto field number enum e.g.
# sync_pb::EntitySpecifics::kManagedUserFieldNumber.
# Used to map from enum references to the ModelType.
FIELD_NUMBER_PREFIX = 'sync_pb::EntitySpecifics::k'
# Start and end regexes for finding the EntitySpecifics definition in
# sync.proto. Raw strings are used so that the '\{' / '\}' sequences reach
# the regex engine as written instead of being treated as (invalid) string
# escapes, which raise DeprecationWarning and will eventually be errors.
PROTO_DEFINITION_START_PATTERN = r'^ oneof specifics_variant \{'
PROTO_DEFINITION_END_PATTERN = r'^\}'
# Start and end regexes for finding the ModelTypeInfoMap definition
# in model_type.cc.
MODEL_TYPE_START_PATTERN = r'^const ModelTypeInfo kModelTypeInfoMap'
MODEL_TYPE_END_PATTERN = r'^\};'
# Strings relating to files we'll need to read.
# model_type.cc is where the ModelTypeInfoMap is
# sync.proto is where the proto definitions for ModelTypes are.
PROTO_FILE_PATH = './protocol/sync.proto'
PROTO_FILE_NAME = 'sync.proto'
MODEL_TYPE_FILE_NAME = 'model_type.cc'
SYNC_SOURCE_FILES = (r'^components[\\/]sync[\\/].*\.(cc|h)$',)
# The wrapper around lint that is called below disables a set of filters if the
# passed filter evaluates to false. Pass a junk filter to avoid this behavior.
LINT_FILTERS = ['+fake/filter']
def CheckModelTypeInfoMap(input_api, output_api, model_type_file):
  """Checks the kModelTypeInfoMap in model_type.cc follows conventions.
  Checks that the kModelTypeInfoMap follows the below rules:
    1) The model type string should match the model type name, but with
      only the first letter capitalized and spaces instead of underscores.
    2) The root tag should be the same as the model type but all lowercase.
    3) The notification type should match the proto message name.
    4) No duplicate data across model types.
  Args:
    input_api: presubmit_support InputApi instance
    output_api: presubmit_support OutputApi instance
    model_type_file: AffectedFile object where the ModelTypeInfoMap is
  Returns:
    A (potentially empty) list PresubmitError objects corresponding to
    violations of the above rules.
  """
  map_entries = ParseModelTypeEntries(
      input_api, model_type_file.AbsoluteLocalPath())
  # Definitions span multiple lines and some rules (e.g. uniqueness) apply
  # across all entries, so the whole map is re-checked whenever any line
  # belonging to any entry was touched.
  changed_lines = set(
      line_num for line_num, _ in model_type_file.ChangedContents())
  map_was_touched = any(
      changed_lines.intersection(entry.affected_lines)
      for entry in map_entries)
  if not map_was_touched:
    return []
  proto_field_definitions = ParseSyncProtoFieldIdentifiers(
      input_api, os.path.abspath(PROTO_FILE_PATH))
  problems = list(CheckNoDuplicatedFieldValues(output_api, map_entries))
  for entry in map_entries:
    problems.extend(
        CheckNotificationTypeMatchesProtoMessageName(
            output_api, entry, proto_field_definitions))
    problems.extend(CheckRootTagNotInBlackList(output_api, entry))
    if entry.model_type in EXCEPTION_MODEL_TYPES:
      # Grandfathered/deprecated types are exempt from the naming rules.
      continue
    problems.extend(CheckModelTypeStringMatchesModelType(output_api, entry))
    problems.extend(CheckRootTagMatchesModelType(output_api, entry))
  return problems
class ModelTypeEnumEntry(object):
  """Class that encapsulates a ModelTypeInfo definition in model_type.cc.
  Allows access to each of the named fields in the definition and also
  which lines the definition spans.
  Attributes:
    model_type: entry's ModelType enum value
    notification_type: model type's notification string
    root_tag: model type's root tag
    model_type_string: string corresponding to the ModelType
    field_number: proto field number
    histogram_val: value identifying ModelType in histogram
    affected_lines: lines in model_type.cc that the definition spans
  """
  def __init__(self, entry_strings, affected_lines):
    # Unpacking enforces that a definition carries exactly six fields.
    (self.model_type,
     self.notification_type,
     self.root_tag,
     self.model_type_string,
     self.field_number,
     self.histogram_val) = entry_strings
    self.affected_lines = affected_lines
def ParseModelTypeEntries(input_api, model_type_cc_path):
  """Parses model_type_cc_path for ModelTypeEnumEntries
  Args:
    input_api: presubmit_support InputAPI instance
    model_type_cc_path: path to file containing the ModelTypeInfo entries
  Returns:
    A list of ModelTypeEnumEntry objects read from model_type.cc.
    e.g. ('AUTOFILL_WALLET_METADATA', 'WALLET_METADATA',
    'autofill_wallet_metadata', 'Autofill Wallet Metadata',
    'sync_pb::EntitySpecifics::kWalletMetadataFieldNumber', '35',
    [63, 64, 65])
  """
  file_contents = input_api.ReadFile(model_type_cc_path)
  start_pattern = input_api.re.compile(MODEL_TYPE_START_PATTERN)
  end_pattern = input_api.re.compile(MODEL_TYPE_END_PATTERN)
  # A single entry can span several lines, so field strings and line numbers
  # are accumulated until the line that closes the entry ('},') is seen.
  results, definition_strings, definition_lines = [], [], []
  inside_enum = False
  current_line_number = 0
  for line in file_contents.splitlines():
    current_line_number += 1
    if line.strip().startswith('//'):
      # Ignore comments.
      continue
    if start_pattern.match(line):
      # Everything from here to the end pattern is map content.
      inside_enum = True
      continue
    if inside_enum:
      if end_pattern.match(line):
        break
      # Strip surrounding braces/commas, then split into fields and drop
      # quotes and padding around each field value.
      line_entries = line.strip().strip('{},').split(',')
      definition_strings.extend([entry.strip('" ') for entry in line_entries])
      definition_lines.append(current_line_number)
      # '},' closes one entry; flush the accumulated fields into an object.
      if line.endswith('},'):
        results.append(ModelTypeEnumEntry(definition_strings, definition_lines))
        definition_strings = []
        definition_lines = []
  return results
def ParseSyncProtoFieldIdentifiers(input_api, sync_proto_path):
  """Parses proto field identifiers from the EntitySpecifics definition.
  Args:
    input_api: presubmit_support InputAPI instance
    sync_proto_path: path to the file containing the proto field definitions
  Returns:
    A dictionary of the format {'SyncDataType': 'field_identifier'}
    e.g. {'AutofillSpecifics': 'autofill'}
  """
  start_pattern = input_api.re.compile(PROTO_DEFINITION_START_PATTERN)
  end_pattern = input_api.re.compile(PROTO_DEFINITION_END_PATTERN)
  proto_field_definitions = {}
  inside_definition = False
  for raw_line in input_api.ReadFile(sync_proto_path).splitlines():
    if not inside_definition:
      # Skip everything until the oneof block opens.
      inside_definition = bool(start_pattern.match(raw_line))
      continue
    if end_pattern.match(raw_line):
      break
    stripped = raw_line.strip()
    tokens = stripped.split(' ')
    # Ignore comments and lines too short to be '<type> <identifier> = <n>;'.
    if '//' in stripped or len(tokens) < 3:
      continue
    proto_field_definitions[tokens[0]] = tokens[1]
  return proto_field_definitions
def StripTrailingS(string):
  """Returns *string* with any trailing 's'/'S' characters removed."""
  return string.rstrip('Ss')
def IsTitleCased(string):
  """Returns True if every space-separated word starts with a capital letter.
  Uses the built-in all() instead of Python 2's `reduce` builtin, which does
  not exist as a builtin in Python 3 and made the original raise NameError.
  Empty words (produced by leading/duplicate spaces) and the empty string are
  ignored instead of raising IndexError.
  """
  return all(word[0].isupper() for word in string.split(' ') if word)
def FormatPresubmitError(output_api, message, affected_lines):
  """Outputs a formatted error message with filename and line number(s).
  Args:
    output_api: presubmit_support OutputApi instance
    message: description of the violation
    affected_lines: sorted line numbers the offending definition spans
  Returns:
    An output_api.PresubmitError carrying the location-prefixed message.
  """
  if len(affected_lines) == 1:
    formatted = 'Error at line %d in model_type.cc: %s' % (
        affected_lines[0], message)
  else:
    formatted = 'Error at lines %d-%d in model_type.cc: %s' % (
        affected_lines[0], affected_lines[-1], message)
  return output_api.PresubmitError(formatted)
def CheckNotificationTypeMatchesProtoMessageName(
    output_api, map_entry, proto_field_definitions):
  """Check that map_entry's notification type matches sync.proto.
  Verifies that the notification_type matches the name of the field defined
  in the sync.proto by looking it up in the proto_field_definitions map.
  Args:
    output_api: presubmit_support OutputApi instance
    map_entry: ModelTypeEnumEntry instance
    proto_field_definitions: dict of proto field types and field names
  Returns:
    A potentially empty list of PresubmitError objects corresponding to
    violations of the above rule
  """
  if map_entry.field_number == '-1':
    # -1 marks types with no proto field to compare against.
    return []
  message_key = FieldNumberToPrototypeString(map_entry.field_number)
  proto_message_name = proto_field_definitions[message_key]
  if map_entry.notification_type.lower() == proto_message_name:
    return []
  message = ('In the construction of ModelTypeInfo: notification type "%s" '
             'does not match proto message name defined in sync.proto: "%s"'
             % (map_entry.notification_type, proto_message_name))
  return [FormatPresubmitError(output_api, message, map_entry.affected_lines)]
def CheckNoDuplicatedFieldValues(output_api, map_entries):
  """Check that map_entries has no duplicated field values.
  Verifies that every map_entry in map_entries doesn't have a field value
  used elsewhere in map_entries, ignoring special values ("" and -1).
  Args:
    output_api: presubmit_support OutputApi instance
    map_entries: list of ModelTypeEnumEntry objects to check
  Returns:
    A list PresubmitError objects for each duplicated field value
  """
  problems = []
  # One "already seen" set per field position in a definition.
  seen_per_field = [set() for _ in range(MAP_ENTRY_FIELD_COUNT)]
  for entry in map_entries:
    values = (entry.model_type, entry.notification_type, entry.root_tag,
              entry.model_type_string, entry.field_number, entry.histogram_val)
    for value, seen in zip(values, seen_per_field):
      if value in seen:
        problems.append(
            FormatPresubmitError(
                output_api, 'Duplicated field value "%s"' % value,
                entry.affected_lines))
      elif value and value != '-1':
        # "" and -1 are placeholders and may legitimately repeat.
        seen.add(value)
  return problems
def CheckModelTypeStringMatchesModelType(output_api, map_entry):
  """Check that map_entry's model_type_string matches ModelType.
  Args:
    output_api: presubmit_support OutputApi instance
    map_entry: ModelTypeEnumEntry object to check
  Returns:
    A list of PresubmitError objects for each violation
  """
  problems = []
  # The human-readable string is expected to be the enum name, lowercased,
  # with spaces for underscores; trailing s/S is ignored when comparing.
  expected = map_entry.model_type.lower().replace('_', ' ')
  actual = map_entry.model_type_string.lower()
  if StripTrailingS(expected) != StripTrailingS(actual):
    problems.append(
        FormatPresubmitError(
            output_api,
            'model type string "%s" does not match model type.'
            ' It should be "%s"' % (
                map_entry.model_type_string, expected.title()),
            map_entry.affected_lines))
  if not IsTitleCased(map_entry.model_type_string):
    problems.append(
        FormatPresubmitError(
            output_api,
            'model type string "%s" should be title cased' %
            map_entry.model_type_string,
            map_entry.affected_lines))
  return problems
def CheckRootTagMatchesModelType(output_api, map_entry):
  """Check that map_entry's root tag matches ModelType.
  Args:
    output_api: presubmit_support OutputAPI instance
    map_entry: ModelTypeEnumEntry object to check
  Returns:
    A list of PresubmitError objects for each violation
  """
  expected_root_tag = map_entry.model_type.lower()
  if (StripTrailingS(expected_root_tag) !=
      StripTrailingS(map_entry.root_tag)):
    return [
        FormatPresubmitError(
            output_api,
            # Fixed message: the original concatenated adjacent literals
            # without a space and emitted "It shouldbe".
            'root tag "%s" does not match model type. It should'
            ' be "%s"' % (map_entry.root_tag, expected_root_tag),
            map_entry.affected_lines)]
  return []
def CheckRootTagNotInBlackList(output_api, map_entry):
  """Checks that map_entry's root tag isn't a blacklisted string.
  Args:
    output_api: presubmit_support OutputAPI instance
    map_entry: ModelTypeEnumEntry object to check
  Returns:
    A list of PresubmitError objects for each violation
  """
  if map_entry.root_tag not in BLACKLISTED_ROOT_TAGS:
    return []
  return [FormatPresubmitError(
      output_api,
      'root tag "%s" is a blacklisted root tag' % map_entry.root_tag,
      map_entry.affected_lines)]
def FieldNumberToPrototypeString(field_number):
  """Converts a field number enum reference to an EntitySpecifics string.
  Converts a reference to the field number enum to the corresponding
  proto data type string.
  Args:
    field_number: string representation of a field number enum reference
  Returns:
    A string that is the corresponding proto field data type. e.g.
    FieldNumberToPrototypeString('EntitySpecifics::kAppFieldNumber')
    => 'AppSpecifics'
  """
  enum_name = field_number.replace(FIELD_NUMBER_PREFIX, '')
  return enum_name.replace('FieldNumber', 'Specifics')
def CheckChangeLintsClean(input_api, output_api):
  """Runs the canned lint check restricted to sync source files."""
  def source_filter(affected_file):
    # Only lint C++ files under components/sync.
    return input_api.FilterSourceFile(
        affected_file, white_list=SYNC_SOURCE_FILES, black_list=None)
  return input_api.canned_checks.CheckChangeLintsClean(
      input_api, output_api, source_filter, lint_filters=LINT_FILTERS,
      verbose_level=1)
def CheckChanges(input_api, output_api):
  """Runs all sync presubmit checks and collects their results."""
  results = list(CheckChangeLintsClean(input_api, output_api))
  interesting_names = (MODEL_TYPE_FILE_NAME, PROTO_FILE_NAME)
  for affected_file in input_api.AffectedFiles():
    # The map conventions are re-validated when either the map itself or the
    # proto definitions it must agree with were touched.
    if affected_file.LocalPath().endswith(interesting_names):
      results += CheckModelTypeInfoMap(input_api, output_api, affected_file)
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked by depot_tools at upload time."""
  return CheckChanges(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked by depot_tools at commit/land time."""
  return CheckChanges(input_api, output_api)
| |
"""Base geometry class and utilities
"""
import sys
from warnings import warn
from binascii import a2b_hex
from ctypes import pointer, c_size_t, c_char_p, c_void_p
from shapely.coords import CoordinateSequence
from shapely.ftools import wraps
from shapely.geos import lgeos, ReadingError
from shapely.geos import WKBWriter, WKTWriter
from shapely.impl import DefaultImplementation, delegated
# Python 2 compatibility: make `range` a lazy iterator like Python 3's.
if sys.version_info[0] < 3:
    range = xrange
# GEOS geometry type names, indexed by the integer id returned by
# lgeos.GEOSGeomTypeId (see geometry_type_name below).
GEOMETRY_TYPES = [
    'Point',
    'LineString',
    'LinearRing',
    'Polygon',
    'MultiPoint',
    'MultiLineString',
    'MultiPolygon',
    'GeometryCollection',
]
def dump_coords(geom):
    """Dump coordinates of a geometry in the same order as data packing"""
    if not isinstance(geom, BaseGeometry):
        raise ValueError('Must be instance of a geometry class; found ' +
                         geom.__class__.__name__)
    geom_type = geom.type
    if geom_type in ('Point', 'LineString', 'LinearRing'):
        return geom.coords[:]
    if geom_type == 'Polygon':
        interior_coords = [ring.coords[:] for ring in geom.interiors]
        return geom.exterior.coords[:] + interior_coords
    if geom_type.startswith('Multi') or geom_type == 'GeometryCollection':
        # Multi-part geometries are dumped part by part, recursively.
        return [dump_coords(part) for part in geom]
    raise ValueError('Unhandled geometry type: ' + repr(geom_type))
def geometry_type_name(g):
    """Return the geometry type name for the GEOS geometry pointer *g*."""
    if g is None:
        raise ValueError("Null geometry has no type")
    type_id = lgeos.GEOSGeomTypeId(g)
    return GEOMETRY_TYPES[type_id]
def geom_factory(g, parent=None):
    """Wrap a GEOS geometry pointer in the matching Shapely geometry class."""
    # Abstract geometry factory for use with topological methods below
    if not g:
        raise ValueError("No Shapely geometry can be created from null value")
    geom_type = geometry_type_name(g)
    # TODO: check cost of dynamic import by profiling
    mod = __import__(
        'shapely.geometry',
        globals(),
        locals(),
        [geom_type],
    )
    # Build a bare BaseGeometry, then re-class it to the concrete type so no
    # concrete __init__ (which would expect coordinates) runs.
    ob = BaseGeometry()
    ob.__class__ = getattr(mod, geom_type)
    ob.__geom__ = g
    ob.__p__ = parent
    ob._ndim = 3 if lgeos.methods['has_z'](g) else 2
    return ob
def geom_from_wkt(data):
    """Deprecated: build a Shapely geometry from WKT text."""
    warn("`geom_from_wkt` is deprecated. Use `geos.wkt_reader.read(data)`.",
         DeprecationWarning)
    if sys.version_info[0] >= 3:
        # The GEOS reader wants bytes under Python 3.
        data = data.encode('ascii')
    handle = lgeos.GEOSGeomFromWKT(c_char_p(data))
    if handle:
        return geom_factory(handle)
    raise ReadingError(
        "Could not create geometry because of errors while reading input.")
def geom_to_wkt(ob):
    """Deprecated: serialize geometry *ob* to WKT text."""
    warn("`geom_to_wkt` is deprecated. Use `geos.wkt_writer.write(ob)`.",
         DeprecationWarning)
    if ob is not None and ob._geom is not None:
        return lgeos.GEOSGeomToWKT(ob._geom)
    raise ValueError("Null geometry supports no operations")
def deserialize_wkb(data):
    """Create a raw GEOS geometry pointer from WKB bytes *data*."""
    geom = lgeos.GEOSGeomFromWKB_buf(c_char_p(data), c_size_t(len(data)))
    if geom:
        return geom
    raise ReadingError(
        "Could not create geometry because of errors while reading input.")
def geom_from_wkb(data):
    """Deprecated: build a Shapely geometry from WKB bytes."""
    warn("`geom_from_wkb` is deprecated. Use `geos.wkb_reader.read(data)`.",
         DeprecationWarning)
    raw_geom = deserialize_wkb(data)
    return geom_factory(raw_geom)
def geom_to_wkb(ob):
    """Deprecated: serialize geometry *ob* to WKB bytes."""
    warn("`geom_to_wkb` is deprecated. Use `geos.wkb_writer.write(ob)`.",
         DeprecationWarning)
    if ob is None or ob._geom is None:
        raise ValueError("Null geometry supports no operations")
    # GEOS writes the buffer length through this out-parameter.
    out_size = c_size_t()
    return lgeos.GEOSGeomToWKB_buf(c_void_p(ob._geom), pointer(out_size))
def geos_geom_from_py(ob, create_func=None):
    """Helper function for geos_*_from_py functions in each geom type.
    If a create_func is specified the coordinate sequence is cloned and a new
    geometry is created with it, otherwise the geometry is cloned directly.
    This behaviour is useful for converting between LineString and LinearRing
    objects.
    """
    if create_func is not None:
        coord_seq = lgeos.GEOSCoordSeq_clone(
            lgeos.GEOSGeom_getCoordSeq(ob._geom))
        geom = create_func(coord_seq)
    else:
        geom = lgeos.GEOSGeom_clone(ob._geom)
    return geom, ob._ndim
def exceptNull(func):
    """Decorator which helps avoid GEOS operations on null pointers."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        geom = args[0]
        # Refuse to run the wrapped operation on a null/empty geometry.
        if geom._geom and not geom.is_empty:
            return func(*args, **kwargs)
        raise ValueError("Null/empty geometry supports no operations")
    return wrapper
class CAP_STYLE(object):
    """Symbolic names for buffer end-cap style codes."""
    round = 1
    flat = 2
    square = 3
class JOIN_STYLE(object):
    """Symbolic names for buffer join style codes."""
    round = 1
    mitre = 2
    bevel = 3
# WKB (hex) for an empty GeometryCollection (type id 7, zero parts); used as
# the sentinel "no geometry" value for BaseGeometry.__geom__.
EMPTY = deserialize_wkb(a2b_hex(b'010700000000000000'))
class BaseGeometry(object):
"""
Provides GEOS spatial predicates and topological operations.
"""
# Attributes
# ----------
# __geom__ : c_void_p
# Cached ctypes pointer to GEOS geometry. Not to be accessed.
# _geom : c_void_p
# Property by which the GEOS geometry is accessed.
# __p__ : object
# Parent (Shapely) geometry
# _ctypes_data : object
# Cached ctypes data buffer
# _ndim : int
# Number of dimensions (2 or 3, generally)
# _crs : object
# Coordinate reference system. Available for Shapely extensions, but
# not implemented here.
# _other_owned : bool
# True if this object's GEOS geometry is owned by another as in the
# case of a multipart geometry member.
    # Cached GEOS geometry pointer; EMPTY marks "no geometry attached yet".
    __geom__ = EMPTY
    # Parent (Shapely) geometry, set for multipart members.
    __p__ = None
    _ctypes_data = None
    _ndim = None
    _crs = None
    _other_owned = False
    # Backend config
    impl = DefaultImplementation
    @property
    def _is_empty(self):
        # True when no live GEOS geometry is attached (sentinel or cleared).
        return self.__geom__ in [EMPTY, None]
    # a reference to the so/dll proxy to preserve access during clean up
    _lgeos = lgeos
def empty(self, val=EMPTY):
# TODO: defer cleanup to the implementation. We shouldn't be
# explicitly calling a lgeos method here.
if not self._is_empty and not self._other_owned and self.__geom__:
try:
self._lgeos.GEOSGeom_destroy(self.__geom__)
except AttributeError:
pass # _lgeos might be empty on shutdown
self.__geom__ = val
    def __del__(self):
        # Destroy the owned GEOS geometry (val=None avoids re-adopting the
        # EMPTY sentinel) and drop the parent reference to break cycles.
        self.empty(val=None)
        self.__p__ = None
    def __str__(self):
        """Return the WKT representation of the geometry."""
        return self.wkt
    # To support pickling
    def __reduce__(self):
        # Pickle as (class, no ctor args, WKB payload); __setstate__ restores.
        return (self.__class__, (), self.wkb)
def __setstate__(self, state):
self.empty()
self.__geom__ = deserialize_wkb(state)
if lgeos.methods['has_z'](self.__geom__):
self._ndim = 3
else:
self._ndim = 2
    @property
    def _geom(self):
        # Accessor for the raw GEOS geometry pointer.
        return self.__geom__
    @_geom.setter
    def _geom(self, val):
        # Destroy any currently-owned geometry before adopting the new one.
        self.empty()
        self.__geom__ = val
    # Operators
    # ---------
    def __and__(self, other):
        """geom1 & geom2 -> intersection."""
        return self.intersection(other)
    def __or__(self, other):
        """geom1 | geom2 -> union."""
        return self.union(other)
    def __sub__(self, other):
        """geom1 - geom2 -> difference."""
        return self.difference(other)
    def __xor__(self, other):
        """geom1 ^ geom2 -> symmetric difference."""
        return self.symmetric_difference(other)
    def __eq__(self, other):
        # Equal only when the other object is of the same class and has
        # identical coordinate tuples.
        # NOTE(review): relies on `coords`, which may not be implemented for
        # every geometry type — confirm behaviour for Polygon et al.
        return (
            isinstance(other, self.__class__) and
            tuple(self.coords) == tuple(other.coords)
        )
    def __ne__(self, other):
        return not self.__eq__(other)
    # Identity-based hashing is retained even though __eq__ is overridden.
    __hash__ = object.__hash__
    # Array and ctypes interfaces
    # ---------------------------
    @property
    def ctypes(self):
        """Return ctypes buffer"""
        # Abstract: concrete geometry classes provide the buffer.
        raise NotImplementedError
@property
def array_interface_base(self):
if sys.byteorder == 'little':
typestr = '<f8'
elif sys.byteorder == 'big':
typestr = '>f8'
else:
raise ValueError(
"Unsupported byteorder: neither little nor big-endian")
return {
'version': 3,
'typestr': typestr,
'data': self.ctypes,
}
    @property
    def __array_interface__(self):
        """Provide the Numpy array protocol."""
        # Abstract: implemented by concrete geometry classes.
        raise NotImplementedError
    # Coordinate access
    # -----------------
    def _get_coords(self):
        """Access to geometry's coordinates (CoordinateSequence)"""
        if self.is_empty:
            return []
        return CoordinateSequence(self)
    def _set_coords(self, ob):
        # Abstract setter; concrete classes rebuild the GEOS geometry.
        raise NotImplementedError(
            "set_coords must be provided by derived classes")
    coords = property(_get_coords, _set_coords)
    @property
    def xy(self):
        """Separate arrays of X and Y coordinate values"""
        # Abstract: implemented by coordinate-bearing subclasses.
        raise NotImplementedError
    # Python feature protocol
    @property
    def __geo_interface__(self):
        """Dictionary representation of the geometry"""
        # Abstract: implemented by concrete geometry classes.
        raise NotImplementedError
    # Type of geometry and its representations
    # ----------------------------------------
    def geometryType(self):
        """Name of the geometry's type (looked up from GEOS)."""
        return geometry_type_name(self._geom)
    @property
    def type(self):
        """Alias for geometryType()."""
        return self.geometryType()
    def to_wkb(self):
        """Deprecated alias for the `wkb` property."""
        warn("`to_wkb` is deprecated. Use the `wkb` property.",
             DeprecationWarning)
        return geom_to_wkb(self)
    def to_wkt(self):
        """Deprecated alias for the `wkt` property."""
        warn("`to_wkt` is deprecated. Use the `wkt` property.",
             DeprecationWarning)
        return geom_to_wkt(self)
    @property
    def wkt(self, **kw):
        """WKT representation of the geometry"""
        # NOTE(review): **kw is unreachable here — property getters receive
        # no keyword arguments, so WKTWriter always gets its defaults.
        return WKTWriter(lgeos, **kw).write(self)
    @property
    def wkb(self):
        """WKB representation of the geometry"""
        return WKBWriter(lgeos).write(self)
    @property
    def wkb_hex(self):
        """WKB hex representation of the geometry"""
        return WKBWriter(lgeos).write_hex(self)
    def svg(self, scale_factor=1., **kwargs):
        """Raises NotImplementedError"""
        # Abstract: concrete geometry classes render their own SVG element.
        raise NotImplementedError
    def _repr_svg_(self):
        """SVG representation for iPython notebook"""
        svg_top = '<svg xmlns="http://www.w3.org/2000/svg" ' \
            'xmlns:xlink="http://www.w3.org/1999/xlink" '
        if self.is_empty:
            # Nothing to draw: emit a self-closing, empty SVG element.
            return svg_top + '/>'
        else:
            # Establish SVG canvas that will fit all the data + small space
            xmin, ymin, xmax, ymax = self.bounds
            if xmin == xmax and ymin == ymax:
                # This is a point; buffer using an arbitrary size
                xmin, ymin, xmax, ymax = self.buffer(1).bounds
            else:
                # Expand bounds by a fraction of the data ranges
                expand = 0.04  # or 4%, same as R plots
                widest_part = max([xmax - xmin, ymax - ymin])
                expand_amount = widest_part * expand
                xmin -= expand_amount
                ymin -= expand_amount
                xmax += expand_amount
                ymax += expand_amount
            dx = xmax - xmin
            dy = ymax - ymin
            # Clamp the rendered size to a 100..300 px range on each axis.
            width = min([max([100., dx]), 300])
            height = min([max([100., dy]), 300])
            try:
                scale_factor = max([dx, dy]) / max([width, height])
            except ZeroDivisionError:
                scale_factor = 1.
            view_box = "{0} {1} {2} {3}".format(xmin, ymin, dx, dy)
            # matrix(1,0,0,-1,...) flips the y-axis: SVG y grows downward,
            # cartesian coordinates grow upward.
            transform = "matrix(1,0,0,-1,0,{0})".format(ymax + ymin)
            return svg_top + (
                'width="{1}" height="{2}" viewBox="{0}" '
                'preserveAspectRatio="xMinYMin meet">'
                '<g transform="{3}">{4}</g></svg>'
                ).format(view_box, width, height, transform,
                         self.svg(scale_factor))
    @property
    def geom_type(self):
        """Name of the geometry's type, such as 'Point'"""
        # PEP8-style alias of geometryType().
        return self.geometryType()
# Real-valued properties and methods
# ----------------------------------
    @property
    def area(self):
        """Unitless area of the geometry (float)"""
        # Delegates to the GEOS 'area' operation.
        return self.impl['area'](self)
    def distance(self, other):
        """Unitless distance to other geometry (float)"""
        # Delegates to the GEOS 'distance' operation.
        return self.impl['distance'](self, other)
    @property
    def length(self):
        """Unitless length of the geometry (float)"""
        # Delegates to the GEOS 'length' operation.
        return self.impl['length'](self)
# Topological properties
# ----------------------
    @property
    def boundary(self):
        """Returns a lower dimension geometry that bounds the object

        The boundary of a polygon is a line, the boundary of a line is a
        collection of points. The boundary of a point is an empty (null)
        collection.
        """
        # Wrap the raw GEOS result in the appropriate Shapely class.
        return geom_factory(self.impl['boundary'](self))
@property
def bounds(self):
"""Returns minimum bounding region (minx, miny, maxx, maxy)"""
if self.is_empty:
return ()
else:
return self.impl['bounds'](self)
    @property
    def centroid(self):
        """Returns the geometric center of the object"""
        # Note: the centroid is not guaranteed to lie within the geometry;
        # see representative_point for that.
        return geom_factory(self.impl['centroid'](self))
    @delegated
    def representative_point(self):
        """Returns a point guaranteed to be within the object, cheaply."""
        return geom_factory(self.impl['representative_point'](self))
    @property
    def convex_hull(self):
        """Imagine an elastic band stretched around the geometry: that's a
        convex hull, more or less

        The convex hull of a three member multipoint, for example, is a
        triangular polygon.
        """
        return geom_factory(self.impl['convex_hull'](self))
    @property
    def envelope(self):
        """A figure that envelopes the geometry"""
        # Delegates to the GEOS 'envelope' operation.
        return geom_factory(self.impl['envelope'](self))
    def buffer(self, distance, resolution=16, quadsegs=None,
               cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round,
               mitre_limit=5.0):
        """Returns a geometry with an envelope at a distance from the object's
        envelope

        A negative distance has a "shrink" effect. A zero distance may be used
        to "tidy" a polygon. The resolution of the buffer around each vertex of
        the object increases by increasing the resolution keyword parameter
        or second positional parameter. Note: the use of a `quadsegs` parameter
        is deprecated and will be gone from the next major release.

        The styles of caps are: CAP_STYLE.round (1), CAP_STYLE.flat (2), and
        CAP_STYLE.square (3).

        The styles of joins between offset segments are: JOIN_STYLE.round (1),
        JOIN_STYLE.mitre (2), and JOIN_STYLE.bevel (3).

        The mitre limit ratio is used for very sharp corners. The mitre ratio
        is the ratio of the distance from the corner to the end of the mitred
        offset corner. When two line segments meet at a sharp angle, a miter
        join will extend the original geometry. To prevent unreasonable
        geometry, the mitre limit allows controlling the maximum length of the
        join corner. Corners with a ratio which exceed the limit will be
        beveled.

        Example:

        >>> from shapely.wkt import loads
        >>> g = loads('POINT (0.0 0.0)')
        >>> g.buffer(1.0).area        # 16-gon approx of a unit radius circle
        3.1365484905459389
        >>> g.buffer(1.0, 128).area   # 128-gon approximation
        3.1415138011443009
        >>> g.buffer(1.0, 3).area     # triangle approximation
        3.0
        >>> list(g.buffer(1.0, cap_style='square').exterior.coords)
        [(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0)]
        >>> g.buffer(1.0, cap_style='square').area
        4.0
        """
        # Deprecated `quadsegs` still wins over `resolution` when supplied.
        if quadsegs is not None:
            warn(
                "The `quadsegs` argument is deprecated. Use `resolution`.",
                DeprecationWarning)
            res = quadsegs
        else:
            res = resolution
        if mitre_limit == 0.0:
            raise ValueError(
                'Cannot compute offset from zero-length line segment')
        # Fast path: default styles use the plain GEOS buffer.
        if cap_style == CAP_STYLE.round and join_style == JOIN_STYLE.round:
            return geom_factory(self.impl['buffer'](self, distance, res))
        # Styled buffering requires a newer GEOS.
        if 'buffer_with_style' not in self.impl:
            raise NotImplementedError("Styled buffering not available for "
                                      "GEOS versions < 3.2.")
        return geom_factory(self.impl['buffer_with_style'](self, distance, res,
                                                           cap_style,
                                                           join_style,
                                                           mitre_limit))
@delegated
def simplify(self, tolerance, preserve_topology=True):
"""Returns a simplified geometry produced by the Douglas-Puecker
algorithm
Coordinates of the simplified geometry will be no more than the
tolerance distance from the original. Unless the topology preserving
option is used, the algorithm may produce self-intersecting or
otherwise invalid geometries.
"""
if preserve_topology:
op = self.impl['topology_preserve_simplify']
else:
op = self.impl['simplify']
return geom_factory(op(self, tolerance))
# Binary operations
# -----------------
    def difference(self, other):
        """Returns the difference of the geometries"""
        # Points of self that are not in other.
        return geom_factory(self.impl['difference'](self, other))
    def intersection(self, other):
        """Returns the intersection of the geometries"""
        # Delegates to the GEOS 'intersection' operation.
        return geom_factory(self.impl['intersection'](self, other))
    def symmetric_difference(self, other):
        """Returns the symmetric difference of the geometries
        (Shapely geometry)"""
        # Delegates to the GEOS 'symmetric_difference' operation.
        return geom_factory(self.impl['symmetric_difference'](self, other))
    def union(self, other):
        """Returns the union of the geometries (Shapely geometry)"""
        # Delegates to the GEOS 'union' operation.
        return geom_factory(self.impl['union'](self, other))
# Unary predicates
# ----------------
    @property
    def has_z(self):
        """True if the geometry's coordinate sequence(s) have z values (are
        3-dimensional)"""
        return bool(self.impl['has_z'](self))
    @property
    def is_empty(self):
        """True if the set of points in this geometry is empty, else False"""
        # A geometry with no GEOS handle at all also counts as empty.
        return (self._geom is None) or bool(self.impl['is_empty'](self))
    @property
    def is_ring(self):
        """True if the geometry is a closed ring, else False"""
        return bool(self.impl['is_ring'](self))
@property
def is_closed(self):
"""True if the geometry is closed, else False
Applicable only to 1-D geometries."""
if self.geom_type == 'LinearRing':
return True
elif self.geom_type == 'LineString':
if 'is_closed' in self.impl:
return bool(self.impl['is_closed'](self))
else:
return self.coords[0] == self.coords[-1]
else:
return False
    @property
    def is_simple(self):
        """True if the geometry is simple, meaning that any self-intersections
        are only at boundary points, else False"""
        return bool(self.impl['is_simple'](self))
    @property
    def is_valid(self):
        """True if the geometry is valid (definition depends on sub-class),
        else False"""
        return bool(self.impl['is_valid'](self))
# Binary predicates
# -----------------
    def relate(self, other):
        """Returns the DE-9IM intersection matrix for the two geometries
        (string)"""
        return self.impl['relate'](self, other)
    def covers(self, other):
        """Returns True if the geometry covers the other, else False"""
        # Delegates to the GEOS 'covers' binary predicate.
        return bool(self.impl['covers'](self, other))
    def contains(self, other):
        """Returns True if the geometry contains the other, else False"""
        # Delegates to the GEOS 'contains' binary predicate.
        return bool(self.impl['contains'](self, other))
    def crosses(self, other):
        """Returns True if the geometries cross, else False"""
        # Delegates to the GEOS 'crosses' binary predicate.
        return bool(self.impl['crosses'](self, other))
    def disjoint(self, other):
        """Returns True if geometries are disjoint, else False"""
        # Delegates to the GEOS 'disjoint' binary predicate.
        return bool(self.impl['disjoint'](self, other))
    def equals(self, other):
        """Returns True if geometries are equal, else False"""
        # Delegates to the GEOS 'equals' binary predicate.
        return bool(self.impl['equals'](self, other))
    def intersects(self, other):
        """Returns True if geometries intersect, else False"""
        # Delegates to the GEOS 'intersects' binary predicate.
        return bool(self.impl['intersects'](self, other))
    def overlaps(self, other):
        """Returns True if geometries overlap, else False"""
        # Delegates to the GEOS 'overlaps' binary predicate.
        return bool(self.impl['overlaps'](self, other))
    def touches(self, other):
        """Returns True if geometries touch, else False"""
        # Delegates to the GEOS 'touches' binary predicate.
        return bool(self.impl['touches'](self, other))
    def within(self, other):
        """Returns True if geometry is within the other, else False"""
        # Delegates to the GEOS 'within' binary predicate.
        return bool(self.impl['within'](self, other))
def equals_exact(self, other, tolerance):
"""Returns True if geometries are equal to within a specified
tolerance"""
# return BinaryPredicateOp('equals_exact', self)(other, tolerance)
return bool(self.impl['equals_exact'](self, other, tolerance))
def almost_equals(self, other, decimal=6):
"""Returns True if geometries are equal at all coordinates to a
specified decimal place"""
return self.equals_exact(other, 0.5 * 10**(-decimal))
# Linear referencing
# ------------------
@delegated
def project(self, other, normalized=False):
"""Returns the distance along this geometry to a point nearest the
specified point
If the normalized arg is True, return the distance normalized to the
length of the linear geometry.
"""
if normalized:
op = self.impl['project_normalized']
else:
op = self.impl['project']
return op(self, other)
@delegated
def interpolate(self, distance, normalized=False):
"""Return a point at the specified distance along a linear geometry
If the normalized arg is True, the distance will be interpreted as a
fraction of the geometry's length.
"""
if normalized:
op = self.impl['interpolate_normalized']
else:
op = self.impl['interpolate']
return geom_factory(op(self, distance))
class BaseMultipartGeometry(BaseGeometry):
    """Base class for collections of one or more geometries (multi-part)."""
    def shape_factory(self, *args):
        # Factory for part instances, usually a geometry class
        raise NotImplementedError("To be implemented by derived classes")
    @property
    def ctypes(self):
        # Unlike single-part geometries, collections expose no flat buffer.
        raise NotImplementedError(
            "Multi-part geometries have no ctypes representations")
    @property
    def __array_interface__(self):
        """Provide the Numpy array protocol."""
        raise NotImplementedError("Multi-part geometries do not themselves "
                                  "provide the array interface")
    def _get_coords(self):
        raise NotImplementedError("Sub-geometries may have coordinate "
                                  "sequences, but collections do not")
    def _set_coords(self, ob):
        raise NotImplementedError("Sub-geometries may have coordinate "
                                  "sequences, but collections do not")
    @property
    def coords(self):
        # Override the base accessor: parts, not the collection, carry
        # coordinate sequences.
        raise NotImplementedError(
            "Multi-part geometries do not provide a coordinate sequence")
    @property
    def geoms(self):
        # Sequence view over the member geometries.
        if self.is_empty:
            return []
        return GeometrySequence(self, self.shape_factory)
    def __iter__(self):
        if not self.is_empty:
            return iter(self.geoms)
        else:
            return iter([])
    def __len__(self):
        if not self.is_empty:
            return len(self.geoms)
        else:
            return 0
    def __getitem__(self, index):
        if not self.is_empty:
            return self.geoms[index]
        else:
            # Indexing an empty tuple raises the expected IndexError.
            return ()[index]
    def __eq__(self, other):
        # Equal only to same-typed collections with pairwise-equal parts.
        return (
            isinstance(other, self.__class__) and
            len(self) == len(other) and
            all(x == y for x, y in zip(self, other))
        )
    def __ne__(self, other):
        return not self.__eq__(other)
    # __eq__ is overridden, so restore default identity-based hashing.
    __hash__ = object.__hash__
    def svg(self, scale_factor=1., color=None):
        """Returns a group of SVG elements for the multipart geometry.

        Parameters
        ==========
        scale_factor : float
            Multiplication factor for the SVG stroke-width. Default is 1.
        color : str, optional
            Hex string for stroke or fill color. Default is to use "#66cc99"
            if geometry is valid, and "#ff3333" if invalid.
        """
        if self.is_empty:
            return '<g />'
        if color is None:
            color = "#66cc99" if self.is_valid else "#ff3333"
        return '<g>' + \
            ''.join(p.svg(scale_factor, color) for p in self) + \
            '</g>'
class GeometrySequence(object):
    """
    Iterative access to members of a homogeneous multipart geometry.
    """
    # Attributes
    # ----------
    # shape_factory : callable
    #     Returns instances of Shapely geometries
    # _geom : c_void_p
    #     Ctypes pointer to the parent's GEOS geometry
    # _ndim : int
    #     Number of dimensions (2 or 3, generally)
    # __p__ : object
    #     Parent (Shapely) geometry
    shape_factory = None
    _geom = None
    __p__ = None
    _ndim = None

    def __init__(self, parent, type):
        self.shape_factory = type
        self.__p__ = parent

    def _update(self):
        # Re-sync with the parent, whose GEOS geometry may have changed.
        self._geom = self.__p__._geom
        self._ndim = self.__p__._ndim

    def _get_geom_item(self, i):
        # Wrap the i-th sub-geometry without taking ownership of the GEOS
        # pointer (the parent collection owns it).
        g = self.shape_factory()
        g._other_owned = True
        g._geom = lgeos.GEOSGetGeometryN(self._geom, i)
        g._ndim = self._ndim
        g.__p__ = self
        return g

    def __iter__(self):
        self._update()
        for i in range(self.__len__()):
            yield self._get_geom_item(i)

    def __len__(self):
        self._update()
        return lgeos.GEOSGetNumGeometries(self._geom)

    def __getitem__(self, key):
        self._update()
        m = self.__len__()
        if isinstance(key, int):
            if key + m < 0 or key >= m:
                raise IndexError("index out of range")
            if key < 0:
                i = m + key
            else:
                i = key
            return self._get_geom_item(i)
        elif isinstance(key, slice):
            if type(self) == HeterogeneousGeometrySequence:
                raise TypeError(
                    "Heterogenous geometry collections are not sliceable")
            res = []
            start, stop, stride = key.indices(m)
            for i in range(start, stop, stride):
                res.append(self._get_geom_item(i))
            # Rebuild a geometry of the parent's type from the slice.
            return type(self.__p__)(res or None)
        else:
            raise TypeError("key must be an index or slice")

    @property
    def _longest(self):
        """Length of the longest member's coordinate sequence (int)."""
        # BUG FIX: this property previously computed the maximum coordinate
        # count but never returned it, so it always evaluated to None. It
        # also shadowed the builtin ``max``.
        longest = 0
        for g in iter(self):
            count = len(g.coords)
            if count > longest:
                longest = count
        return longest
class HeterogeneousGeometrySequence(GeometrySequence):
    """
    Iterative access to a heterogeneous sequence of geometries.
    """
    def __init__(self, parent):
        # No single shape factory applies; parts are resolved per item.
        super(HeterogeneousGeometrySequence, self).__init__(parent, None)
    def _get_geom_item(self, i):
        # geom_factory inspects the GEOS pointer to pick the right class.
        sub = lgeos.GEOSGetGeometryN(self._geom, i)
        g = geom_factory(sub, parent=self)
        g._other_owned = True
        return g
def _test():
"""Test runner"""
import doctest
doctest.testmod()
# Run the doctest suite when executed as a script.
if __name__ == "__main__":
    _test()
| |
from __future__ import with_statement
from distutils.version import StrictVersion
from itertools import chain
from select import select
import os
import socket
import sys
import threading
import warnings
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs)
from redis.exceptions import (
RedisError,
ConnectionError,
TimeoutError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
ReadOnlyError
)
from redis.utils import HIREDIS_AVAILABLE
# Feature-detect capabilities of the optional hiredis C parser.
if HIREDIS_AVAILABLE:
    import hiredis
    hiredis_version = StrictVersion(hiredis.__version__)
    # hiredis >= 0.1.3 can route reply errors through user callables.
    HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
        hiredis_version >= StrictVersion('0.1.3')
    # hiredis >= 0.1.4 can be fed from a reusable byte buffer.
    HIREDIS_SUPPORTS_BYTE_BUFFER = \
        hiredis_version >= StrictVersion('0.1.4')
    if not HIREDIS_SUPPORTS_BYTE_BUFFER:
        msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
               "hiredis %s. Please consider upgrading." % hiredis.__version__)
        warnings.warn(msg)
    HIREDIS_USE_BYTE_BUFFER = True
    # only use byte buffer if hiredis supports it and the Python version
    # is >= 2.7
    if not HIREDIS_SUPPORTS_BYTE_BUFFER or (
            sys.version_info[0] == 2 and sys.version_info[1] < 7):
        HIREDIS_USE_BYTE_BUFFER = False
# RESP protocol framing bytes.
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
    """Marker wrapper for literal strings in Redis commands (command names
    and other hard-coded arguments), signalling that no encoding rules
    should be applied to them."""

    def __init__(self, value):
        # Collapse Token(Token(x)) down to the underlying raw value.
        self.value = value.value if isinstance(value, Token) else value

    def __repr__(self):
        return self.value

    __str__ = __repr__
class BaseParser(object):
    """Shared error-translation behavior for response parsers."""
    # Maps the leading error code of a "-" reply to the exception raised
    # (or returned) for it.
    EXCEPTION_CLASSES = {
        'ERR': ResponseError,
        'EXECABORT': ExecAbortError,
        'LOADING': BusyLoadingError,
        'NOSCRIPT': NoScriptError,
        'READONLY': ReadOnlyError,
    }

    def parse_error(self, response):
        "Parse an error response"
        error_code = response.split(' ')[0]
        exception_class = self.EXCEPTION_CLASSES.get(error_code)
        if exception_class is None:
            # Unknown code: keep the full message in a generic error.
            return ResponseError(response)
        # Strip the code and the following space from the message.
        return exception_class(response[len(error_code) + 1:])
class SocketBuffer(object):
    """Buffered reader over a socket: accumulates raw bytes in a BytesIO
    and serves length- and line-delimited reads from it."""
    def __init__(self, socket, socket_read_size):
        self._sock = socket
        self.socket_read_size = socket_read_size
        self._buffer = BytesIO()
        # number of bytes written to the buffer from the socket
        self.bytes_written = 0
        # number of bytes read from the buffer
        self.bytes_read = 0
    @property
    def length(self):
        # Bytes buffered but not yet consumed.
        return self.bytes_written - self.bytes_read
    def _read_from_socket(self, length=None):
        # Pull data from the socket into the buffer: at least `length`
        # bytes when given, otherwise a single recv's worth.
        socket_read_size = self.socket_read_size
        buf = self._buffer
        buf.seek(self.bytes_written)
        marker = 0
        try:
            while True:
                data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shutdown the socket
                if isinstance(data, bytes) and len(data) == 0:
                    raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                buf.write(data)
                data_length = len(data)
                self.bytes_written += data_length
                marker += data_length
                if length is not None and length > marker:
                    continue
                break
        except socket.timeout:
            raise TimeoutError("Timeout reading from socket")
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError("Error while reading from socket: %s" %
                                  (e.args,))
    def read(self, length):
        """Return `length` bytes of payload, consuming the trailing CRLF."""
        length = length + 2  # make sure to read the \r\n terminator
        # make sure we've read enough data from the socket
        if length > self.length:
            self._read_from_socket(length - self.length)
        self._buffer.seek(self.bytes_read)
        data = self._buffer.read(length)
        self.bytes_read += len(data)
        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()
        return data[:-2]
    def readline(self):
        """Return one CRLF-terminated line, without the terminator."""
        buf = self._buffer
        buf.seek(self.bytes_read)
        data = buf.readline()
        while not data.endswith(SYM_CRLF):
            # there's more data in the socket that we need
            self._read_from_socket()
            buf.seek(self.bytes_read)
            data = buf.readline()
        self.bytes_read += len(data)
        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()
        return data[:-2]
    def purge(self):
        # Reset the buffer and both offsets; existing bytes are dropped.
        self._buffer.seek(0)
        self._buffer.truncate()
        self.bytes_written = 0
        self.bytes_read = 0
    def close(self):
        self.purge()
        self._buffer.close()
        self._buffer = None
        self._sock = None
class PythonParser(BaseParser):
    "Plain Python parsing class"
    # Encoding used to decode bulk replies; None leaves them as bytes.
    encoding = None
    def __init__(self, socket_read_size):
        self.socket_read_size = socket_read_size
        self._sock = None
        self._buffer = None
    def __del__(self):
        try:
            self.on_disconnect()
        except Exception:
            pass
    def on_connect(self, connection):
        "Called when the socket connects"
        self._sock = connection._sock
        self._buffer = SocketBuffer(self._sock, self.socket_read_size)
        if connection.decode_responses:
            self.encoding = connection.encoding
    def on_disconnect(self):
        "Called when the socket disconnects"
        if self._sock is not None:
            self._sock.close()
            self._sock = None
        if self._buffer is not None:
            self._buffer.close()
            self._buffer = None
        self.encoding = None
    def can_read(self):
        # True when buffered bytes are waiting to be parsed.
        return self._buffer and bool(self._buffer.length)
    def read_response(self):
        """Parse and return one RESP reply (recursing for multi-bulk)."""
        response = self._buffer.readline()
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
        # First byte selects the RESP reply type.
        byte, response = byte_to_chr(response[0]), response[1:]
        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))
        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
class HiredisParser(BaseParser):
    "Parser class for connections using Hiredis"
    def __init__(self, socket_read_size):
        if not HIREDIS_AVAILABLE:
            raise RedisError("Hiredis is not installed")
        self.socket_read_size = socket_read_size
        if HIREDIS_USE_BYTE_BUFFER:
            # Reusable recv_into() buffer to avoid per-read allocations.
            self._buffer = bytearray(socket_read_size)
    def __del__(self):
        try:
            self.on_disconnect()
        except Exception:
            pass
    def on_connect(self, connection):
        self._sock = connection._sock
        kwargs = {
            'protocolError': InvalidResponse,
            'replyError': self.parse_error,
        }
        # hiredis < 0.1.3 doesn't support functions that create exceptions
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            kwargs['replyError'] = ResponseError
        if connection.decode_responses:
            kwargs['encoding'] = connection.encoding
        self._reader = hiredis.Reader(**kwargs)
        # Cache slot for a reply already pulled out by can_read().
        self._next_response = False
    def on_disconnect(self):
        self._sock = None
        self._reader = None
        self._next_response = False
    def can_read(self):
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
        if self._next_response is False:
            self._next_response = self._reader.gets()
        return self._next_response is not False
    def read_response(self):
        """Return one parsed reply, feeding the reader from the socket
        until hiredis has a complete message."""
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
        # _next_response might be cached from a can_read() call
        if self._next_response is not False:
            response = self._next_response
            self._next_response = False
            return response
        response = self._reader.gets()
        socket_read_size = self.socket_read_size
        while response is False:
            try:
                if HIREDIS_USE_BYTE_BUFFER:
                    bufflen = self._sock.recv_into(self._buffer)
                    if bufflen == 0:
                        raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                else:
                    buffer = self._sock.recv(socket_read_size)
                    # an empty string indicates the server shutdown the socket
                    if not isinstance(buffer, bytes) or len(buffer) == 0:
                        raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
            except socket.timeout:
                raise TimeoutError("Timeout reading from socket")
            except socket.error:
                e = sys.exc_info()[1]
                raise ConnectionError("Error while reading from socket: %s" %
                                      (e.args,))
            if HIREDIS_USE_BYTE_BUFFER:
                self._reader.feed(self._buffer, 0, bufflen)
            else:
                self._reader.feed(buffer)
            # proactively, but not conclusively, check if more data is in the
            # buffer. if the data received doesn't end with \r\n, there's more.
            if HIREDIS_USE_BYTE_BUFFER:
                if bufflen > 2 and \
                        self._buffer[bufflen - 2:bufflen] != SYM_CRLF:
                    continue
            else:
                if not buffer.endswith(SYM_CRLF):
                    continue
            response = self._reader.gets()
        # if an older version of hiredis is installed, we need to attempt
        # to convert ResponseErrors to their appropriate types.
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            if isinstance(response, ResponseError):
                response = self.parse_error(response.args[0])
            elif isinstance(response, list) and response and \
                    isinstance(response[0], ResponseError):
                response[0] = self.parse_error(response[0].args[0])
        # if the response is a ConnectionError or the response is a list and
        # the first item is a ConnectionError, raise it as something bad
        # happened
        if isinstance(response, ConnectionError):
            raise response
        elif isinstance(response, list) and response and \
                isinstance(response[0], ConnectionError):
            raise response[0]
        return response
# Prefer the C-accelerated hiredis parser when it is installed.
if HIREDIS_AVAILABLE:
    DefaultParser = HiredisParser
else:
    DefaultParser = PythonParser
class Connection(object):
    "Manages TCP communication to and from a Redis server"
    description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>"
    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, socket_connect_timeout=None,
                 socket_keepalive=False, socket_keepalive_options=None,
                 retry_on_timeout=False, encoding='utf-8',
                 encoding_errors='strict', decode_responses=False,
                 parser_class=DefaultParser, socket_read_size=65536):
        # pid lets pool code detect use of a connection across fork().
        self.pid = os.getpid()
        self.host = host
        self.port = int(port)
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.socket_connect_timeout = socket_connect_timeout or socket_timeout
        self.socket_keepalive = socket_keepalive
        self.socket_keepalive_options = socket_keepalive_options or {}
        self.retry_on_timeout = retry_on_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self.decode_responses = decode_responses
        self._sock = None
        self._parser = parser_class(socket_read_size=socket_read_size)
        self._description_args = {
            'host': self.host,
            'port': self.port,
            'db': self.db,
        }
        self._connect_callbacks = []
    def __repr__(self):
        return self.description_format % self._description_args
    def __del__(self):
        try:
            self.disconnect()
        except Exception:
            pass
    def register_connect_callback(self, callback):
        # Callback is invoked with this connection after every (re)connect.
        self._connect_callbacks.append(callback)
    def clear_connect_callbacks(self):
        self._connect_callbacks = []
    def connect(self):
        "Connects to the Redis server if not already connected"
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError(self._error_message(e))
        self._sock = sock
        try:
            self.on_connect()
        except RedisError:
            # clean up after any error in on_connect
            self.disconnect()
            raise
        # run any user callbacks. right now the only internal callback
        # is for pubsub channel/pattern resubscription
        for callback in self._connect_callbacks:
            callback(self)
    def _connect(self):
        "Create a TCP socket connection"
        # we want to mimic what socket.create_connection does to support
        # ipv4/ipv6, but we want to set options prior to calling
        # socket.connect()
        err = None
        for res in socket.getaddrinfo(self.host, self.port, 0,
                                      socket.SOCK_STREAM):
            family, socktype, proto, canonname, socket_address = res
            sock = None
            try:
                sock = socket.socket(family, socktype, proto)
                # TCP_NODELAY
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # TCP_KEEPALIVE
                if self.socket_keepalive:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                    for k, v in iteritems(self.socket_keepalive_options):
                        sock.setsockopt(socket.SOL_TCP, k, v)
                # set the socket_connect_timeout before we connect
                sock.settimeout(self.socket_connect_timeout)
                # connect
                sock.connect(socket_address)
                # set the socket_timeout now that we're connected
                sock.settimeout(self.socket_timeout)
                return sock
            except socket.error as _:
                # remember the error and try the next resolved address
                err = _
                if sock is not None:
                    sock.close()
        if err is not None:
            raise err
        raise socket.error("socket.getaddrinfo returned an empty list")
    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting to %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])
    def on_connect(self):
        "Initialize the connection, authenticate and select a database"
        self._parser.on_connect(self)
        # if a password is specified, authenticate
        if self.password:
            self.send_command('AUTH', self.password)
            if nativestr(self.read_response()) != 'OK':
                raise AuthenticationError('Invalid Password')
        # if a database is specified, switch to it
        if self.db:
            self.send_command('SELECT', self.db)
            if nativestr(self.read_response()) != 'OK':
                raise ConnectionError('Invalid Database')
    def disconnect(self):
        "Disconnects from the Redis server"
        self._parser.on_disconnect()
        if self._sock is None:
            return
        try:
            self._sock.shutdown(socket.SHUT_RDWR)
            self._sock.close()
        except socket.error:
            pass
        self._sock = None
    def send_packed_command(self, command):
        "Send an already packed command to the Redis server"
        if not self._sock:
            self.connect()
        try:
            if isinstance(command, str):
                command = [command]
            for item in command:
                self._sock.sendall(item)
        except socket.timeout:
            self.disconnect()
            raise TimeoutError("Timeout writing to socket")
        except socket.error:
            e = sys.exc_info()[1]
            self.disconnect()
            if len(e.args) == 1:
                _errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                _errno, errmsg = e.args
            raise ConnectionError("Error %s while writing to socket. %s." %
                                  (_errno, errmsg))
        except:
            # deliberately bare: always drop the connection before
            # re-raising whatever interrupted the write
            self.disconnect()
            raise
    def send_command(self, *args):
        "Pack and send a command to the Redis server"
        self.send_packed_command(self.pack_command(*args))
    def can_read(self, timeout=0):
        "Poll the socket to see if there's data that can be read."
        sock = self._sock
        if not sock:
            self.connect()
            sock = self._sock
        return self._parser.can_read() or \
            bool(select([sock], [], [], timeout)[0])
    def read_response(self):
        "Read the response from a previously sent command"
        try:
            response = self._parser.read_response()
        except:
            # deliberately bare: drop the connection on any failure, then
            # re-raise
            self.disconnect()
            raise
        if isinstance(response, ResponseError):
            raise response
        return response
    def encode(self, value):
        "Return a bytestring representation of the value"
        if isinstance(value, Token):
            return b(value.value)
        elif isinstance(value, bytes):
            return value
        elif isinstance(value, (int, long)):
            value = b(str(value))
        elif isinstance(value, float):
            value = b(repr(value))
        elif not isinstance(value, basestring):
            value = str(value)
        if isinstance(value, unicode):
            value = value.encode(self.encoding, self.encoding_errors)
        return value
    def pack_command(self, *args):
        "Pack a series of arguments into the Redis protocol"
        output = []
        # the client might have included 1 or more literal arguments in
        # the command name, e.g., 'CONFIG GET'. The Redis server expects these
        # arguments to be sent separately, so split the first argument
        # manually. All of these arguments get wrapped in the Token class
        # to prevent them from being encoded.
        command = args[0]
        if ' ' in command:
            args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
        else:
            args = (Token(command),) + args[1:]
        buff = SYM_EMPTY.join(
            (SYM_STAR, b(str(len(args))), SYM_CRLF))
        for arg in imap(self.encode, args):
            # to avoid large string mallocs, chunk the command into the
            # output list if we're sending large values
            if len(buff) > 6000 or len(arg) > 6000:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
                output.append(buff)
                output.append(arg)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
                                       SYM_CRLF, arg, SYM_CRLF))
        output.append(buff)
        return output
    def pack_commands(self, commands):
        "Pack multiple commands into the Redis protocol"
        output = []
        pieces = []
        buffer_length = 0
        for cmd in commands:
            for chunk in self.pack_command(*cmd):
                pieces.append(chunk)
                buffer_length += len(chunk)
            # flush accumulated pieces once they exceed the chunk size
            if buffer_length > 6000:
                output.append(SYM_EMPTY.join(pieces))
                buffer_length = 0
                pieces = []
        if pieces:
            output.append(SYM_EMPTY.join(pieces))
        return output
class SSLConnection(Connection):
    # TCP connection whose socket is wrapped with SSL after connecting.
    description_format = "SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>"
    def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
                 ssl_ca_certs=None, **kwargs):
        if not ssl_available:
            raise RedisError("Python wasn't built with SSL support")
        super(SSLConnection, self).__init__(**kwargs)
        self.keyfile = ssl_keyfile
        self.certfile = ssl_certfile
        if ssl_cert_reqs is None:
            ssl_cert_reqs = ssl.CERT_NONE
        elif isinstance(ssl_cert_reqs, basestring):
            # accept the string aliases 'none'/'optional'/'required'
            CERT_REQS = {
                'none': ssl.CERT_NONE,
                'optional': ssl.CERT_OPTIONAL,
                'required': ssl.CERT_REQUIRED
            }
            if ssl_cert_reqs not in CERT_REQS:
                raise RedisError(
                    "Invalid SSL Certificate Requirements Flag: %s" %
                    ssl_cert_reqs)
            ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
        self.cert_reqs = ssl_cert_reqs
        self.ca_certs = ssl_ca_certs
    def _connect(self):
        "Wrap the socket with SSL support"
        sock = super(SSLConnection, self)._connect()
        sock = ssl.wrap_socket(sock,
                               cert_reqs=self.cert_reqs,
                               keyfile=self.keyfile,
                               certfile=self.certfile,
                               ca_certs=self.ca_certs)
        return sock
class UnixDomainSocketConnection(Connection):
    """Connection to a Redis server over a Unix domain socket."""
    description_format = "UnixDomainSocketConnection<path=%(path)s,db=%(db)s>"

    def __init__(self, path='', db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', decode_responses=False,
                 retry_on_timeout=False,
                 parser_class=DefaultParser, socket_read_size=65536):
        """Record connection settings; no socket is opened here.

        :param path: filesystem path of the Redis unix socket
        :param db: database number selected after connecting
        """
        # Remember the creating process so pools can detect forks.
        self.pid = os.getpid()
        self.path = path
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.retry_on_timeout = retry_on_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self.decode_responses = decode_responses
        # The socket is created lazily by _connect().
        self._sock = None
        self._parser = parser_class(socket_read_size=socket_read_size)
        self._description_args = {'path': self.path, 'db': self.db}
        self._connect_callbacks = []

    def _connect(self):
        "Create a Unix domain socket connection"
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect(self.path)
        return sock

    def _error_message(self, exception):
        """Build a readable message from a socket error."""
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to unix socket: %s. %s." % \
                (self.path, exception.args[0])
        return "Error %s connecting to unix socket: %s. %s." % \
            (exception.args[0], self.path, exception.args[1])
class ConnectionPool(object):
    "Generic connection pool"
    @classmethod
    def from_url(cls, url, db=None, **kwargs):
        """
        Return a connection pool configured from the given URL.
        For example::
            redis://[:password]@localhost:6379/0
            rediss://[:password]@localhost:6379/0
            unix://[:password]@/path/to/socket.sock?db=0
        Three URL schemes are supported:
            redis:// creates a normal TCP socket connection
            rediss:// creates a SSL wrapped TCP socket connection
            unix:// creates a Unix Domain Socket connection
        There are several ways to specify a database number. The parse function
        will return the first specified option:
            1. A ``db`` querystring option, e.g. redis://localhost?db=0
            2. If using the redis:// scheme, the path argument of the url, e.g.
               redis://localhost/0
            3. The ``db`` argument to this function.
        If none of these options are specified, db=0 is used.
        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. In the case
        of conflicting arguments, querystring arguments always win.
        """
        # Keep the raw string around: the python2.6 workaround below needs
        # to re-parse a truncated copy of it.
        url_string = url
        url = urlparse(url)
        qs = ''
        # in python2.6, custom URL schemes don't recognize querystring values
        # they're left as part of the url.path.
        if '?' in url.path and not url.query:
            # chop the querystring including the ? off the end of the url
            # and reparse it.
            qs = url.path.split('?', 1)[1]
            url = urlparse(url_string[:-(len(qs) + 1)])
        else:
            qs = url.query
        url_options = {}
        # parse_qs maps each key to a list of values; keep only the first.
        for name, value in iteritems(parse_qs(qs)):
            if value and len(value) > 0:
                url_options[name] = value[0]
        # We only support redis:// and unix:// schemes.
        if url.scheme == 'unix':
            url_options.update({
                'password': url.password,
                'path': url.path,
                'connection_class': UnixDomainSocketConnection,
            })
        else:
            url_options.update({
                'host': url.hostname,
                'port': int(url.port or 6379),
                'password': url.password,
            })
            # If there's a path argument, use it as the db argument if a
            # querystring value wasn't specified
            if 'db' not in url_options and url.path:
                try:
                    url_options['db'] = int(url.path.replace('/', ''))
                except (AttributeError, ValueError):
                    pass
            if url.scheme == 'rediss':
                url_options['connection_class'] = SSLConnection
        # last shot at the db value
        url_options['db'] = int(url_options.get('db', db or 0))
        # update the arguments from the URL values
        kwargs.update(url_options)
        # backwards compatability
        if 'charset' in kwargs:
            warnings.warn(DeprecationWarning(
                '"charset" is deprecated. Use "encoding" instead'))
            kwargs['encoding'] = kwargs.pop('charset')
        if 'errors' in kwargs:
            warnings.warn(DeprecationWarning(
                '"errors" is deprecated. Use "encoding_errors" instead'))
            kwargs['encoding_errors'] = kwargs.pop('errors')
        return cls(**kwargs)
    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        """
        Create a connection pool. If max_connections is set, then this
        object raises redis.ConnectionError when the pool's limit is reached.
        By default, TCP connections are created connection_class is specified.
        Use redis.UnixDomainSocketConnection for unix sockets.
        Any additional keyword arguments are passed to the constructor of
        connection_class.
        """
        # 2**31 effectively means "unlimited" when no cap was requested.
        max_connections = max_connections or 2 ** 31
        if not isinstance(max_connections, (int, long)) or max_connections < 0:
            raise ValueError('"max_connections" must be a positive integer')
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections
        self.reset()
    def __repr__(self):
        return "%s<%s>" % (
            type(self).__name__,
            self.connection_class.description_format % self.connection_kwargs,
        )
    def reset(self):
        """Discard all bookkeeping and start with an empty pool."""
        # Record the owning process; _checkpid compares against it to
        # detect a fork, where inherited sockets must not be reused.
        self.pid = os.getpid()
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()
        self._check_lock = threading.Lock()
    def _checkpid(self):
        # Fork guard: if our recorded pid differs from the current one,
        # reset the pool so the child never shares the parent's sockets.
        if self.pid != os.getpid():
            with self._check_lock:
                if self.pid == os.getpid():
                    # another thread already did the work while we waited
                    # on the lock.
                    return
                self.disconnect()
                self.reset()
    def get_connection(self, command_name, *keys, **options):
        "Get a connection from the pool"
        self._checkpid()
        try:
            # Reuse an idle connection if one exists (LIFO order).
            connection = self._available_connections.pop()
        except IndexError:
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection
    def make_connection(self):
        "Create a new connection"
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)
    def release(self, connection):
        "Releases the connection back to the pool"
        self._checkpid()
        # A connection created in another process (pre-fork) is simply
        # dropped rather than returned to this pool.
        if connection.pid != self.pid:
            return
        self._in_use_connections.remove(connection)
        self._available_connections.append(connection)
    def disconnect(self):
        "Disconnects all connections in the pool"
        all_conns = chain(self._available_connections,
                          self._in_use_connections)
        for connection in all_conns:
            connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
    """
    Thread-safe blocking connection pool::
        >>> from redis.client import Redis
        >>> client = Redis(connection_pool=BlockingConnectionPool())
    It performs the same function as the default
    ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
    it maintains a pool of reusable connections that can be shared by
    multiple redis clients (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all of connections are in use, rather than
    raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
    ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
    makes the client wait ("blocks") for a specified number of seconds until
    a connection becomes available.
    Use ``max_connections`` to increase / decrease the pool size::
        >>> pool = BlockingConnectionPool(max_connections=10)
    Use ``timeout`` to tell it either how many seconds to wait for a connection
    to become available, or to block forever:
        # Block forever.
        >>> pool = BlockingConnectionPool(timeout=None)
        # Raise a ``ConnectionError`` after five seconds if a connection is
        # not available.
        >>> pool = BlockingConnectionPool(timeout=5)
    """
    def __init__(self, max_connections=50, timeout=20,
                 connection_class=Connection, queue_class=LifoQueue,
                 **connection_kwargs):
        # queue_class must provide the Queue interface (put_nowait / get);
        # LifoQueue keeps recently used connections hot.
        self.queue_class = queue_class
        self.timeout = timeout
        super(BlockingConnectionPool, self).__init__(
            connection_class=connection_class,
            max_connections=max_connections,
            **connection_kwargs)
    def reset(self):
        """Rebuild the internal queue, dropping all prior bookkeeping."""
        self.pid = os.getpid()
        self._check_lock = threading.Lock()
        # Create and fill up a thread safe queue with ``None`` values.
        self.pool = self.queue_class(self.max_connections)
        while True:
            try:
                self.pool.put_nowait(None)
            except Full:
                break
        # Keep a list of actual connection instances so that we can
        # disconnect them later.
        self._connections = []
    def make_connection(self):
        "Make a fresh connection."
        connection = self.connection_class(**self.connection_kwargs)
        self._connections.append(connection)
        return connection
    def get_connection(self, command_name, *keys, **options):
        """
        Get a connection, blocking for ``self.timeout`` until a connection
        is available from the pool.
        If the connection returned is ``None`` then creates a new connection.
        Because we use a last-in first-out queue, the existing connections
        (having been returned to the pool after the initial ``None`` values
        were added) will be returned before ``None`` values. This means we only
        create new connections when we need to, i.e.: the actual number of
        connections will only increase in response to demand.
        """
        # Make sure we haven't changed process.
        self._checkpid()
        # Try and get a connection from the pool. If one isn't available within
        # self.timeout then raise a ``ConnectionError``.
        connection = None
        try:
            connection = self.pool.get(block=True, timeout=self.timeout)
        except Empty:
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code.
            raise ConnectionError("No connection available.")
        # If the ``connection`` is actually ``None`` then that's a cue to make
        # a new connection to add to the pool.
        if connection is None:
            connection = self.make_connection()
        return connection
    def release(self, connection):
        "Releases the connection back to the pool."
        # Make sure we haven't changed process.
        self._checkpid()
        # Connections created before a fork belong to the parent; drop them.
        if connection.pid != self.pid:
            return
        # Put the connection back into the pool.
        try:
            self.pool.put_nowait(connection)
        except Full:
            # perhaps the pool has been reset() after a fork? regardless,
            # we don't want this connection
            pass
    def disconnect(self):
        "Disconnects all connections in the pool."
        for connection in self._connections:
            connection.disconnect()
| |
#!/usr/bin/env python
"""Function to compute a user-defined distance function among lists.
Distances can be between one (intra-distance) or two (inter-distances) lists.
Author: Federico Tomasi
Copyright (c) 2016, Federico Tomasi.
Licensed under the FreeBSD license (see LICENSE.txt).
"""
import multiprocessing as mp
import numpy as np
import scipy
import scipy.spatial
from itertools import chain, ifilter, combinations, islice
from icing.utils.extra import term_processes, progressbar
# Python 2/3 compatibility: expose `xrange` on Python 3, where only the
# (already lazy) `range` builtin exists.
try:
    xrange
except NameError:  # python3
    xrange = range
def _min(generator, func):
try:
return func(generator)
except ValueError:
return 0
def dnearest_inter_padding(l1, l2, dist_function, filt=None, func=min):
    """Compute in a parallel way a dist2nearest for two 1-d arrays.
    Use this function with different arrays; if l1 == l2, then the
    results is a 0-array.
    Parameters
    ----------
    l1, l2 : array_like
        1-dimensional arrays. Compute the nearest element of l2 to l1.
    dist_function : function
        Function to use for the distance computation.
    filt : function or None, optional
        Filter based on the result of the distance function.
    func : function, optional, default: min (built-in function)
        Function to apply for selecting the best. Use min for distances,
        max for similarities (consider numpy variants for speed).
    Returns
    -------
    dist2nearest : array_like
        1-D array (multiprocessing.Array of doubles, one slot per l1 item)
    """
    def _internal(l1, l2, n, idx, nprocs, shared_arr, dist_function):
        # Worker: strided partition -- process `idx` handles elements
        # idx, idx + nprocs, idx + 2*nprocs, ... to spread the work evenly.
        for i in xrange(idx, n, nprocs):
            # if i % 100 == 0:
            #     progressbar(i, n)
            # _min maps an empty (fully filtered) candidate set to 0.
            shared_arr[i] = _min(
                ifilter(filt, (dist_function(l1[i], el2) for el2 in l2)),
                func)
    n = len(l1)
    nprocs = min(mp.cpu_count(), n)
    # Shared double array so worker processes can write results in place.
    shared_array = mp.Array('d', [0.] * n)
    procs = []
    try:
        for idx in xrange(nprocs):
            p = mp.Process(target=_internal,
                           args=(l1, l2, n, idx, nprocs, shared_array,
                                 dist_function))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
    except (KeyboardInterrupt, SystemExit):
        term_processes(procs, 'Exit signal received\n')
    except BaseException as msg:
        term_processes(procs, 'ERROR: %s\n' % msg)
    # progressbar(n, n)
    return shared_array
def dnearest_intra_padding(l1, dist_function, filt=None, func=min):
    """Compute in a parallel way a dist2nearest for a 1-d arrays.
    For each element in l1, find its closest (without considering itself).
    Parameters
    ----------
    l1 : array_like
        1-dimensional array.
    dist_function : function
        Function to use for the distance computation.
    filt : function or None, optional
        Filter based on the result of the distance function.
    func : function, optional, default: min (built-in function)
        Function to apply for selecting the best value.
    Returns
    -------
    dist2nearest : array_like
        1-D array (multiprocessing.Array of doubles, one slot per l1 item)
    """
    def _internal(l1, n, idx, nprocs, shared_arr, dist_function):
        # Worker: strided partition of the indices, as in
        # dnearest_inter_padding, but element i is compared against every
        # other element of l1 except itself (the two chained ranges skip i).
        for i in xrange(idx, n, nprocs):
            # if i % 100 == 0:
            #     progressbar(i, n)
            shared_arr[i] = _min(ifilter(filt, chain(
                (dist_function(l1[i], l1[j]) for j in xrange(0, i)),
                (dist_function(l1[i], l1[j]) for j in xrange(i + 1, n))
            )), func)
    n = len(l1)
    nprocs = min(mp.cpu_count(), n)
    # Shared double array written in place by the workers.
    shared_array = mp.Array('d', [0.] * n)
    procs = []
    try:
        for idx in xrange(nprocs):
            p = mp.Process(target=_internal,
                           args=(l1, n, idx, nprocs, shared_array,
                                 dist_function))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
    except (KeyboardInterrupt, SystemExit):
        term_processes(procs, 'Exit signal received\n')
    except BaseException as msg:
        term_processes(procs, 'ERROR: %s\n' % msg)
    # progressbar(n, n)
    return shared_array
def dm_dense_inter_padding(l1, l2, dist_function, condensed=False):
    """Compute in a parallel way a distance matrix for a 1-d array.
    Parameters
    ----------
    l1, l2 : array_like
        1-dimensional arrays. Compute the distance matrix for each couple of
        elements of l1 and l2.
    dist_function : function
        Function to use for the distance computation.
    condensed : bool, optional
        If True, return the flattened matrix instead of the NxM array.
    Returns
    -------
    dist_matrix : array_like
        NxM distance matrix (rows: elements of l1, columns: elements of l2).
    """
    def _internal(l1, l2, n, idx, nprocs, shared_arr, dist_function):
        # Worker: each process fills whole rows, strided by nprocs.
        for i in xrange(idx, n, nprocs):
            if i % 100 == 0:
                progressbar(i, n)
            shared_arr[i] = [dist_function(l1[i], el2) for el2 in l2]
    n, m = len(l1), len(l2)
    nprocs = min(mp.cpu_count(), n)
    # index = mp.Value('i', 0)
    # lock = mp.Lock()
    # View the shared flat buffer as an (n, m) numpy array; workers write
    # rows directly into the shared memory.
    shared_array = np.frombuffer(mp.Array('d', n*m).get_obj()).reshape((n, m))
    procs = []
    try:
        for idx in xrange(nprocs):
            p = mp.Process(target=_internal,
                           args=(l1, l2, n, idx, nprocs, shared_array,
                                 dist_function))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
    except (KeyboardInterrupt, SystemExit):
        term_processes(procs, 'Exit signal received\n')
    except BaseException as msg:
        term_processes(procs, 'ERROR: %s\n' % msg)
    # progressbar(n,n)
    return shared_array.flatten() if condensed else shared_array
def dm_dense_intra_padding(l1, dist_function, condensed=False):
    """Compute in a parallel way a distance matrix for a 1-d array.
    Parameters
    ----------
    l1 : array_like
        1-dimensional array. Compute the distance matrix for each couple of
        elements of l1.
    dist_function : function
        Function to use for the distance computation.
    condensed : bool, optional
        If True, return the condensed (scipy squareform) representation.
    Returns
    -------
    dist_matrix : array_like
        Symmetric NxN distance matrix for each input_array element.
    """
    def _internal(l1, n, idx, nprocs, shared_arr, dist_function):
        # Worker: fill only the strict upper triangle, rows strided by
        # nprocs; symmetry is restored after the join (A + A.T below).
        for i in xrange(idx, n, nprocs):
            if i % 2 == 0:
                progressbar(i, n)
            # shared_arr[i, i:] = [dist_function(l1[i], el2) for el2 in l2]
            for j in xrange(i + 1, n):
                shared_arr[i, j] = dist_function(l1[i], l1[j])
                # if shared_arr[idx, j] == 0:
                #     print l1[i].junction, '\n', l1[j].junction, '\n----------'
    n = len(l1)
    nprocs = min(mp.cpu_count(), n)
    # Shared flat buffer viewed as an (n, n) numpy array.
    shared_array = np.frombuffer(mp.Array('d', n*n).get_obj()).reshape((n, n))
    procs = []
    try:
        for idx in xrange(nprocs):
            p = mp.Process(target=_internal,
                           args=(l1, n, idx, nprocs, shared_array,
                                 dist_function))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
    except (KeyboardInterrupt, SystemExit):
        term_processes(procs, 'Exit signal received\n')
    except BaseException as msg:
        term_processes(procs, 'ERROR: %s\n' % msg)
    progressbar(n, n)
    # Upper triangle + its transpose gives the full symmetric matrix
    # (the diagonal is zero by construction).
    dist_matrix = shared_array + shared_array.T
    if condensed:
        dist_matrix = scipy.spatial.distance.squareform(dist_matrix)
    return dist_matrix
def _job(n, X, tol):
i, j = n
if len(X[i].setV & X[j].setV) > 0 and \
abs(X[i].junction_length - X[j].junction_length) < tol:
return (i, j)
def sm_sparse(X, metric, tol):
    """Compute in a parallel way a sim matrix for a 1-d array.
    Only pairs (i, j) with i < j whose records share at least one V gene
    and whose junction lengths differ by at most `tol` are scored.
    Parameters
    ----------
    X : array_like
        1-dimensional array of records; each record exposes `setV` (a set)
        and `junction_length` (a number).
    metric : function
        Function to use for the similarity computation; only strictly
        positive results are stored.
    tol : numeric
        Maximum junction-length difference for a pair to be considered.
    Returns
    -------
    data, rows, cols : array_like
        COO-style triplets of the sparse similarity matrix (upper triangle
        only, since pairs come from combinations with i < j).
    """
    def _internal(X, metric, iterator, idx, return_queue):
        # Worker variant that grows arrays with np.append (re-allocates on
        # every hit). Kept for reference; _internal_deque below is the one
        # actually launched.
        data = np.empty(0, dtype=float)
        rows = np.empty(0, dtype=int)
        cols = np.empty(0, dtype=int)
        append = np.append
        for i, j in iterator:
            res = metric(X[i], X[j])
            if res > 0:
                data = append(data, res)
                rows = append(rows, i)
                cols = append(cols, j)
        return_queue.put((data, rows, cols), False)
        return_queue.put(None, True)  # sentinel: this worker is finished
    def _internal_deque(X, metric, iterator, idx, return_queue):
        # Worker: collect positive similarities in a deque, then convert
        # them to numpy arrays in a single pass before posting the result.
        from collections import deque
        deq = deque()
        appendleft = deq.appendleft
        popleft = deq.popleft
        for i, j in iterator:
            res = metric(X[i], X[j])
            if res > 0:  # np.random.ranf(1)[0] / 10:
                appendleft((res, i, j))
        len_d = len(deq)
        data = np.empty(len_d, dtype=float)
        rows = np.empty(len_d, dtype=int)
        cols = np.empty(len_d, dtype=int)
        for i in xrange(len_d):
            res = popleft()
            data[i] = res[0]
            rows[i] = res[1]
            cols[i] = res[2]
        return_queue.put((data, rows, cols), False)
        return_queue.put(None, True)  # sentinel: this worker is finished
    n = X.shape[0]
    nprocs = min(mp.cpu_count(), n)
    # NOTE: a Pool-based construction of `iterator` and a junction-length
    # bucketing optimisation ("opt_iterator") were prototyped here and left
    # commented out; see version control history for the removed drafts.
    # Candidate pairs: only couples sharing a V gene and with junction
    # lengths within `tol` are ever passed to `metric`.
    iterator = list(
        (i, j) for i, j in combinations(xrange(X.shape[0]), 2) if
        len(X[i].setV & X[j].setV) > 0 and
        abs(X[i].junction_length - X[j].junction_length) <= tol)
    len_it = len(iterator)
    procs = []
    manager = mp.Manager()
    return_queue = manager.Queue()
    data = np.empty(0, dtype=float)
    rows = np.empty(0, dtype=int)
    cols = np.empty(0, dtype=int)
    try:
        for idx in xrange(nprocs):
            # Hand each worker a contiguous slice of the candidate pairs.
            num_elem = int(len_it / nprocs) + 1
            itera = iterator[:num_elem]
            iterator = iterator[num_elem:]
            p = mp.Process(
                target=_internal_deque,
                args=(X, metric, itera, idx, return_queue))
            p.start()
            procs.append(p)
        count = 0
        # Drain the queue until every worker posted its None sentinel,
        # concatenating the partial triplet arrays along the way.
        while count < nprocs:
            v = return_queue.get(True)
            if v is None:
                count += 1
                continue
            data = np.hstack((data, v[0]))
            rows = np.hstack((rows, v[1]))
            cols = np.hstack((cols, v[2]))
        for p in procs:
            p.join()
        assert return_queue.empty()
    except (KeyboardInterrupt, SystemExit):
        term_processes(procs, 'Exit signal received\n')
    except BaseException as msg:
        term_processes(procs, 'ERROR: %s\n' % msg)
    return data, rows, cols
def dm_sparse_intra_padding(l1, dist_function, condensed=False):
    """Compute in a parallel way a distance matrix for a 1-d input array.
    Parameters
    ----------
    l1 : array_like
        1-dimensional array for which to compute the distance matrix.
    dist_function : function
        Function to use for the distance computation. Only strictly
        positive distances are stored in the sparse result.
    condensed : bool, optional
        If True, return the condensed (scipy squareform) representation.
    Returns
    -------
    dist_matrix : array_like
        Sparse symmetric NxN distance matrix for each input_array element.
    """
    def _internal(l1, n, idx, nprocs, rows, cols, data, dist_function):
        # Worker: strided rows; writes each positive upper-triangle entry
        # at its condensed-matrix offset so workers never collide.
        for i in xrange(idx, n, nprocs):
            if i % 100 == 0:
                progressbar(i, n)
            # shared_arr[i, i:] = [dist_function(l1[i], el2) for el2 in l2]
            for j in xrange(i + 1, n):
                # shared_arr[idx, j] = dist_function(l1[i], l1[j])
                _res = dist_function(l1[i], l1[j])
                if _res > 0:
                    # Condensed index of (i, j), i < j.
                    # NOTE(review): relies on Python-2 integer `/`; under
                    # Python 3 this yields a float index -- confirm before
                    # porting.
                    c_idx = n*(n-1)/2 - (n-i)*(n-i-1)/2 + j - i - 1
                    data[c_idx] = _res
                    rows[c_idx] = i
                    cols[c_idx] = j
    n = len(l1)
    nprocs = min(mp.cpu_count(), n)
    # One slot per (i, j) pair of the condensed upper triangle.
    c_length = int(n * (n - 1) / 2)
    data = mp.Array('d', [0.] * c_length)
    rows = mp.Array('d', [0.] * c_length)
    cols = mp.Array('d', [0.] * c_length)
    procs = []
    try:
        for idx in xrange(nprocs):
            process = mp.Process(
                target=_internal,
                args=(l1, n, idx, nprocs, rows, cols, data, dist_function))
            process.start()
            procs.append(process)
        for process in procs:
            process.join()
    except (KeyboardInterrupt, SystemExit):
        term_processes(procs, 'Exit signal received\n')
    except BaseException as msg:
        term_processes(procs, 'ERROR: %s\n' % msg)
    # Keep only the entries that were actually written (distance > 0).
    data = np.array(data)
    idx = data > 0
    data = data[idx]
    rows = np.array(rows)[idx]
    cols = np.array(cols)[idx]
    # print (data)
    # Build the sparse upper triangle, then symmetrize it.
    D = scipy.sparse.csr_matrix((data, (rows, cols)), shape=(n, n))
    dist_matrix = D + D.T
    if condensed:
        dist_matrix = scipy.spatial.distance.squareform(dist_matrix)
    progressbar(n, n)
    return dist_matrix
def distance_matrix_parallel(input_array, dist_function, condensed=False,
                             sparse_mode=False):
    """Compute the intra distance matrix of `input_array`, dispatching to
    the sparse or dense parallel implementation."""
    if sparse_mode:
        return dm_sparse_intra_padding(input_array, dist_function,
                                       condensed=condensed)
    return dm_dense_intra_padding(input_array, dist_function,
                                  condensed=condensed)
| |
"""
dnapositions.py
Specification of DNA molecules in a variety of co-ordinate systems
"""
from typing import Dict, List
import pdb
import re
from copy import deepcopy
from itertools import combinations
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from fractaldna.utils.logging import logger
# Start by specifying the molecules in cylindrical co-ords
# From Arnott and Hukins, 1972, Biochem and Biophys Res Comms, 47, 6, p1504.
# B-DNA geometry
# Each array contains r (angstroms), theta (deg), z (angstroms) position
PHOSPHATE = {
    "O1": np.array([8.75, 97.4, 3.63]),
    "O2": np.array([10.20, 91.1, 1.86]),
    "O3": np.array([8.82, 103.3, 1.29]),
    "P1": np.array([8.91, 95.2, 2.08]),
    "O4": np.array([7.73, 88.0, 1.83]),
}
DEOXYRIBOSE = {
    "C5": np.array([7.70, 79.8, 2.77]),
    "O5": np.array([6.22, 66.0, 1.83]),
    "C4": np.array([7.59, 69.9, 2.04]),
    "C3": np.array([8.20, 69.9, 0.64]),
    "C2": np.array([7.04, 73.2, -0.24]),
    "C1": np.array([5.86, 67.4, 0.47]),
}
ADENINE = {
    "N9": np.array([4.63, 76.6, 0.42]),
    "C8": np.array([4.84, 93.0, 0.50]),
    "N7": np.array([3.95, 105.4, 0.43]),
    "C5": np.array([2.74, 94.0, 0.28]),
    "N6": np.array([1.83, 154.0, 0.14]),
    "C6": np.array([1.41, 107.2, 0.15]),
    "N1": np.array([0.86, 40.1, 0.03]),
    "C2": np.array([2.17, 30.6, 0.04]),
    "N3": np.array([3.24, 47.0, 0.16]),
    "C4": np.array([3.33, 70.5, 0.28]),
}
GUANINE = {
    "N9": np.array([4.63, 76.6, 0.42]),
    "C8": np.array([4.82, 93.2, 0.50]),
    "N7": np.array([3.92, 105.7, 0.42]),
    "C5": np.array([2.70, 94.0, 0.28]),
    "O6": np.array([1.71, 154.6, 0.13]),
    "C6": np.array([1.39, 109.3, 0.15]),
    "N1": np.array([0.92, 37.9, 0.03]),
    "N2": np.array([3.01, 4.2, -0.10]),
    "C2": np.array([2.28, 28.7, 0.03]),
    "N3": np.array([3.29, 46.7, 0.16]),
    "C4": np.array([3.33, 70.3, 0.28]),
}
CYTOSINE = {
    "N1": np.array([4.63, 76.6, 0.42]),
    "C6": np.array([4.99, 92.2, 0.52]),
    "C5": np.array([4.35, 107.0, 0.47]),
    "N4": np.array([2.76, 136.6, 0.27]),
    "C4": np.array([2.94, 110.0, 0.32]),
    "N3": np.array([2.31, 83.9, 0.22]),
    "O2": np.array([3.69, 47.9, 0.18]),
    "C2": np.array([3.40, 67.4, 0.27]),
}
THYMINE = {
    "N1": np.array([4.63, 76.6, 0.42]),
    "C6": np.array([5.01, 92.3, 0.52]),
    # "Me" is the thymine methyl group, treated as a single pseudo-atom.
    "Me": np.array([5.40, 119.8, 0.58]),
    "C5": np.array([4.38, 106.9, 0.47]),
    "O4": np.array([2.82, 136.3, 0.27]),
    "C4": np.array([2.98, 111.9, 0.32]),
    "N3": np.array([2.36, 85.2, 0.23]),
    "O2": np.array([3.64, 47.8, 0.18]),
    "C2": np.array([3.42, 67.3, 0.27]),
}
# Entry of these numbers double-checked 16-MAR-2016 by NJL
# Van der Waals radius of varius elements in Angstrom.
# From Bondi (1964), J. Phys. Chem.; and
# Kammeyer & Whitman (1972), J. Chem. Phys.
RADIUS = {"H": 1.2, "C": 1.7, "O": 1.4, "N": 1.5, "P": 1.9, "Me": 2.1}
# Display colors keyed either by element symbol or by molecule name.
COLORS = {
    "H": "white",
    "C": "grey",
    "O": "red",
    "N": "skyblue",
    "P": "goldenrod",
    "Me": "grey",
    "PHOSPHATE": "yellow",
    "DEOXYRIBOSE": "black",
    "ADENINE": "orange",
    "GUANINE": "green",
    "CYTOSINE": "red",
    "THYMINE": "blue",
}
# Matches the leading element/group symbol of an atom label, e.g. "C" in "C5".
LETTERS = re.compile("[A-Za-z]+")
def opposite_pair(base: Dict[str, np.ndarray]):
    """Return the Watson-Crick complement of one of the module's base
    dictionaries (A<->T, G<->C), or None if *base* is not one of them.

    Comparison is by dict equality against the module-level constants.
    """
    if base == THYMINE:
        return ADENINE
    elif base == ADENINE:
        return THYMINE
    elif base == GUANINE:
        return CYTOSINE
    elif base == CYTOSINE:
        return GUANINE
    else:
        return None
def base_name(base: Dict[str, np.ndarray]):
    """Return the name ("THYMINE", "ADENINE", "GUANINE" or "CYTOSINE") of
    one of the module's base dictionaries, or None if *base* is not one.
    """
    if base == THYMINE:
        return "THYMINE"
    elif base == ADENINE:
        return "ADENINE"
    elif base == GUANINE:
        return "GUANINE"
    elif base == CYTOSINE:
        return "CYTOSINE"
    else:
        return None
def overlap_volume(pos1: List, pos2: List, r1: float, r2: float) -> float:
    """
    Calculate overlapping volume of two spheres
    overlap_volume(pos1, pos2, r1, r2)
    :param pos1: Position of first molecule (x, y, z)
    :param pos2: Position of second molecule (x, y, z)
    :param r1: Radius of first molecule
    :param r2: Radius of second molecule
    :return: Volume of overlap.
    """
    d = sum((pos1 - pos2) ** 2) ** 0.5
    # check they overlap
    if d >= (r1 + r2):
        return 0
    # Concentric spheres: the lens formula below divides by d, and with
    # d == 0 and r1 == r2 neither containment branch fires, which used to
    # raise ZeroDivisionError. The overlap is simply the smaller sphere.
    if d == 0:
        return 4.0 / 3.0 * np.pi * min(r1, r2) ** 3
    # check if one entirely holds the other
    if r1 > (d + r2):  # 2 is entirely contained in one
        return 4.0 / 3.0 * np.pi * r2**3
    if r2 > (d + r1):  # 1 is entirely contained in one
        return 4.0 / 3.0 * np.pi * r1**3
    # Standard sphere-sphere "lens" intersection volume.
    vol = (
        np.pi
        * (r1 + r2 - d) ** 2
        * (d**2 + (2 * d * r1 - 3 * r1**2 + 2 * d * r2 - 3 * r2**2) + 6 * r1 * r2)
    ) / (12 * d)
    return vol
def get_p_values(a, b, c, alpha, beta, gamma):
    """
    Helper function for triple_overlap_volume

    Computes the two p-values used to classify the three-sphere geometry
    when the w^2 discriminant is negative.
    """
    a2, b2, c2 = a**2, b**2, c**2
    alpha2 = alpha**2
    beta2, gamma2 = beta**2, gamma**2
    # Heron-style products (16 * squared triangle areas) for the side
    # triangle (a, b, c) and the mixed triangle (a, beta, gamma).
    t = ((a + b + c) * (-a + b + c) * (a - b + c) * (a + b - c)) ** 0.5
    tabg = ((a + beta + gamma) * (-a + beta + gamma)
            * (a - beta + gamma) * (a + beta - gamma)) ** 0.5
    shared = (b2 - c2 + beta2 - gamma2) ** 2
    p1 = (shared + (t - tabg) ** 2) / (4 * a2) - alpha2  # NOQA
    p2 = (shared + (t + tabg) ** 2) / (4 * a2) - alpha2  # NOQA
    return p1, p2
def atanpi(val):
    """Arctangent of *val* mapped onto [0, pi) instead of (-pi/2, pi/2)."""
    theta = np.arctan(val)
    return theta + np.pi if theta < 0 else theta
def triple_overlap_volume(pos1, pos2, pos3, r1, r2, r3):
    """
    triple_overlap_volume(pos1, pos2, pos3, r1, r2, r3)
    Calculate volume overlapped by 3 spheres
    From Gibson and Scheraga (1987)
    Note:
        There are cases where this formula doesn't work properly, not documented
        in the paper by Gibson and Scheraga.This corresponds to the case where
        the center of one circle lies between the line joining the point of
        intersection of the three cricles, and the line between the center of the
        two other circles.
        This geometry rarely arises for chemical species, but if negative volumes
        start appearing, or other quantities that seem unlikely, a Monte Carlo
        integration can be used (provided in this package).
    """
    # Pairwise centre-centre distances: a = |p3-p2|, b = |p3-p1|, c = |p2-p1|.
    a = sum((pos3 - pos2) ** 2) ** 0.5
    b = sum((pos3 - pos1) ** 2) ** 0.5
    c = sum((pos2 - pos1) ** 2) ** 0.5
    # If any pair of spheres is disjoint, the triple overlap is empty.
    if not ((a <= (r3 + r2)) and (b <= (r3 + r1)) and (c <= (r2 + r1))):
        return 0
    # Check if one sphere entirely contains another
    if (r1 > (b + r3)) or (r1 > (c + r2)):  # Circle 1 encloses circle 2/3
        vol = overlap_volume(pos2, pos3, r2, r3)
        return vol
    elif (r2 > (a + r3)) or (r2 > (c + r1)):  # Circle 2 encloses circle 1/3
        vol = overlap_volume(pos1, pos3, r1, r3)
        return vol
    elif (r3 > (b + r1)) or (r3 > (a + r2)):  # Circle 3 encloses circle 1/2
        vol = overlap_volume(pos1, pos2, r1, r2)
        return vol
    # The warnings below flag geometries where the Gibson-Scheraga formula
    # is known to be unreliable (see the docstring note).
    if (r1 > b) and (r2 > a):  # Circle C is enclosed by both others
        logger.debug("Warning:: Circle C's center is interior to A and B")
    if (r1 > c) and (r3 > a):  # Circle B is enclosed by both others
        logger.debug("Warning:: Circle B's center is interior to A and C")
    if (r2 > c) and (r3 > b):  # Circle A is enclosed by both others
        logger.debug("Warning:: Circle A's center is interior to B and C")
    # Notation of Gibson & Scheraga: radii are (alpha, beta, gamma).
    alpha = r1
    beta = r2
    gamma = r3
    a2 = a**2
    b2 = b**2
    c2 = c**2
    alpha2 = alpha**2
    beta2 = beta**2
    gamma2 = gamma**2
    eps1 = (beta2 - gamma2) / a2
    eps2 = (gamma2 - alpha2) / b2
    eps3 = (alpha2 - beta2) / c2
    # w2 > 0 means the three sphere surfaces meet at two common points and
    # the closed-form expression applies.
    w2 = (
        (alpha2 * a2 + beta2 * b2 + gamma2 * c2) * (a2 + b2 + c2)
        - 2 * (alpha2 * a2**2 + beta2 * b2**2 + gamma2 * c2**2)
        + (a2 * b2 * c2) * (eps1 * eps2 + eps2 * eps3 + eps3 * eps1 - 1)
    )
    if w2 > 0:
        w = w2**0.5
        q1 = a * (b2 + c2 - a2 + beta2 + gamma2 - 2.0 * alpha2 + eps1 * (b2 - c2))
        q2 = b * (c2 + a2 - b2 + gamma2 + alpha2 - 2.0 * beta2 + eps2 * (c2 - a2))
        q3 = c * (a2 + b2 - c2 + alpha2 + beta2 - 2.0 * gamma2 + eps3 * (a2 - b2))
        alpha3 = alpha**3.0
        beta3 = beta**3.0
        gamma3 = gamma**3.0
        aw = a * w
        bw = b * w
        cw = c * w
        # Closed-form triple-overlap volume (Gibson & Scheraga 1987).
        vol = (
            w / 6.0
            - a
            / 2.0
            * (beta2 + gamma2 - a2 * (1.0 / 6.0 - eps1**2 / 2.0))
            * atanpi(2 * w / q1)  # NOQA
            - b
            / 2.0
            * (gamma2 + alpha2 - b2 * (1.0 / 6.0 - eps2**2 / 2.0))
            * atanpi(2 * w / q2)  # NOQA
            - c
            / 2.0
            * (alpha2 + beta2 - c2 * (1.0 / 6.0 - eps3**2 / 2.0))
            * atanpi(2 * w / q3)  # NOQA
            + (2.0 / 3.0)
            * alpha3
            * (
                atanpi(bw / (alpha * q2) * (1 - eps2))
                + atanpi(cw / (alpha * q3) * (1 + eps3))
            )  # NOQA
            + (2.0 / 3.0)
            * beta3
            * (
                atanpi(cw / (beta * q3) * (1 - eps3))
                + atanpi(aw / (beta * q1) * (1 + eps1))
            )  # NOQA
            + (2.0 / 3.0)
            * gamma3
            * (
                atanpi(aw / (gamma * q1) * (1 - eps1))
                + atanpi(bw / (gamma * q2) * (1 + eps2))
            )
        )  # NOQA
    elif w2 < 0:
        # No common intersection point of all three surfaces: classify the
        # configuration via the p-values and reduce to pairwise overlaps.
        # NOTE(review): if a p-value is exactly 0, `vol` is never assigned
        # in some branches below and a NameError would follow -- confirm
        # whether that degenerate case can occur upstream.
        p1, p2 = get_p_values(a, b, c, alpha, beta, gamma)
        p3, p4 = get_p_values(b, c, a, beta, gamma, alpha)
        p5, p6 = get_p_values(c, a, b, gamma, alpha, beta)
        if (p3 > 0) and (p5 > 0):
            if p1 > 0:
                vol = 0
            if p1 < 0:
                vol = overlap_volume(pos2, pos3, r2, r3)
        elif (p1 > 0) and (p5 > 0):  # fill out...
            if p3 > 0:
                vol = 0
            if p3 < 0:
                vol = overlap_volume(pos1, pos3, r1, r3)
        elif (p1 > 0) and (p3 > 0):
            if p5 > 0:
                vol = 0
            if p5 < 0:
                vol = overlap_volume(pos1, pos2, r1, r2)
        elif (p1 > 0) and (p3 < 0) and (p5 < 0):  # NOQA
            vol = (
                overlap_volume(pos1, pos2, r1, r2)
                + overlap_volume(pos1, pos3, r1, r3)
                - 4.0 / 3.0 * np.pi * r1**3.0
            )
        elif (p1 < 0) and (p3 > 0) and (p5 < 0):  # NOQA
            vol = (
                overlap_volume(pos1, pos2, r1, r2)
                + overlap_volume(pos2, pos3, r2, r3)
                - 4.0 / 3.0 * np.pi * r2**3.0
            )
        elif (p1 < 0) and (p3 < 0) and (p5 > 0):  # NOQA
            vol = (
                overlap_volume(pos1, pos3, r1, r3)
                + overlap_volume(pos2, pos3, r2, r3)
                - 4.0 / 3.0 * np.pi * r3**3.0
            )
        else:
            # Fall back to MCMC calculation
            vol = mc_triple_volume(pos1, pos2, pos3, r1, r2, r3)
    else:
        vol = 0
    return vol
def mc_triple_volume(p1, p2, p3, r1, r2, r3, n=1e5):
    """
    Monte Carlo estimate of the volume shared by three spheres.

    Samples uniformly in the bounding box of the smallest sphere -- the
    triple intersection must lie inside it -- and counts the fraction of
    samples falling inside all three spheres.

    :param p1: centre of the first sphere (x, y, z)
    :param p2: centre of the second sphere (x, y, z)
    :param p3: centre of the third sphere (x, y, z)
    :param r1: radius of the first sphere
    :param r2: radius of the second sphere
    :param r3: radius of the third sphere
    :param n: number of random samples (higher n -> lower variance)
    :return: estimated overlap volume
    """
    n = int(n)
    # Generate points inside the box containing smallest circle
    # as this is a constraint
    r_min, centres = min((r1, p1), (r2, p2), (r3, p3), key=lambda s: s[0])
    ranges = 2 * np.ones([3]) * r_min
    # Vectorized sampling: one (n, 3) draw instead of a Python loop.
    points = (np.random.random((n, 3)) - 0.5) * ranges + centres

    def _inside(centre, radius):
        # Boolean mask of samples strictly inside the given sphere.
        return np.sum((points - centre) ** 2, axis=1) < radius**2

    in_overlap = np.count_nonzero(
        _inside(p1, r1) & _inside(p2, r2) & _inside(p3, r3))
    # np.product was removed in NumPy 2.0; np.prod is the supported name.
    vol_total = np.prod(ranges)
    return vol_total * in_overlap / n
class MoleculeFromAtoms:
    def __init__(self, atoms: Dict[str, np.ndarray]):
        """
        MoleculeFromAtoms(atoms)
        A molecule created from a dictionary of cartesian atom positions
        (atom label -> (x, y, z) array). The mapping is deep-copied so the
        caller's dictionary is never mutated through this instance.
        """
        self.atoms = deepcopy(atoms)
@classmethod
def from_cylindrical(cls, atoms: Dict[str, np.array], inverse: bool = False):
"""Make a MoleculeFromAtoms instance from a list of atoms in cylindrical
coords (r, theta, phi)
MoleculeFromAtoms.from_cylindrical(atoms, inverse=False)
Note:
Theta is in degrees
:param inverse: (default False)Set to True to generate a dyadically related
base pair (negates theta and z)
"""
cylindrical = deepcopy(atoms)
cartesian = {}
sgn = 1 if inverse is False else -1
for (name, pos) in cylindrical.items():
z = sgn * pos[2] # NOQA
y = pos[0] * np.sin(sgn * np.pi * pos[1] / 180.0)
x = pos[0] * np.cos(sgn * np.pi * pos[1] / 180.0)
cartesian[name] = np.array([x, y, z])
return cls(cartesian)
def find_center(self) -> float:
"""
c = MoleculeFromAtoms.find_center()
Find the barycenter of the atoms that constitute this molecule
"""
c = np.zeros([3])
denom = 0
for (atom, pos) in self.atoms.items():
r = RADIUS[LETTERS.match(atom).group()]
c += pos * r
denom += r
return c / denom
def find_half_lengths(self) -> np.array:
"""
half_lengths = MoleculeFromAtoms.find_half_lengths()
Return an array of the half lengths of a box that encloses the molecule
in xyz. This is not a minimum bounding volume
"""
c = self.find_center()
extents = []
for (atom, pos) in self.atoms.items():
pos -= c
rad = RADIUS[LETTERS.match(atom).group()]
x = (pos[0] - rad) if pos[0] <= 0 else (pos[0] + rad)
y = (pos[1] - rad) if pos[1] <= 0 else (pos[1] + rad)
z = (pos[2] - rad) if pos[2] <= 0 else (pos[2] + rad)
extents.append([x, y, z])
extents = np.array(extents)
max_extents = np.max(extents, axis=0)
min_extents = np.min(extents, axis=0)
return 0.5 * (max_extents - min_extents)
def find_equivalent_half_lengths(self) -> np.array:
"""
l = MoleculeFromAtoms.find_equivalent_half_lengths()
Find the half lengths scaled to give a volume equal to what the
constituent molecules occupy
"""
half_lengths = self.find_half_lengths()
max_volume = 4.0 / 3.0 * np.pi * np.product(half_lengths)
equiv_volume = 4.0 / 3.0 * np.pi * self.find_equivalent_radius() ** 3.0
return half_lengths * (equiv_volume / max_volume) ** (1.0 / 3.0)
def find_radius(self) -> float:
"""
r = MoleculeFromAtoms.find_radius()
Return the minimum radius that encloses this molecule
"""
c = self.find_center()
radii = []
for (atom, pos) in self.atoms.items():
pos -= c
rad = RADIUS[LETTERS.match(atom).group()]
rad += np.sqrt(np.sum(pos * pos))
radii.append(rad)
return np.max(radii)
def find_equivalent_radius(self) -> float:
"""
r = MoleculeFromAtoms.find_equivalent_radius()
Return the radius that yields the same volume occupied by all atoms
"""
vol = 0
for (atom, pos) in self.atoms.items():
rad = RADIUS[LETTERS.match(atom).group()]
vol += 4.0 / 3.0 * np.pi * rad**3
# subtract double overlaps
for ((a1, p1), (a2, p2)) in combinations(self.atoms.items(), 2):
r1 = RADIUS[LETTERS.match(a1).group()]
r2 = RADIUS[LETTERS.match(a2).group()]
vol -= overlap_volume(p1, p2, r1, r2)
for ((a1, p1), (a2, p2), (a3, p3)) in combinations(self.atoms.items(), 3):
# pdb.set_trace()
r1 = RADIUS[LETTERS.match(a1).group()]
r2 = RADIUS[LETTERS.match(a2).group()]
r3 = RADIUS[LETTERS.match(a3).group()]
vol += triple_overlap_volume(p1, p2, p3, r1, r2, r3)
return (vol * 3.0 / 4.0 / np.pi) ** (1.0 / 3.0)
def to_plot(self) -> plt.Figure:
"""
fig = MoleculeFromAtoms.to_plot()
Returns a matplotlib figure instance of the molecule.
"""
atomsets = {}
for (atom, pos) in self.atoms.items():
a = LETTERS.match(atom).group()
if a not in atomsets:
rad = RADIUS[a] if a in RADIUS else 2
col = COLORS[a] if a in COLORS else "blue"
atomsets[a] = {"radius": rad, "positions": [], "color": col}
atomsets[a]["positions"].append(pos)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
for atomset in atomsets.values():
pos = np.array(atomset["positions"])
size = 3.14 * atomset["radius"] * atomset["radius"]
color = atomset["color"]
ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2], s=20 * size, c=color)
return fig
def __len__(self) -> int:
"""
len(MoleculeFromAtoms)
returns number of atoms
"""
return self.atoms.__len__()
class MoleculeDictionary:
    """
    Base class for test molecules: a thin wrapper holding a mapping of
    atom name -> cartesian position.
    """

    def __init__(self):
        # Subclasses populate this mapping in their constructors.
        self.atoms = {}

    def items(self):
        """Expose the (name, position) pairs of the underlying atom dict."""
        return self.atoms.items()
class DoubleStrand(MoleculeDictionary):
    """
    Double strand of DNA (for testing).

    Builds ten stacked rungs; each rung carries a base pair, a deoxyribose
    sugar and a phosphate on both the left and right strands. Successive
    rungs are offset by 36 in y and 3.4 in z; the right strand is mirrored
    by negating y and z.
    """

    def __init__(self):
        super().__init__()
        sequence = [THYMINE, GUANINE, ADENINE, CYTOSINE, THYMINE] * 2
        for ii in range(len(sequence)):
            # Left strand: base pair, sugar, phosphate.
            self._add_group(sequence[ii], "_LEFTBP", ii)
            self._add_group(DEOXYRIBOSE, "_LEFTDO", ii)
            self._add_group(PHOSPHATE, "_LEFTPO", ii)
            # Right strand: the complementary base, mirrored through y/z.
            self._add_group(opposite_pair(sequence[ii]), "_RIGHTBP", ii, invert=True)
            self._add_group(DEOXYRIBOSE, "_RIGHTDO", ii, invert=True)
            self._add_group(PHOSPHATE, "_RIGHTPO", ii, invert=True)

    def _add_group(self, group, suffix, ii, invert=False):
        """Add every atom of *group* shifted to rung *ii*, keying each as
        name + suffix + rung index; negate y/z first when *invert* is True."""
        sign = -1 if invert else 1
        for (name, v) in group.items():
            x = v[0]
            y = sign * v[1] + 36 * ii
            z = sign * v[2] + 3.4 * ii
            self.atoms[name + suffix + str(ii)] = np.array([x, y, z])
class DoubleStrandMolecules(MoleculeDictionary):
    """
    Double strand of DNA at molecule granularity (for testing): each entry
    is the barycenter of a base, sugar or phosphate, rather than an atom.
    """

    def __init__(self):
        super().__init__()
        sequence = [THYMINE, GUANINE, ADENINE, CYTOSINE, THYMINE] * 2
        for ii, bp in enumerate(sequence):
            # Left strand: base pair, sugar and phosphate centres.
            for mol, label in ((bp, base_name(bp) + "_LEFT"),
                               (DEOXYRIBOSE, "DEOXYRIBOSE_LEFT"),
                               (PHOSPHATE, "PHOSPHATE_LEFT")):
                centre = MoleculeFromAtoms(mol).find_center()
                self.atoms[label + str(ii)] = self.shift(centre, ii)
            # Right strand: the complementary base, mirrored through y/z.
            paired = opposite_pair(bp)
            for mol, label in ((paired, base_name(paired) + "_RIGHT"),
                               (DEOXYRIBOSE, "DEOXYRIBOSE_RIGHT"),
                               (PHOSPHATE, "PHOSPHATE_RIGHT")):
                centre = MoleculeFromAtoms(mol).find_center()
                self.atoms[label + str(ii)] = self.shift(centre, ii, inv=True)

    @staticmethod
    def shift(v, ii, inv=False):
        """Shift centre *v* to rung *ii*: +36*ii in y and +3.4*ii in z,
        negating y/z first when *inv* is True (right-hand strand)."""
        sign = -1 if inv is True else 1
        x = v[0]
        y = sign * v[1] + 36 * ii
        z = sign * v[2] + 3.4 * ii
        return np.array([x, y, z])
| |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learning rate schedules."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
from lingvo.core import py_utils as tf_py_utils
from lingvo.core import schedule as tf_schedule
from lingvo.jax import schedules
from lingvo.jax import test_utils
class SchedulesTest(test_utils.TestCase):
  """Checks the JAX schedules against hard-coded reference values and
  against the equivalent lingvo (TF) schedule implementations."""

  def _assert_matches_lingvo(self, jit_value, tf_lr_schedule, count):
    """Asserts the jitted JAX schedule equals the lingvo schedule at `count`.

    Factors out the GlobalStepContext/assertAllClose boilerplate that was
    previously duplicated in every test method.
    """
    with tf_py_utils.GlobalStepContext(count):
      self.assertAllClose(
          jit_value(jnp.array(count)),
          tf_lr_schedule.Value().numpy())

  @parameterized.parameters((0,), (10,), (100,), (1000000,))
  def test_constant_schedule(self, count):
    lr_value = 5.
    p = schedules.Constant.Params().Set(value=lr_value)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    with self.subTest(name='reference_values'):
      self.assertAllClose(jit_value(count), lr_value)
    tf_p = tf_schedule.Constant.Params().Set(value=lr_value)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  @parameterized.parameters((29, 1.), (39, 0.1), (49, 0.01), (50, 0.001),
                            (59, 0.001))
  def test_piecewise_constant_schedule(self, count, expected_value):
    boundaries = [30, 40, 50]
    values = [1.0, 0.1, 0.01, 0.001]
    p = schedules.PiecewiseConstant.Params().Set(
        boundaries=boundaries, values=values)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    with self.subTest(name='reference_values'):
      self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.PiecewiseConstantSchedule.Params().Set(
        boundaries=boundaries, values=values)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  @parameterized.parameters(
      (0, 1), (10, 1), (100, 1), (1000000, 1), (0, 2), (10, 2), (100, 2),
      (1000000, 2), (0, 3), (10, 3), (100, 3), (1000000, 3), (0, 4), (10, 4),
      (100, 4), (1000000, 4), (0, 5), (10, 5), (100, 5), (1000000, 5))
  def test_polynomial_schedule(self, count, power):
    p = schedules.Polynomial.Params().Set(
        start=(7, 0.9), limit=(370, 1.3), power=power)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    tf_p = tf_schedule.PolynomialSchedule.Params().Set(
        start=(7, 0.9), limit=(370, 1.3), power=power)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  @parameterized.parameters((0, 1.74693e-07), (1000, 0.000174867),
                            (2000, 0.00034956), (3000, 0.000524253),
                            (4000, 0.000698684), (4500, 0.000658735),
                            (5000, 0.000624937))
  def test_transformer_schedule_values(self, count, expected_value):
    count = jnp.array(count)
    p = schedules.Transformer.Params().Set(warmup_steps=4000, model_dim=512)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    with self.subTest(name='reference_values'):
      self.assertAllClose(jit_value(count), expected_value)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_transformer_schedule_peak(self):
    p = schedules.Transformer.Params().Set(warmup_steps=4000, model_dim=512)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    # Tests that the schedule peaks at 4000 steps.
    v_3990 = jit_value(jnp.array(3990))
    v_4000 = jit_value(jnp.array(4000))
    v_4010 = jit_value(jnp.array(4010))
    with self.subTest(name='reference_values'):
      self.assertGreater(v_4000, v_3990)
      self.assertGreater(v_4000, v_4010)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in {3990, 4000, 4010}:
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_transformer_schedule_linear(self):
    p = schedules.Transformer.Params().Set(warmup_steps=4000, model_dim=512)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    # Tests that the schedule increases linearly before 4000 steps
    # (midpoint value equals the average of its neighbors).
    with self.subTest(name='reference_values'):
      for step in range(300, 4000, 200):
        a = jit_value(jnp.array(step - 10))
        b = jit_value(jnp.array(step))
        c = jit_value(jnp.array(step + 10))
        self.assertAllClose(b * 2., a + c)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in range(300, 4000, 200):
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  @parameterized.parameters((0, 1.74693e-07), (1000, 0.000174867),
                            (2000, 0.00034956), (3000, 0.000524253),
                            (4000, 0.000698684), (4500, 0.000658735),
                            (5000, 0.000624937))
  def test_transformer_schedule_with_decay_end_values(self, count,
                                                      expected_value):
    p = schedules.Transformer.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    with self.subTest(name='reference_values'):
      self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_transformer_schedule_with_decay_end_peak(self):
    p = schedules.Transformer.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    with self.subTest(name='reference_values'):
      # Tests that the schedule peaks at 4000 steps.
      v_3990 = jit_value(jnp.array(3990))
      v_4000 = jit_value(jnp.array(4000))
      v_4010 = jit_value(jnp.array(4010))
      self.assertGreater(v_4000, v_3990)
      self.assertGreater(v_4000, v_4010)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in {3990, 4000, 4010}:
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_transformer_schedule_with_decay_end_linear(self):
    p = schedules.Transformer.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    # Tests that the schedule increases linearly before 4000 steps.
    with self.subTest(name='reference_values'):
      for step in range(300, 4000, 200):
        a = jit_value(jnp.array(step - 10))
        b = jit_value(jnp.array(step))
        c = jit_value(jnp.array(step + 10))
        self.assertAllClose(b * 2., a + c)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in range(300, 4000, 200):
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_transformer_schedule_with_decay_end_fixed(self):
    p = schedules.Transformer.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    # Tests that the schedule is fixed after decay end steps.
    v_decay_end = lr_schedule.value(jnp.array(p.decay_end))
    with self.subTest(name='reference_values'):
      self.assertGreater(jit_value(jnp.array(p.decay_end - 1)), v_decay_end)
      self.assertAllClose(jit_value(jnp.array(p.decay_end + 1)), v_decay_end)
      self.assertAllClose(jit_value(jnp.array(p.decay_end + 1000)), v_decay_end)
    tf_p = tf_schedule.TransformerSchedule.Params().Set(
        warmup_steps=4000, model_dim=512, decay_end=5000)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in range(p.decay_end - 1, p.decay_end + 20, 2):
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  @parameterized.parameters((0,), (1000,), (2000,), (3000,), (4000,), (4500,),
                            (5000,))
  def test_sqrt_decay_schedule_values(self, count):
    p = schedules.SqrtDecay.Params().Set(warmup_steps=4000)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    tf_p = tf_schedule.SqrtDecay.Params().Set(warmup_steps=4000)
    tf_lr_schedule = tf_p.Instantiate()
    self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_linear_schedule_values(self):
    p = schedules.Linear.Params().Set(start=(100, 0.1), limit=(200, 1.0))
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    xs = [0, 10, 20, 100, 120, 150, 200, 250]
    expected_values = [0.1, 0.1, 0.1, 0.1, 0.28, 0.55, 1.0, 1.0]
    with self.subTest(name='reference_values'):
      for count, expected_value in zip(xs, expected_values):
        self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.LinearSchedule.Params().Set(
        start=(100, 0.1), limit=(200, 1.0))
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in xs:
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_exponential_schedule(self):
    p = schedules.Exponential.Params().Set(start=(100, 1.0), limit=(200, 0.1))
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    xs = [0, 10, 20, 100, 120, 150, 200, 250]
    expected_values = [1.0, 1.0, 1.0, 1.0, 0.6309573, 0.3162277, 0.1, 0.1]
    with self.subTest(name='reference_values'):
      for count, expected_value in zip(xs, expected_values):
        self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.ExponentialSchedule.Params().Set(
        start=(100, 1.0), limit=(200, 0.1))
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in xs:
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_linear_rampup_exp_decay_schedule(self):
    p = schedules.LinearRampupExponentialDecay.Params().Set(
        warmup=100, decay_start=200, decay_end=300, max=1.0, min_ratio=0.01)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    xs = [0, 10, 20, 100, 120, 150, 200, 250, 300, 350]
    expected_values = [0.0, 0.1, 0.2, 1.0, 1.0, 1.0, 1.0, 0.1, 0.01, 0.01]
    with self.subTest(name='reference_values'):
      for count, expected_value in zip(xs, expected_values):
        self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.LinearRampupExponentialDecay.Params().Set(
        warmup=100, decay_start=200, decay_end=300, max=1.0, min=0.01)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in xs:
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_linear_rampup_exp_decay_schedule_noconstant(self):
    p = schedules.LinearRampupExponentialDecay.Params().Set(
        warmup=150, decay_start=150, decay_end=250, max=1.0, min_ratio=0.01)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    xs = [0, 15, 30, 150, 200, 250, 300, 350]
    expected_values = [0., 0.1, 0.2, 1.0, 0.1, 0.01, 0.01, 0.01]
    with self.subTest(name='reference_values'):
      for count, expected_value in zip(xs, expected_values):
        self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.LinearRampupExponentialDecay.Params().Set(
        warmup=150, decay_start=150, decay_end=250, max=1.0, min=0.01)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in xs:
        if count == 200:
          # Lingvo implementation does not support no warm-up. It just adds a
          # warm-up consisting of a single step. Hence, no comparison.
          continue
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_linear_rampup_exp_decay_schedule_nowarmup(self):
    p = schedules.LinearRampupExponentialDecay.Params().Set(
        warmup=0, decay_start=0, decay_end=100, max=1.0, min_ratio=0.01)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    xs = [0, 50, 100, 150, 200]
    expected_values = [1., 0.1, 0.01, 0.01, 0.01]
    with self.subTest(name='reference_values'):
      for count, expected_value in zip(xs, expected_values):
        self.assertAllClose(jit_value(jnp.array(count)), expected_value)
    tf_p = tf_schedule.LinearRampupExponentialDecay.Params().Set(
        warmup=0, decay_start=0, decay_end=100, max=1.0, min=0.01)
    tf_lr_schedule = tf_p.Instantiate()
    with self.subTest(name='lingvo_values'):
      for count in xs:
        if count == 50:
          # Lingvo implementation does not support no warm-up. It just adds a
          # warm-up consisting of a single step. Hence, no comparison.
          continue
        self._assert_matches_lingvo(jit_value, tf_lr_schedule, count)

  def test_linear_rampup_piecewise_constant_schedule(self):
    boundaries = [40, 64, 80, 96]
    values = [1.0, 0.1, 0.01, 0.001]
    p = schedules.LinearRampupPiecewiseConstant.Params().Set(
        boundaries=boundaries, values=values)
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    tf_p = tf_schedule.LinearRampupPiecewiseConstantSchedule.Params().Set(
        boundaries=boundaries, lrs=values, num_splits=1)
    tf_lr_schedule = tf_p.Instantiate()
    for step in range(100):
      self._assert_matches_lingvo(jit_value, tf_lr_schedule, step)

  def test_piecewise_schedule(self):
    # The combined schedule must follow p1 before the boundary and p2
    # (with the step offset removed) after it.
    p1 = schedules.Exponential.Params().Set(start=(0, 1.0), limit=(50, 0.1))
    p2 = schedules.Exponential.Params().Set(start=(0, 4.0), limit=(50, 1.0))
    p = schedules.PiecewiseSchedule.Params().Set(
        boundaries=[50], schedules=[p1, p2])
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    p1_lr_schedule = p1.Instantiate()
    p1_jit_value = jax.jit(p1_lr_schedule.value)
    p2_lr_schedule = p2.Instantiate()
    p2_jit_value = jax.jit(p2_lr_schedule.value)
    for step in range(50):
      self.assertAllClose(
          jit_value(jnp.array(step)), p1_jit_value(jnp.array(step)))
    for step in range(50):
      self.assertAllClose(
          jit_value(jnp.array(step + 50)), p2_jit_value(jnp.array(step)))

  def test_cycle_schedule(self):
    # With steps=[1, 2] the combined schedule spends 1 step on p1 then 2 on
    # p2, repeating with period 3.
    p1 = schedules.Exponential.Params().Set(start=(0, 1.0), limit=(50, 0.1))
    p2 = schedules.Exponential.Params().Set(start=(0, 4.0), limit=(50, 1.0))
    p = schedules.CycleSchedule.Params().Set(steps=[1, 2], schedules=[p1, p2])
    lr_schedule = p.Instantiate()
    jit_value = jax.jit(lr_schedule.value)
    p1_lr_schedule = p1.Instantiate()
    p1_jit_value = jax.jit(p1_lr_schedule.value)
    p2_lr_schedule = p2.Instantiate()
    p2_jit_value = jax.jit(p2_lr_schedule.value)
    for step in range(50):
      if step % 3 == 0:
        self.assertAllClose(
            jit_value(jnp.array(step)), p1_jit_value(jnp.array(step)))
      else:
        self.assertAllClose(
            jit_value(jnp.array(step)), p2_jit_value(jnp.array(step)))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  absltest.main()
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import wait_until
from ducktape.tests.test import Test
from ducktape.mark.resource import cluster
from ducktape.mark import matrix
from ducktape.mark import parametrize, ignore
from kafkatest.services.kafka import KafkaService
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.streams import StreamsSmokeTestDriverService, StreamsSmokeTestJobRunnerService
import time
import signal
from random import randint
def broker_node(test, topic, broker_type):
    """Discover the broker node of the requested type.

    For the "leader" type, discovers the leader for our topic and partition 0;
    for "controller", the current cluster controller.

    :param test: test instance exposing a `kafka` service
    :param topic: topic whose partition-0 leader is looked up (leader type only)
    :param broker_type: "leader" or "controller"
    :raises ValueError: if broker_type is not recognized
    """
    if broker_type == "leader":
        return test.kafka.leader(topic, partition=0)
    elif broker_type == "controller":
        return test.kafka.controller()
    # ValueError (a subclass of Exception) so existing broad handlers still work.
    raise ValueError("Unexpected broker type %s." % (broker_type,))
def signal_node(test, node, sig):
    """Send signal `sig` to `node` through the test's kafka service."""
    test.kafka.signal_node(node, sig)
def clean_shutdown(test, topic, broker_type):
    """Discover the broker node of the requested type and stop it gracefully
    with SIGTERM."""
    target = broker_node(test, topic, broker_type)
    signal_node(test, target, signal.SIGTERM)
def hard_shutdown(test, topic, broker_type):
    """Discover the broker node of the requested type and kill it abruptly
    with SIGKILL."""
    target = broker_node(test, topic, broker_type)
    signal_node(test, target, signal.SIGKILL)
def clean_bounce(test, topic, broker_type):
    """Chase the broker of the requested type and restart it cleanly,
    five times in a row."""
    for _ in range(5):
        target = broker_node(test, topic, broker_type)
        test.kafka.restart_node(target, clean_shutdown=True)
def hard_bounce(test, topic, broker_type):
    """Chase the broker of the requested type, hard-kill it, and restart it.
    Repeated five times."""
    for _ in range(5):
        target = broker_node(test, topic, broker_type)
        test.kafka.signal_node(target, sig=signal.SIGKILL)
        # A hard kill leaves the process and its zookeeper registration
        # behind; wait until both are gone (the session timeout has expired)
        # before restarting the broker.
        wait_until(lambda: len(test.kafka.pids(target)) == 0 and not test.kafka.is_registered(target),
                   timeout_sec=test.kafka.zk_session_timeout + 5,
                   err_msg="Failed to see timely deregistration of hard-killed broker %s" % str(target.account))
        test.kafka.start_node(target)
# Maps each failure-mode name (as used in the test matrix parameters below)
# to the function that induces that failure.
failures = {
    "clean_shutdown": clean_shutdown,
    "hard_shutdown": hard_shutdown,
    "clean_bounce": clean_bounce,
    "hard_bounce": hard_bounce
}
class StreamsBrokerBounceTest(Test):
    """
    Simple test of Kafka Streams with brokers failing
    """

    def __init__(self, test_context):
        super(StreamsBrokerBounceTest, self).__init__(test_context)
        self.replication = 3
        self.partitions = 3
        # Every topic uses the same settings; min.insync.replicas=2 means
        # acknowledged writes must survive a single broker failure.
        self.topics = {
            name: {'partitions': self.partitions,
                   'replication-factor': self.replication,
                   'configs': {"min.insync.replicas": 2}}
            for name in ('echo', 'data', 'min', 'max', 'sum',
                         'dif', 'cnt', 'avg', 'wcnt', 'tagg')
        }

    def fail_broker_type(self, failure_mode, broker_type):
        """Pick a random topic and fail the broker of `broker_type` for it."""
        # list() is required on Python 3, where dict.keys() returns a
        # non-indexable view (the old self.topics.keys()[i] broke there).
        topic = list(self.topics.keys())[randint(0, len(self.topics) - 1)]
        failures[failure_mode](self, topic, broker_type)

    def fail_many_brokers(self, failure_mode, num_failures):
        """Signal `num_failures` brokers: SIGTERM for a clean shutdown,
        SIGKILL for every other failure mode."""
        sig = signal.SIGTERM if failure_mode == "clean_shutdown" else signal.SIGKILL
        # range(num_failures): the previous range(0, num_failures - 1)
        # signalled one broker fewer than requested.
        for num in range(num_failures):
            signal_node(self, self.kafka.nodes[num], sig)

    def setup_system(self):
        """Start zookeeper, the brokers, and the smoke-test driver/runner."""
        # Setup phase
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()
        self.kafka = KafkaService(self.test_context, num_nodes=self.replication,
                                  zk=self.zk, topics=self.topics)
        self.kafka.start()

        # Start test harness
        self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
        self.processor1 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka)
        self.driver.start()
        self.processor1.start()

    def collect_results(self, sleep_time_secs):
        """Stop the harness and scrape success/failure markers from the logs.

        :return: dict of log lines keyed by what they demonstrate
        """
        data = {}
        # End test
        self.driver.wait()
        self.driver.stop()
        self.processor1.stop()
        node = self.driver.node
        # Success is declared if streams does not crash when sleep time > 0
        # It should give an exception when sleep time is 0 since we kill the brokers immediately
        # and the topic manager cannot create internal topics with the desired replication factor
        if (sleep_time_secs == 0):
            output_streams = self.processor1.node.account.ssh_capture("grep SMOKE-TEST-CLIENT-EXCEPTION %s" % self.processor1.STDOUT_FILE, allow_fail=False)
        else:
            output_streams = self.processor1.node.account.ssh_capture("grep SMOKE-TEST-CLIENT-CLOSED %s" % self.processor1.STDOUT_FILE, allow_fail=False)
        for line in output_streams:
            data["Client closed"] = line
        # Currently it is hard to guarantee anything about Kafka since we don't have exactly once.
        # With exactly once in place, success will be defined as ALL-RECORDS-DELIEVERD and SUCCESS
        output = node.account.ssh_capture("grep -E 'ALL-RECORDS-DELIVERED|PROCESSED-MORE-THAN-GENERATED|PROCESSED-LESS-THAN-GENERATED' %s" % self.driver.STDOUT_FILE, allow_fail=False)
        for line in output:
            data["Records Delivered"] = line
        output = node.account.ssh_capture("grep -E 'SUCCESS|FAILURE' %s" % self.driver.STDOUT_FILE, allow_fail=False)
        for line in output:
            data["Logic Success/Failure"] = line
        return data

    @cluster(num_nodes=7)
    @matrix(failure_mode=["clean_shutdown", "hard_shutdown", "clean_bounce", "hard_bounce"],
            broker_type=["leader", "controller"],
            sleep_time_secs=[120])
    def test_broker_type_bounce(self, failure_mode, broker_type, sleep_time_secs):
        """
        Start a smoke test client, then kill one particular broker and ensure data is still received
        Record if records are delivered.
        """
        self.setup_system()

        # Sleep to allow test to run for a bit
        time.sleep(sleep_time_secs)

        # Fail brokers
        self.fail_broker_type(failure_mode, broker_type)

        return self.collect_results(sleep_time_secs)

    @cluster(num_nodes=7)
    @matrix(failure_mode=["clean_shutdown"],
            broker_type=["controller"],
            sleep_time_secs=[0])
    def test_broker_type_bounce_at_start(self, failure_mode, broker_type, sleep_time_secs):
        """
        Start a smoke test client, then kill one particular broker immediately before streams stats
        Streams should throw an exception since it cannot create topics with the desired
        replication factor of 3
        """
        self.setup_system()

        # Sleep to allow test to run for a bit
        time.sleep(sleep_time_secs)

        # Fail brokers
        self.fail_broker_type(failure_mode, broker_type)

        return self.collect_results(sleep_time_secs)

    @cluster(num_nodes=7)
    @matrix(failure_mode=["clean_shutdown", "hard_shutdown", "clean_bounce", "hard_bounce"],
            num_failures=[2])
    def test_many_brokers_bounce(self, failure_mode, num_failures):
        """
        Start a smoke test client, then kill a few brokers and ensure data is still received
        Record if records are delivered
        """
        self.setup_system()

        # Sleep to allow test to run for a bit
        time.sleep(120)

        # Fail brokers
        self.fail_many_brokers(failure_mode, num_failures)

        return self.collect_results(120)

    @cluster(num_nodes=7)
    @matrix(failure_mode=["clean_bounce", "hard_bounce"],
            num_failures=[3])
    def test_all_brokers_bounce(self, failure_mode, num_failures):
        """
        Start a smoke test client, then kill a few brokers and ensure data is still received
        Record if records are delivered
        """
        self.setup_system()

        # Sleep to allow test to run for a bit
        time.sleep(120)

        # Fail brokers
        self.fail_many_brokers(failure_mode, num_failures)

        return self.collect_results(120)
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Smallest fee considered worth dealing with (in coin units).
BASE_FEE = Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting CSW values"""
    amount = Decimal("20000000.00000003")
    round_tripped = int(json.loads(json.dumps(float(amount))) * 1.0e8)
    if round_tripped != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses the dotfile convention.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    # Python 2 import path; SafeConfigParser requires a [section] header,
    # which bitcoin.conf does not have -- hence the wrapper class below.
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        # File-like wrapper that injects a fake "[all]" section header
        # before the real contents and strips "#" comments from each line.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Return the fake header exactly once, then clear it.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    # Drop everything after the comment marker.
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    # All settings end up under the injected "[all]" section.
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    config: dict with 'rpcuser', 'rpcpassword', and optionally
    'rpcport'/'testnet'. Exits the process on connection failure or on a
    mainnet/testnet mismatch.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        # Default RPC ports: 18332 for testnet, 8332 for mainnet.
        config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except SystemExit:
        # BUG FIX: the previous bare "except" swallowed the deliberate
        # sys.exit(1) above and printed a second, misleading
        # "Error connecting" message. Let the exit propagate.
        raise
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase if the wallet is locked.

    Returns True if the wallet is unencrypted or unlocked after this
    call, False if the passphrase was wrong (caller retries).
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # BUG FIX: narrowed from a bare "except" so Ctrl-C
            # (KeyboardInterrupt) and SystemExit are no longer swallowed
            # and reported as "Wrong passphrase".
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Return {address: {"total", "outputs", "account"}} for spendable outputs."""
    address_to_account = {}
    for entry in bitcoind.listreceivedbyaddress(0):
        address_to_account[entry["address"]] = entry["account"]

    address_summary = {}
    for output in bitcoind.listunspent(0):
        # listunspent doesn't give addresses, so look up the funding transaction:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue

        address = pk["addresses"][0]
        summary = address_summary.get(address)
        if summary is None:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
        else:
            summary["total"] += vout["value"]
            summary["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs until "needed" is covered; return (outputs, change)."""
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        outputs.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (outputs, gathered-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending amount+fee from
    fromaddresses to toaddress, returning the signed hex string.
    Any non-tiny change goes back to the last of fromaddresses.
    Exits the process on insufficient funds or signing failure.
    """
    all_coins = list_available(bitcoind)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f CSW available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            # Change going to the destination address: merge amounts.
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the outputs spent by txinfo's inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        funding_tx = bitcoind.getrawtransaction(vin['txid'], 1)
        spent = funding_tx['vout'][vin['vout']]
        total += spent['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of txinfo's outputs."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Sanity-check the fee implied by transaction txdata_hex.

    Exits the process if the implied fee exceeds max_fee, or if a large
    or tiny-amount transaction pays less than BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: this function previously referenced an undefined name
        # "fee" (a NameError whenever either guard below was evaluated);
        # the fee is the difference between inputs and outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)//2   # two hex characters per byte
        kb = tx_size//1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction

    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line driver: list available funds, or build and (optionally)
    broadcast a spend transaction from specific addresses."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just list the spendable funds per address.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():  # Python 2 dict API
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to pay more than 1% of the sent amount in fees.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
| |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for math.piecewise."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
piecewise = tff.math.piecewise
@test_util.run_all_in_graph_and_eager_modes
class Piecewise(parameterized.TestCase, tf.test.TestCase):
  """Tests for methods in piecewise module."""

  def test_find_interval_index_correct_dtype(self):
    """Tests find_interval_index outputs the correct type."""
    result = self.evaluate(piecewise.find_interval_index([1.0], [0.0, 1.0]))
    self.assertIsInstance(result[0], np.int32)

  def test_find_interval_index_one_interval(self):
    """Tests find_interval_index is correct with one half-open interval."""
    result = self.evaluate(piecewise.find_interval_index([1.0], [1.0]))
    self.assertAllEqual(result, [0])
    result = self.evaluate(piecewise.find_interval_index([0.0], [1.0]))
    self.assertAllEqual(result, [-1])
    result = self.evaluate(piecewise.find_interval_index([2.0], [1.0]))
    self.assertAllEqual(result, [0])

  def test_find_interval_index(self):
    """Tests find_interval_index is correct in the general case."""
    interval_lower_xs = [0.25, 0.5, 1.0, 2.0, 3.0]
    query_xs = [0.25, 3.0, 5.0, 0.0, 0.5, 0.8]
    result = piecewise.find_interval_index(query_xs, interval_lower_xs)
    self.assertAllEqual(result, [0, 4, 4, -1, 1, 1])

  def test_find_interval_index_last_interval_is_closed(self):
    """Tests find_interval_index is correct in the general case."""
    result = piecewise.find_interval_index([3.0, 4.0], [2.0, 3.0],
                                           last_interval_is_closed=True)
    self.assertAllEqual(result, [0, 1])

  def test_piecewise_constant_value_no_batch(self):
    """Tests PiecewiseConstantFunc with no batching."""
    for dtype in [np.float32, np.float64]:
      x = np.array([0., 0.1, 2., 11.])
      jump_locations = np.array([0.1, 10], dtype=dtype)
      values = tf.constant([3, 4, 5], dtype=dtype)
      piecewise_func = piecewise.PiecewiseConstantFunc(jump_locations, values,
                                                       dtype=dtype)
      # Also verifies left-continuity
      value = piecewise_func(x)
      self.assertEqual(value.dtype.as_numpy_dtype, dtype)
      expected_value = np.array([3., 3., 4., 5.])
      self.assertAllEqual(value, expected_value)

  def test_piecewise_constant_integral_no_batch(self):
    """Tests PiecewiseConstantFunc with no batching."""
    for dtype in [np.float32, np.float64]:
      x = np.array([-4.1, 0., 1., 1.5, 2., 4.5, 5.5])
      jump_locations = np.array([1, 2, 3, 4, 5], dtype=dtype)
      values = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
      piecewise_func = piecewise.PiecewiseConstantFunc(jump_locations, values,
                                                       dtype=dtype)
      value = piecewise_func.integrate(x, x + 4.1)
      self.assertEqual(value.dtype.as_numpy_dtype, dtype)
      expected_value = np.array([0.41, 1.05, 1.46, 1.66, 1.86, 2.41, 2.46])
      self.assertAllClose(value, expected_value, atol=1e-5, rtol=1e-5)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64))
  def test_piecewise_constant_value_with_batch(self, dtype):
    """Tests PiecewiseConstantFunc with batching."""
    x = np.array([[[0.0, 0.1, 2.0, 11.0], [0.0, 2.0, 3.0, 9.0]],
                  [[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]]])
    jump_locations = np.array([[[0.1, 10.0], [1.5, 10.0]],
                               [[1.0, 2.0], [5.0, 6.0]]])
    values = tf.constant([[[3, 4, 5], [3, 4, 5]],
                          [[3, 4, 5], [3, 4, 5]]], dtype=dtype)
    piecewise_func = piecewise.PiecewiseConstantFunc(jump_locations, values,
                                                     dtype=dtype)
    # Also verifies right-continuity
    value = piecewise_func(x, left_continuous=False)
    with self.subTest('Dtype'):
      self.assertEqual(value.dtype.as_numpy_dtype, dtype)
    expected_value = np.array([[[3.0, 4.0, 4.0, 5.0],
                                [3.0, 4.0, 4.0, 4.0]],
                               [[3.0, 4.0, 5.0, 5.0],
                                [3.0, 4.0, 5.0, 5.0]]])
    with self.subTest('Value'):
      self.assertAllEqual(value, expected_value)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64),
      ('AutoDtype', None))
  def test_piecewise_constant_value_with_batch_and_repetitions(self, dtype):
    """Tests PiecewiseConstantFunc with batching and repetitive values."""
    x = tf.constant([[-4.1, 0.1, 1., 2., 10, 11.],
                     [1., 2., 3., 2., 5., 9.]], dtype=dtype)
    jump_locations = tf.constant([[0.1, 0.1, 1., 1., 10., 10.],
                                  [-1., 1.2, 2.2, 2.2, 2.2, 8.]], dtype=dtype)
    values = tf.constant([[3, 3, 4, 5, 5., 2, 6.],
                          [-1, -5, 2, 5, 5., 5., 1.]], dtype=dtype)
    piecewise_func = piecewise.PiecewiseConstantFunc(jump_locations, values,
                                                     dtype=dtype)
    # Also verifies left-continuity
    value = piecewise_func(x, left_continuous=True)
    if dtype is None:
      with self.subTest('Dtype'):
        self.assertEqual(value.dtype, jump_locations.dtype)
    else:
      with self.subTest('Dtype'):
        self.assertEqual(value.dtype, dtype)
    expected_value = np.array([[3., 3., 4., 5., 5., 6.],
                               [-5., 2., 5., 2., 5., 1.]])
    # Label fixed: was the typo 'DtyValue'; every other test uses 'Value'.
    with self.subTest('Value'):
      self.assertAllEqual(value, expected_value)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64))
  def test_piecewise_constant_integral_with_batch(self, dtype):
    """Tests PiecewiseConstantFunc with batching."""
    x = np.array([[[0.0, 0.1, 2.0, 11.0], [0.0, 2.0, 3.0, 9.0]],
                  [[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]]])
    jump_locations = np.array([[[0.1, 10.0], [1.5, 10.0]],
                               [[1.0, 2.0], [5.0, 6.0]]])
    values = tf.constant([[[3, 4, 5], [3, 4, 5]],
                          [[3, 4, 5], [3, 4, 5]]], dtype=dtype)
    piecewise_func = piecewise.PiecewiseConstantFunc(jump_locations, values,
                                                     dtype=dtype)
    value = piecewise_func.integrate(x, x + 1.1)
    with self.subTest('Dtype'):
      self.assertEqual(value.dtype.as_numpy_dtype, dtype)
    expected_value = np.array([[[4.3, 4.4, 4.4, 5.5],
                                [3.3, 4.4, 4.4, 4.5]],
                               [[3.4, 4.5, 5.5, 5.5],
                                [3.4, 4.5, 5.5, 5.5]]])
    self.assertAllClose(value, expected_value, atol=1e-5, rtol=1e-5)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64),
      ('AutoDtype', None))
  def test_invalid_jump_batch_shape(self, dtype):
    """Tests that `jump_locations` and `values` should have the same batch."""
    jump_locations = np.array([[0.1, 10], [2., 10]])
    values = tf.constant([[[3, 4, 5], [3, 4, 5]]], dtype=dtype)
    with self.assertRaises(ValueError):
      piecewise.PiecewiseConstantFunc(jump_locations, values, dtype=dtype)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64),
      ('AutoDtype', None))
  def test_invalid_value_event_shape(self, dtype):
    """Tests that `values` event shape is `jump_locations` event shape + 1."""
    jump_locations = np.array([[0.1, 10], [2., 10]])
    values = tf.constant([[3, 4, 5, 6], [3, 4, 5, 7]], dtype=dtype)
    with self.assertRaises(ValueError):
      piecewise.PiecewiseConstantFunc(jump_locations, values, dtype=dtype)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64),
      ('AutoDtype', None))
  def test_matrix_event_shape_no_batch_shape(self, dtype):
    """Tests that `values` event shape is `jump_locations` event shape + 1."""
    x = np.array([0., 0.1, 2., 11.])
    jump_locations = [0.1, 10]
    values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
    piecewise_func = piecewise.PiecewiseConstantFunc(
        jump_locations, values, dtype=dtype)
    value = piecewise_func(x)
    integral = piecewise_func.integrate(x, x + 1)
    expected_value = [[[1, 2], [3, 4]], [[1, 2], [3, 4]],
                      [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
    expected_integral = [[[4.6, 5.6], [6.6, 7.6]],
                         [[5, 6], [7, 8]],
                         [[5, 6], [7, 8]],
                         [[9, 10], [11, 12]]]
    if dtype is None:
      with self.subTest('Dtype'):
        # Dtype of jump_locations and of piecewise_func should match
        self.assertEqual(piecewise_func.dtype(), tf.float32)
    else:
      with self.subTest('Dtype'):
        self.assertEqual(dtype, piecewise_func.dtype())
    with self.subTest('Values'):
      self.assertAllClose(value, expected_value, atol=1e-5, rtol=1e-5)
    with self.subTest('Integrals'):
      self.assertAllClose(integral, expected_integral, atol=1e-5, rtol=1e-5)

  @parameterized.named_parameters(
      ('SinglePrecision', tf.float32),
      ('DoublePrecision', tf.float64),
      ('AutoDtype', None))
  def test_3d_event_shape_with_batch_shape(self, dtype):
    """Tests that `values` event shape is `jump_locations` event shape + 1."""
    x = np.array([[0, 1, 2, 3], [0.5, 1.5, 2.5, 3.5]])
    jump_locations = [[0.5, 2], [0.5, 1.5]]
    values = [[[0, 1, 1.5], [2, 3, 0], [1, 0, 1]],
              [[0, 0.5, 1], [1, 3, 2], [2, 3, 1]]]
    piecewise_func = piecewise.PiecewiseConstantFunc(
        jump_locations, values, dtype=dtype)
    value = piecewise_func(x)
    integral = piecewise_func.integrate(x, x + 1)
    expected_value = [[[0, 1, 1.5],
                       [2, 3, 0],
                       [2, 3, 0],
                       [1, 0, 1]],
                      [[0, 0.5, 1],
                       [1, 3, 2],
                       [2, 3, 1],
                       [2, 3, 1]]]
    expected_integral = [[[1, 2, 0.75],
                          [2, 3, 0],
                          [1, 0, 1],
                          [1, 0, 1]],
                         [[1, 3, 2],
                          [2, 3, 1],
                          [2, 3, 1],
                          [2, 3, 1]]]
    with self.subTest('Values'):
      self.assertAllClose(value, expected_value, atol=1e-5, rtol=1e-5)
    with self.subTest('Integrals'):
      self.assertAllClose(integral, expected_integral, atol=1e-5, rtol=1e-5)

  def test_dynamic_shapes(self):
    """Tests for dynamically shaped inputs."""
    dtype = np.float64
    x = tf.constant([[0, 1, 2, 3], [0.5, 1.5, 2.5, 3.5]], dtype=dtype)
    jump_locations = tf.constant([[0.5, 2], [0.5, 1.5]], dtype=dtype)
    values = tf.constant([[[0, 1, 1.5], [2, 3, 0], [1, 0, 1]],
                          [[0, 0.5, 1], [1, 3, 2], [2, 3, 1]]], dtype=dtype)

    @tf.function(
        input_signature=[tf.TensorSpec([None, None], dtype=dtype),
                         tf.TensorSpec([None, None], dtype=dtype),
                         tf.TensorSpec([None, None, None], dtype=dtype)])
    def fn(x, jump_locations, values):
      piecewise_func = piecewise.PiecewiseConstantFunc(
          jump_locations, values, dtype=dtype)
      value = piecewise_func(x)
      integral = piecewise_func.integrate(x, x + 1)
      return value, integral

    value, integral = fn(x, jump_locations, values)
    expected_value = [[[0, 1, 1.5],
                       [2, 3, 0],
                       [2, 3, 0],
                       [1, 0, 1]],
                      [[0, 0.5, 1],
                       [1, 3, 2],
                       [2, 3, 1],
                       [2, 3, 1]]]
    expected_integral = [[[1, 2, 0.75],
                          [2, 3, 0],
                          [1, 0, 1],
                          [1, 0, 1]],
                         [[1, 3, 2],
                          [2, 3, 1],
                          [2, 3, 1],
                          [2, 3, 1]]]
    with self.subTest('Values'):
      self.assertAllClose(value, expected_value, atol=1e-5, rtol=1e-5)
    with self.subTest('Integrals'):
      self.assertAllClose(integral, expected_integral, atol=1e-5, rtol=1e-5)

  def test_convert_to_tensor_or_func_tensors(self):
    """Tests that tensor_or_func converts inputs into Tensors."""
    dtype = tf.float64
    inputs = [2.0, [1, 2, 3], np.arange(1, 5, 1)]
    output = []
    expected = []
    for i in inputs:
      x = (piecewise.convert_to_tensor_or_func(i, dtype))
      # Check that the returned value is a tensor and is_const flag is set.
      output.append((tf.is_tensor(x[0]), x[1]))
      expected.append((True, True))
    self.assertAllEqual(output, expected)

  def test_convert_to_tensor_or_func_PiecewiseConstantFunc(self):
    """Tests that tensor_or_func recognizes inputs of PiecewiseConstantFunc."""
    dtype = tf.float64
    times = np.arange(0, 10, 1)
    values = np.ones(11)
    pwc = piecewise.PiecewiseConstantFunc(times, values, dtype=dtype)
    output = piecewise.convert_to_tensor_or_func(pwc)
    expected = (pwc, False)
    self.assertAllEqual(output, expected)
if __name__ == '__main__':
  # Discover and run all the test cases above via the TF test runner.
  tf.test.main()
| |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle, os, uuid
from pathlib import PurePath
from collections import OrderedDict
from .mesonlib import MesonException, commonpath
from .mesonlib import default_libdir, default_libexecdir, default_prefix
import ast
# Version stamp persisted into the coredata pickle; load() refuses files
# written by a different version.
version = '0.41.0.dev1'
# Build backends selectable via the "backend" builtin option below.
backendlist = ['ninja', 'vs', 'vs2010', 'vs2015', 'vs2017', 'xcode']
class UserOption:
    """Base class for a single user-settable build option."""

    def __init__(self, name, description, choices):
        super().__init__()
        self.name = name
        self.choices = choices
        self.description = description

    def parse_string(self, valuestring):
        """Default parsing: the raw string is the value."""
        return valuestring

    def validate_value(self, value):
        """Check that the input is a valid value and return the
        "cleaned" or "native" version. For example the Boolean
        option could take the string "true" and return True.
        Subclasses must override this.
        """
        raise RuntimeError('Derived option class did not override validate_value.')
class UserStringOption(UserOption):
    """A free-form string option."""

    def __init__(self, name, description, value, choices=None):
        super().__init__(name, description, choices)
        self.set_value(value)

    def validate(self, value):
        # Only genuine str instances are accepted.
        if not isinstance(value, str):
            raise MesonException('Value "%s" for string option "%s" is not a string.' % (str(value), self.name))

    def validate_value(self, value):
        self.validate(value)
        return value

    def set_value(self, newvalue):
        self.validate(newvalue)
        self.value = newvalue
class UserBooleanOption(UserOption):
    """A true/false option."""

    def __init__(self, name, description, value):
        super().__init__(name, description, [True, False])
        self.set_value(value)

    def tobool(self, thing):
        """Coerce a bool, or a case-insensitive "true"/"false" string, to bool."""
        if isinstance(thing, bool):
            return thing
        lowered = thing.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
        raise MesonException('Value %s is not boolean (true or false).' % thing)

    def set_value(self, newvalue):
        self.value = self.tobool(newvalue)

    def parse_string(self, valuestring):
        # Note: unlike tobool(), only the exact lowercase forms are accepted.
        if valuestring == 'true':
            return True
        if valuestring == 'false':
            return False
        raise MesonException('Value "%s" for boolean option "%s" is not a boolean.' % (valuestring, self.name))

    def __bool__(self):
        return self.value

    def validate_value(self, value):
        return self.tobool(value)
class UserComboOption(UserOption):
    """An option restricted to a fixed list of string choices."""

    def __init__(self, name, description, choices, value):
        super().__init__(name, description, choices)
        if not isinstance(self.choices, list):
            raise MesonException('Combo choices must be an array.')
        for choice in self.choices:
            if not isinstance(choice, str):
                raise MesonException('Combo choice elements must be strings.')
        self.set_value(value)

    def set_value(self, newvalue):
        if newvalue not in self.choices:
            optionsstring = ', '.join(['"%s"' % (item,) for item in self.choices])
            raise MesonException('Value "%s" for combo option "%s" is not one of the choices. Possible choices are: %s.' % (newvalue, self.name, optionsstring))
        self.value = newvalue

    def validate_value(self, value):
        if value not in self.choices:
            raise MesonException('Value %s not one of accepted values.' % value)
        return value
class UserStringArrayOption(UserOption):
    """An option holding a list of strings.

    A value may be given either as a list, or as a string containing a
    Python literal array such as "['a', 'b']".
    """

    def __init__(self, name, description, value, **kwargs):
        super().__init__(name, description, kwargs.get('choices', []))
        self.set_value(value)

    def validate(self, value):
        if isinstance(value, str):
            # String form must be a literal array, e.g. "['a', 'b']".
            if not value.startswith('['):
                raise MesonException('Valuestring does not define an array: ' + value)
            newvalue = ast.literal_eval(value)
        else:
            newvalue = value
        if not isinstance(newvalue, list):
            raise MesonException('"{0}" should be a string array, but it is not'.format(str(newvalue)))
        for i in newvalue:
            if not isinstance(i, str):
                # BUG FIX: the message previously formatted the whole array
                # (str(newvalue)) instead of the offending element.
                raise MesonException('String array element "{0}" is not a string.'.format(str(i)))
        return newvalue

    def set_value(self, newvalue):
        self.value = self.validate(newvalue)

    def validate_value(self, value):
        self.validate(value)
        return value
# This class contains all data that must persist over multiple
# invocations of Meson. It is roughly the same thing as
# cmakecache.
class CoreData:
    """All data that must persist over multiple invocations of Meson.
    It is roughly the same thing as cmakecache.
    """

    def __init__(self, options):
        # GUIDs consumed by the Visual Studio backends for solution files.
        self.guid = str(uuid.uuid4()).upper()
        self.test_guid = str(uuid.uuid4()).upper()
        self.regen_guid = str(uuid.uuid4()).upper()
        self.target_guids = {}
        self.version = version
        self.init_builtins(options)
        self.user_options = {}
        self.compiler_options = {}
        self.base_options = {}
        # These external_*args, are set via env vars CFLAGS, LDFLAGS, etc
        # but only when not cross-compiling.
        self.external_preprocess_args = {} # CPPFLAGS only
        self.external_args = {} # CPPFLAGS + CFLAGS
        self.external_link_args = {} # CFLAGS + LDFLAGS (with MSVC: only LDFLAGS)
        if options.cross_file is not None:
            # Store as an absolute path so later invocations from another
            # cwd still find the same cross file.
            self.cross_file = os.path.join(os.getcwd(), options.cross_file)
        else:
            self.cross_file = None
        self.wrap_mode = options.wrap_mode
        self.compilers = OrderedDict()
        self.cross_compilers = OrderedDict()
        self.deps = OrderedDict()
        self.modules = {}
        # Only to print a warning if it changes between Meson invocations.
        self.pkgconf_envvar = os.environ.get('PKG_CONFIG_PATH', '')

    def sanitize_prefix(self, prefix):
        # Require an absolute prefix and strip a redundant trailing slash.
        if not os.path.isabs(prefix):
            raise MesonException('prefix value {!r} must be an absolute path'
                                 ''.format(prefix))
        if prefix.endswith('/') or prefix.endswith('\\'):
            # On Windows we need to preserve the trailing slash if the
            # string is of type 'C:\' because 'C:' is not an absolute path.
            if len(prefix) == 3 and prefix[1] == ':':
                pass
            else:
                prefix = prefix[:-1]
        return prefix

    def sanitize_dir_option_value(self, prefix, option, value):
        '''
        If the option is an installation directory option and the value is an
        absolute path, check that it resides within prefix and return the value
        as a path relative to the prefix.

        This way everyone can do f.ex, get_option('libdir') and be sure to get
        the library directory relative to prefix.
        '''
        if option.endswith('dir') and os.path.isabs(value) and \
           option not in builtin_dir_noprefix_options:
            # Value must be a subdir of the prefix
            # commonpath will always return a path in the native format, so we
            # must use pathlib.PurePath to do the same conversion before
            # comparing.
            if commonpath([value, prefix]) != str(PurePath(prefix)):
                m = 'The value of the {!r} option is {!r} which must be a ' \
                    'subdir of the prefix {!r}.\nNote that if you pass a ' \
                    'relative path, it is assumed to be a subdir of prefix.'
                raise MesonException(m.format(option, value, prefix))
            # Convert path to be relative to prefix
            skip = len(prefix) + 1
            value = value[skip:]
        return value

    def init_builtins(self, options):
        # Populate self.builtins from command-line options plus defaults.
        self.builtins = {}
        # Sanitize prefix
        options.prefix = self.sanitize_prefix(options.prefix)
        # Initialize other builtin options
        for key in get_builtin_options():
            if hasattr(options, key):
                value = getattr(options, key)
                value = self.sanitize_dir_option_value(options.prefix, key, value)
                setattr(options, key, value)
            else:
                value = get_builtin_option_default(key)
            # builtin_options rows are [OptionType, description, ...],
            # so this builds (name, description, ..., value) for the ctor.
            args = [key] + builtin_options[key][1:-1] + [value]
            self.builtins[key] = builtin_options[key][0](*args)

    def get_builtin_option(self, optname):
        # Returns the current value, not the option object.
        if optname in self.builtins:
            return self.builtins[optname].value
        raise RuntimeError('Tried to get unknown builtin option %s.' % optname)

    def set_builtin_option(self, optname, value):
        # prefix gets its own sanitizer; dir options are re-relativized.
        if optname == 'prefix':
            value = self.sanitize_prefix(value)
        elif optname in self.builtins:
            prefix = self.builtins['prefix'].value
            value = self.sanitize_dir_option_value(prefix, optname, value)
        else:
            raise RuntimeError('Tried to set unknown builtin option %s.' % optname)
        self.builtins[optname].set_value(value)

    def validate_option_value(self, option_name, override_value):
        # Search every option table; first match wins.
        for opts in (self.builtins, self.base_options, self.compiler_options, self.user_options):
            if option_name in opts:
                opt = opts[option_name]
                return opt.validate_value(override_value)
        raise MesonException('Tried to validate unknown option %s.' % option_name)
def load(filename):
    """Unpickle a CoreData object from filename, validating type and version."""
    corrupted_msg = 'Coredata file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    try:
        with open(filename, 'rb') as f:
            obj = pickle.load(f)
    except pickle.UnpicklingError:
        raise MesonException(corrupted_msg)
    if not isinstance(obj, CoreData):
        raise MesonException(corrupted_msg)
    if obj.version != version:
        raise MesonException('Build directory has been generated with Meson version %s, which is incompatible with current version %s.\nPlease delete this build directory AND create a new one.' %
                             (obj.version, version))
    return obj
def save(obj, filename):
    """Pickle obj to filename; obj's version must match this module's."""
    if obj.version != version:
        raise MesonException('Fatal version mismatch corruption.')
    with open(filename, 'wb') as outfile:
        pickle.dump(obj, outfile)
def get_builtin_options():
    """Return the names of all builtin options as a list."""
    return list(builtin_options)
def is_builtin_option(optname):
    """Return True if optname names a builtin option.

    Tests the dict directly (O(1)) instead of materializing the key list
    via get_builtin_options() on every call.
    """
    return optname in builtin_options
def get_builtin_option_choices(optname):
    """Return the accepted values for a builtin option (None = any string)."""
    if not is_builtin_option(optname):
        raise RuntimeError('Tried to get the supported values for an unknown builtin option \'%s\'.' % optname)
    option_type = builtin_options[optname][0]
    if option_type == UserStringOption:
        return None
    if option_type == UserBooleanOption:
        return [True, False]
    return builtin_options[optname][2]
def get_builtin_option_description(optname):
    """Return the human-readable description of a builtin option."""
    if not is_builtin_option(optname):
        raise RuntimeError('Tried to get the description for an unknown builtin option \'%s\'.' % optname)
    return builtin_options[optname][1]
def get_builtin_option_default(optname):
    """Return the default value for a builtin option."""
    if not is_builtin_option(optname):
        raise RuntimeError('Tried to get the default value for an unknown builtin option \'%s\'.' % optname)
    definition = builtin_options[optname]
    # Combo rows are [type, description, choices, default]; all other
    # rows are [type, description, default].
    if definition[0] == UserComboOption:
        return definition[3]
    return definition[2]
# Table of all builtin options. Row layout:
#   [OptionType, description, default]                 for string/boolean
#   [OptionType, description, choices, default]        for combo
# (get_builtin_option_default() and CoreData.init_builtins() rely on this.)
builtin_options = {
    'buildtype': [UserComboOption, 'Build type to use.', ['plain', 'debug', 'debugoptimized', 'release', 'minsize'], 'debug'],
    'strip': [UserBooleanOption, 'Strip targets on install.', False],
    'unity': [UserComboOption, 'Unity build.', ['on', 'off', 'subprojects'], 'off'],
    'prefix': [UserStringOption, 'Installation prefix.', default_prefix()],
    'libdir': [UserStringOption, 'Library directory.', default_libdir()],
    'libexecdir': [UserStringOption, 'Library executable directory.', default_libexecdir()],
    'bindir': [UserStringOption, 'Executable directory.', 'bin'],
    'sbindir': [UserStringOption, 'System executable directory.', 'sbin'],
    'includedir': [UserStringOption, 'Header file directory.', 'include'],
    'datadir': [UserStringOption, 'Data file directory.', 'share'],
    'mandir': [UserStringOption, 'Manual page directory.', 'share/man'],
    'infodir': [UserStringOption, 'Info page directory.', 'share/info'],
    'localedir': [UserStringOption, 'Locale data directory.', 'share/locale'],
    # sysconfdir, localstatedir and sharedstatedir are a bit special. These defaults to ${prefix}/etc,
    # ${prefix}/var and ${prefix}/com but nobody uses that. Instead they always set it
    # manually to /etc, /var and /var/lib. This default values is thus pointless and not really used
    # but we set it to this for consistency with other systems.
    #
    # Projects installing to sysconfdir, localstatedir or sharedstatedir probably want
    # to set the following in project():
    #
    # default_options : ['sysconfdir=/etc', 'localstatedir=/var', 'sharedstatedir=/var/lib']
    'sysconfdir': [UserStringOption, 'Sysconf data directory.', 'etc'],
    'localstatedir': [UserStringOption, 'Localstate data directory.', 'var'],
    'sharedstatedir': [UserStringOption, 'Architecture-independent data directory.', 'com'],
    'werror': [UserBooleanOption, 'Treat warnings as errors.', False],
    'warning_level': [UserComboOption, 'Compiler warning level to use.', ['1', '2', '3'], '1'],
    'layout': [UserComboOption, 'Build directory layout.', ['mirror', 'flat'], 'mirror'],
    'default_library': [UserComboOption, 'Default library type.', ['shared', 'static'], 'shared'],
    'backend': [UserComboOption, 'Backend to use.', backendlist, 'ninja'],
    'stdsplit': [UserBooleanOption, 'Split stdout and stderr in test logs.', True],
    'errorlogs': [UserBooleanOption, "Whether to print the logs from failing tests.", True],
}

# Installation directories that can reside in a path outside of the prefix
# (see CoreData.sanitize_dir_option_value()).
builtin_dir_noprefix_options = {'sysconfdir', 'localstatedir', 'sharedstatedir'}

# Target names reserved by the backends; user targets may not use these.
forbidden_target_names = {'clean': None,
                          'clean-ctlist': None,
                          'clean-gcno': None,
                          'clean-gcda': None,
                          'coverage': None,
                          'coverage-text': None,
                          'coverage-xml': None,
                          'coverage-html': None,
                          'phony': None,
                          'PHONY': None,
                          'all': None,
                          'test': None,
                          'benchmark': None,
                          'install': None,
                          'uninstall': None,
                          'build.ninja': None,
                          'scan-build': None,
                          'reconfigure': None,
                          'dist': None,
                          'distcheck': None,
                          }
| |
"""TitanCNA: Subclonal CNV calling and loss of heterogeneity in cancer.
https://github.com/gavinha/TitanCNA
"""
import csv
import glob
import os
import shutil
import pandas as pd
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.heterogeneity import chromhacks, loh
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import effects, vcfutils
from bcbio.structural import cnvkit
def run(items):
    """Run TitanCNA subclonal CNV calling on a tumor/normal batch.

    Returns the input items unchanged when there is no tumor/normal pairing
    or not enough heterozygous-site data; otherwise returns
    [normal (if present), tumor] with a "titancna" result appended to the
    tumor sample's "sv" list.
    """
    from bcbio import heterogeneity
    paired = vcfutils.get_paired(items)
    if not paired:
        logger.info("Skipping TitanCNA; no somatic tumor calls in batch: %s" %
                    " ".join([dd.get_sample_name(d) for d in items]))
        return items
    work_dir = _sv_workdir(paired.tumor_data)
    cn_file = _titan_cn_file(dd.get_normalized_depth(paired.tumor_data), work_dir, paired.tumor_data)
    het_file = _titan_het_file(heterogeneity.get_variants(paired.tumor_data), work_dir, paired)
    if _should_run(het_file):
        # Explore a grid of ploidy / cluster-count combinations, then pick
        # the best solution across the per-ploidy runs.
        ploidy_outdirs = []
        for ploidy in [2, 3, 4]:
            for num_clusters in [1, 2, 3]:
                out_dir = _run_titancna(cn_file, het_file, ploidy, num_clusters, work_dir, paired.tumor_data)
            ploidy_outdirs.append((ploidy, out_dir))
        solution_file = _run_select_solution(ploidy_outdirs, work_dir, paired.tumor_data)
    else:
        logger.info("Skipping TitanCNA; not enough input data: %s" %
                    " ".join([dd.get_sample_name(d) for d in items]))
        return items
    out = []
    if paired.normal_data:
        out.append(paired.normal_data)
    if "sv" not in paired.tumor_data:
        paired.tumor_data["sv"] = []
    paired.tumor_data["sv"].append(_finalize_sv(solution_file, paired.tumor_data))
    out.append(paired.tumor_data)
    return out
def _finalize_sv(solution_file, data):
    """Add output files from TitanCNA calling optional solution.

    Reads the two-line optimal-solution summary (header line + value line)
    and, when a solution path is present, attaches purity/ploidy estimates,
    whichever plot files exist on disk, the segmentation calls, a converted
    VCF and an LOH summary.
    """
    out = {"variantcaller": "titancna"}
    with open(solution_file) as in_handle:
        # Header fields and the first data row, zipped into a dict.
        solution = dict(zip(in_handle.readline().strip("\r\n").split("\t"),
                            in_handle.readline().strip("\r\n").split("\t")))
    if solution.get("path"):
        out["purity"] = solution["purity"]
        out["ploidy"] = solution["ploidy"]
        out["cellular_prevalence"] = [x.strip() for x in solution["cellPrev"].split(",")]
        base = os.path.basename(solution["path"])
        # Only advertise plots that were actually produced.
        out["plot"] = dict([(n, solution["path"] + ext) for (n, ext) in [("rplots", ".Rplots.pdf"),
                                                                         ("cf", "/%s_CF.pdf" % base),
                                                                         ("cna", "/%s_CNA.pdf" % base),
                                                                         ("loh", "/%s_LOH.pdf" % base)]
                            if os.path.exists(solution["path"] + ext)])
        out["subclones"] = "%s.segs.txt" % solution["path"]
        out["hetsummary"] = solution_file
        out["vrn_file"] = to_vcf(out["subclones"], "TitanCNA", _get_header, _seg_to_vcf, data)
        out["lohsummary"] = loh.summary_status(out, data)
    return out
def _should_run(het_file):
"""Check for enough input data to proceed with analysis.
"""
has_hets = False
with open(het_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 1:
has_hets = True
break
return has_hets
def _run_select_solution(ploidy_outdirs, work_dir, data):
    """Select the optimal TitanCNA solution across the per-ploidy runs.

    Writes (or reuses) ``optimalClusters.txt`` in the work directory and
    returns its path.
    """
    out_file = os.path.join(work_dir, "optimalClusters.txt")
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        ploidy_inputs = " ".join("--ploidyRun%s=%s" % (p, d) for p, d in ploidy_outdirs)
        cmd = "titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}"
        do.run(cmd.format(**locals()), "TitanCNA: select optimal solution")
    return out_file
def _run_titancna(cn_file, het_file, ploidy, num_clusters, work_dir, data):
    """Run titanCNA wrapper script on given ploidy and clusters.

    Builds the titanCNA.R command line for one (ploidy, num_clusters)
    combination, runs it in a transactional temp directory, then moves the
    per-cluster outputs into the per-ploidy directory, which is returned.
    """
    sample = dd.get_sample_name(data)
    cores = dd.get_num_cores(data)
    export_cmd = utils.get_R_exports()
    ploidy_dir = utils.safe_makedir(os.path.join(work_dir, "run_ploidy%s" % ploidy))
    cluster_dir = "%s_cluster%02d" % (sample, num_clusters)
    out_dir = os.path.join(ploidy_dir, cluster_dir)
    if not utils.file_uptodate(out_dir + ".titan.txt", cn_file):
        with tx_tmpdir(data) as tmp_dir:
            with utils.chdir(tmp_dir):
                cmd = ("{export_cmd} && titanCNA.R --id {sample} --hetFile {het_file} --cnFile {cn_file} "
                       "--numClusters {num_clusters} --ploidy {ploidy} --numCores {cores} --outDir {tmp_dir} "
                       "--libdir None")
                # Restrict calling to autosomes plus X, stripping any "chr" prefix.
                chroms = ["'%s'" % c.name.replace("chr", "") for c in ref.file_contigs(dd.get_ref_file(data))
                          if chromhacks.is_autosomal_or_x(c.name)]
                if "'X'" not in chroms:
                    chroms += ["'X'"]
                # Use UCSC style naming for human builds to support BSgenome
                genome_build = ("hg19" if dd.get_genome_build(data) in ["GRCh37", "hg19"]
                                else dd.get_genome_build(data))
                cmd += """ --chrs "c(%s)" """ % ",".join(chroms)
                cmd += " --genomeBuild {genome_build}"
                if data["genome_build"] in ("hg19", "hg38"):
                    cmd += " --genomeStyle UCSC"
                if data["genome_build"] in ["hg38"]:
                    # Locate the cytoband data shipped alongside the installed
                    # titanCNA.R script (two directories up, under data/).
                    data_dir = os.path.normpath(os.path.join(
                        os.path.dirname(os.path.realpath(os.path.join(
                            os.path.dirname(utils.Rscript_cmd()), "titanCNA.R"))),
                        os.pardir, os.pardir, "data"))
                    cytoband_file = os.path.join(data_dir, "cytoBand_hg38.txt")
                    assert os.path.exists(cytoband_file), cytoband_file
                    cmd += " --cytobandFile %s" % cytoband_file
                # TitanCNA's model is influenced by the variance in read coverage data
                # and data type: set reasonable defaults for non-WGS runs
                # (see https://github.com/gavinha/TitanCNA/tree/master/scripts/R_scripts)
                if dd.get_coverage_interval(data) != "genome":
                    cmd += " --alphaK=2500 --alphaKHigh=2500"
                do.run(cmd.format(**locals()), "TitanCNA CNV detection: ploidy %s, cluster %s" % (ploidy, num_clusters))
                # Collect this cluster's outputs into the shared ploidy directory.
                for fname in glob.glob(os.path.join(tmp_dir, cluster_dir + "*")):
                    shutil.move(fname, ploidy_dir)
                if os.path.exists(os.path.join(tmp_dir, "Rplots.pdf")):
                    shutil.move(os.path.join(tmp_dir, "Rplots.pdf"),
                                os.path.join(ploidy_dir, "%s.Rplots.pdf" % cluster_dir))
    return ploidy_dir
def _sv_workdir(data):
    """Return (creating if necessary) the per-sample TitanCNA work directory."""
    base = os.path.join(dd.get_work_dir(data), "structural",
                        dd.get_sample_name(data), "titancna")
    return utils.safe_makedir(base)
def _titan_het_file(vrn_files, work_dir, paired):
    """Prepare the TitanCNA heterozygous-site input file.

    Uses the first compatible variant call set and writes a tab-separated
    file via bubbletree.prep_vrn_file with the columns defined in OutWriter;
    returns the output path.
    """
    assert vrn_files, "Did not find compatible variant calling files for TitanCNA inputs"
    from bcbio.heterogeneity import bubbletree

    class OutWriter:
        """Row-writing adapter handed to bubbletree.prep_vrn_file."""
        def __init__(self, out_handle):
            self.writer = csv.writer(out_handle, dialect="excel-tab")

        def write_header(self):
            self.writer.writerow(["Chr", "Position", "Ref", "RefCount", "Nref", "NrefCount", "NormQuality"])

        def write_row(self, rec, stats):
            # Only keep records with a positive quality score.
            if rec.qual and float(rec.qual) > 0:
                self.writer.writerow([rec.chrom, rec.pos, rec.ref, stats["tumor"]["depth"] - stats["tumor"]["alt"],
                                      rec.alts[0], stats["tumor"]["alt"], rec.qual])

    return bubbletree.prep_vrn_file(vrn_files[0]["vrn_file"], vrn_files[0]["variantcaller"],
                                    work_dir, paired, OutWriter)
def _titan_cn_file(cnr_file, work_dir, data):
    """Convert CNVkit or GATK4 normalized input into TitanCNA ready format.

    Selects the caller-specific columns and writes a chrom/start/end/logR
    tab-separated file next to the work directory; returns its path.
    """
    out_file = os.path.join(work_dir, "%s.cn" % (utils.splitext_plus(os.path.basename(cnr_file))[0]))
    support_cols = {"cnvkit": ["chromosome", "start", "end", "log2"],
                    "gatk-cnv": ["CONTIG", "START", "END", "LOG2_COPY_RATIO"]}
    cols = support_cols[cnvkit.bin_approach(data)]
    if not utils.file_uptodate(out_file, cnr_file):
        with file_transaction(data, out_file) as tx_out_file:
            iterator = pd.read_csv(cnr_file, sep="\t", iterator=True, header=0, comment="@")
            with open(tx_out_file, "w") as handle:
                # NOTE(review): to_csv writes its header for every chunk; with
                # iterator=True and no chunksize a single chunk is produced so
                # this appears harmless — confirm before adding a chunksize.
                for chunk in iterator:
                    chunk = chunk[cols]
                    chunk.columns = ["chrom", "start", "end", "logR"]
                    if cnvkit.bin_approach(data) == "cnvkit":
                        # Shift starts by one; assumes CNVkit starts are
                        # 0-based — TODO confirm.
                        chunk['start'] += 1
                    chunk.to_csv(handle, mode="a", sep="\t", index=False)
    return out_file
# ## VCF conversion
# Minimal VCF 4.2 header template for representing TitanCNA segment calls;
# the {caller} placeholder is filled in by to_vcf().
_vcf_header = """##fileformat=VCFv4.2
##source={caller}
##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">
##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">
##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">
##INFO=<ID=FOLD_CHANGE_LOG,Number=1,Type=Float,Description="Log fold change">
##INFO=<ID=PROBES,Number=1,Type=Integer,Description="Number of probes in CNV">
##INFO=<ID=CN,Number=1,Type=Integer,Description="Copy Number: Overall">
##INFO=<ID=MajorCN,Number=1,Type=Integer,Description="Copy Number: Major allele">
##INFO=<ID=MinorCN,Number=1,Type=Integer,Description="Copy Number: Minor allele">
##ALT=<ID=DEL,Description="Deletion">
##ALT=<ID=DUP,Description="Duplication">
##ALT=<ID=LOH,Description="Loss of heterozygosity">
##ALT=<ID=CNV,Description="Copy number variable region">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
"""
def _get_header(in_handle):
return in_handle.readline().strip().split("\t"), in_handle
def _seg_to_vcf(cur):
    """Convert one TitanCNA segs row (column name -> value dict) into a
    list of VCF fields."""
    svtype = _get_svtype(cur["TITAN_call"])
    start = cur["Start_Position.bp."]
    end = cur["End_Position.bp."]
    info_fields = ["SVTYPE=%s" % svtype,
                   "END=%s" % end,
                   "SVLEN=%s" % (int(end) - int(start)),
                   "CN=%s" % cur["Copy_Number"],
                   "MajorCN=%s" % cur["MajorCN"],
                   "MinorCN=%s" % cur["MinorCN"],
                   "FOLD_CHANGE_LOG=%s" % cur["Median_logR"]]
    return [cur["Chromosome"], start, ".", "N", "<%s>" % svtype, ".",
            ".", ";".join(info_fields), "GT", "0/1"]
def to_vcf(in_file, caller, header_fn, vcf_fn, data, sep="\t"):
    """Convert output TitanCNA segs file into bgzipped VCF.

    header_fn parses the input's column header; vcf_fn maps one row dict to
    a list of VCF fields (or a falsy value to skip the row).  The result is
    bgzipped/indexed and annotated with snpEff effects when available.
    """
    out_file = "%s.vcf" % utils.splitext_plus(in_file)[0]
    if not utils.file_exists(out_file + ".gz") and not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    out_handle.write(_vcf_header.format(caller=caller))
                    out_handle.write("\t".join(["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
                                                "FILTER", "INFO", "FORMAT", dd.get_sample_name(data)]) + "\n")
                    header, in_handle = header_fn(in_handle)
                    for line in in_handle:
                        out = vcf_fn(dict(zip(header, line.strip().split(sep))))
                        if out:
                            out_handle.write("\t".join(out) + "\n")
    out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    effects_vcf, _ = effects.add_to_vcf(out_file, data, "snpeff")
    return effects_vcf or out_file
def _get_svtype(call):
"""Retrieve structural variant type from current TitanCNA events.
homozygous deletion (HOMD),
hemizygous deletion LOH (DLOH),
copy neutral LOH (NLOH),
diploid heterozygous (HET),
amplified LOH (ALOH),
gain/duplication of 1 allele (GAIN),
allele-specific copy number amplification (ASCNA),
balanced copy number amplification (BCNA),
unbalanced copy number amplification (UBCNA)
"""
if call in set(["HOMD", "DLOH"]):
return "DEL"
elif call in set(["ALOH", "GAIN", "ASCNA", "BCNA", "UBCNA"]):
return "DUP"
elif call in set(["NLOH"]):
return "LOH"
else:
return "CNV"
| |
"""Test read write
"""
from xml.etree import ElementTree as ET
from io import BytesIO
from ...gui import test
from ...registry import global_registry, WidgetRegistry, WidgetDescription
from .. import Scheme, SchemeNode, SchemeLink, \
SchemeArrowAnnotation, SchemeTextAnnotation
from .. import readwrite
from ..readwrite import scheme_to_ows_stream, parse_scheme, scheme_load
class TestReadWrite(test.QAppTestCase):
    """Round-trip and parser tests for scheme read/write support."""

    def _build_scheme(self):
        """Build a small File -> Discretize -> Naive Bayes scheme with one
        arrow and one text annotation, used by the round-trip tests."""
        reg = global_registry()
        base = "Orange.widgets"
        file_desc = reg.widget(base + ".data.owfile.OWFile")
        discretize_desc = reg.widget(base + ".data.owdiscretize.OWDiscretize")
        bayes_desc = reg.widget(base + ".classify.ownaivebayes.OWNaiveBayes")

        scheme = Scheme()
        file_node = SchemeNode(file_desc)
        discretize_node = SchemeNode(discretize_desc)
        bayes_node = SchemeNode(bayes_desc)

        scheme.add_node(file_node)
        scheme.add_node(discretize_node)
        scheme.add_node(bayes_node)

        scheme.add_link(SchemeLink(file_node, "Data",
                                   discretize_node, "Data"))
        scheme.add_link(SchemeLink(discretize_node, "Data",
                                   bayes_node, "Data"))

        scheme.add_annotation(SchemeArrowAnnotation((0, 0), (10, 10)))
        scheme.add_annotation(SchemeTextAnnotation((0, 100, 200, 200), "$$"))
        return scheme

    def _assert_schemes_equal(self, scheme, scheme_1):
        """Assert that two schemes agree on nodes, links and annotations."""
        self.assertEqual(len(scheme.nodes), len(scheme_1.nodes))
        self.assertEqual(len(scheme.links), len(scheme_1.links))
        self.assertEqual(len(scheme.annotations), len(scheme_1.annotations))

        for n1, n2 in zip(scheme.nodes, scheme_1.nodes):
            self.assertEqual(n1.position, n2.position)
            self.assertEqual(n1.title, n2.title)

        for link1, link2 in zip(scheme.links, scheme_1.links):
            self.assertEqual(link1.source_type(), link2.source_type())
            self.assertEqual(link1.sink_type(), link2.sink_type())
            self.assertEqual(link1.source_channel.name,
                             link2.source_channel.name)
            self.assertEqual(link1.sink_channel.name,
                             link2.sink_channel.name)
            self.assertEqual(link1.enabled, link2.enabled)

        for annot1, annot2 in zip(scheme.annotations, scheme_1.annotations):
            self.assertIs(type(annot1), type(annot2))
            if isinstance(annot1, SchemeTextAnnotation):
                self.assertEqual(annot1.text, annot2.text)
                self.assertEqual(annot1.rect, annot2.rect)
            else:
                self.assertEqual(annot1.start_pos, annot2.start_pos)
                self.assertEqual(annot1.end_pos, annot2.end_pos)

    def test_io(self):
        """Round trip through scheme_to_ows_stream and parse_scheme."""
        scheme = self._build_scheme()
        stream = BytesIO()
        scheme_to_ows_stream(scheme, stream)
        stream.seek(0)
        scheme_1 = parse_scheme(Scheme(), stream)
        self._assert_schemes_equal(scheme, scheme_1)

    def test_io2(self):
        """Round trip through scheme_to_ows_stream and scheme_load."""
        scheme = self._build_scheme()
        stream = BytesIO()
        scheme_to_ows_stream(scheme, stream)
        stream.seek(0)
        scheme_1 = scheme_load(Scheme(), stream)
        self._assert_schemes_equal(scheme, scheme_1)

    def test_safe_evals(self):
        """string_eval/tuple_eval/terminal_eval accept only safe literals."""
        s = readwrite.string_eval(r"'\x00\xff'")
        self.assertEqual(s, chr(0) + chr(255))

        with self.assertRaises(ValueError):
            readwrite.string_eval("[1, 2]")

        t = readwrite.tuple_eval("(1, 2.0, 'a')")
        self.assertEqual(t, (1, 2.0, 'a'))

        with self.assertRaises(ValueError):
            readwrite.tuple_eval("u'string'")

        with self.assertRaises(ValueError):
            readwrite.tuple_eval("(1, [1, [2, ]])")

        self.assertIs(readwrite.terminal_eval("True"), True)
        self.assertIs(readwrite.terminal_eval("False"), False)
        self.assertIs(readwrite.terminal_eval("None"), None)

        self.assertEqual(readwrite.terminal_eval("42"), 42)
        self.assertEqual(readwrite.terminal_eval("'42'"), '42')

    def test_literal_dump(self):
        """literal_dumps/literal_loads round trip and reject bad input."""
        struct = {1: [{(1, 2): ""}],
                  True: 1.0,
                  None: None}

        s = readwrite.literal_dumps(struct)
        self.assertEqual(readwrite.literal_loads(s), struct)

        with self.assertRaises(ValueError):
            recur = [1]
            recur.append(recur)
            readwrite.literal_dumps(recur)

        with self.assertRaises(TypeError):
            readwrite.literal_dumps(self)

    def test_1_0_parse(self):
        """Parse a version 1.0 document and resolve widget names."""
        tree = ET.parse(BytesIO(FOOBAR_v10))
        parsed = readwrite.parse_ows_etree_v_1_0(tree)
        self.assertIsInstance(parsed, readwrite._scheme)
        self.assertEqual(parsed.version, "1.0")
        self.assertTrue(len(parsed.nodes) == 2)
        self.assertTrue(len(parsed.links) == 2)

        qnames = [node.qualified_name for node in parsed.nodes]
        self.assertSetEqual(set(qnames), set(["foo", "bar"]))

        reg = foo_registry()

        parsed = readwrite.resolve_1_0(parsed, reg)

        qnames = [node.qualified_name for node in parsed.nodes]
        self.assertSetEqual(set(qnames),
                            set(["package.foo", "frob.bar"]))
        projects = [node.project_name for node in parsed.nodes]
        self.assertSetEqual(set(projects), set(["Foo", "Bar"]))

    def test_resolve_replaced(self):
        """Replaced widget names in a 2.0 document resolve to replacements."""
        tree = ET.parse(BytesIO(FOOBAR_v20))
        parsed = readwrite.parse_ows_etree_v_2_0(tree)
        self.assertIsInstance(parsed, readwrite._scheme)
        self.assertEqual(parsed.version, "2.0")
        self.assertTrue(len(parsed.nodes) == 2)
        self.assertTrue(len(parsed.links) == 2)

        qnames = [node.qualified_name for node in parsed.nodes]
        self.assertSetEqual(set(qnames), set(["package.foo", "package.bar"]))

        reg = foo_registry()

        parsed = readwrite.resolve_replaced(parsed, reg)
        qnames = [node.qualified_name for node in parsed.nodes]
        self.assertSetEqual(set(qnames),
                            set(["package.foo", "frob.bar"]))
        projects = [node.project_name for node in parsed.nodes]
        self.assertSetEqual(set(projects), set(["Foo", "Bar"]))
def foo_registry():
    """Build a registry with a Foo widget plus a Bar widget whose
    qualified name replaces the legacy "package.bar"."""
    reg = WidgetRegistry()
    foo = WidgetDescription(
        name="Foo",
        id="foooo",
        qualified_name="package.foo",
        project_name="Foo"
    )
    bar = WidgetDescription(
        name="Bar",
        id="barrr",
        qualified_name="frob.bar",
        project_name="Bar",
        replaces=["package.bar"]
    )
    reg.register_widget(foo)
    reg.register_widget(bar)
    return reg
FOOBAR_v10 = b"""<?xml version="1.0" ?>
<schema>
<widgets>
<widget caption="Foo" widgetName="foo" xPos="1" yPos="2"/>
<widget caption="Bar" widgetName="bar" xPos="2" yPos="3"/>
</widgets>
<channels>
<channel enabled="1" inWidgetCaption="Foo" outWidgetCaption="Bar"
signals="[('foo', 'bar')]"/>
<channel enabled="0" inWidgetCaption="Foo" outWidgetCaption="Bar"
signals="[('foo1', 'bar1')]"/>
</channels>
<settings settingsDictionary="{}"/>
</schema>
"""
FOOBAR_v20 = b"""<?xml version="1.0" ?>
<scheme title="FooBar" description="Foo to the bar" version="2.0">
<nodes>
<node id="0" title="Foo" position="1, 2" project_name="Foo"
qualified_name="package.foo" />
<node id="1" title="Bar" position="2, 3" project_name="Foo"
qualified_name="package.bar" />
</nodes>
<links>
<link enabled="true" id="0" sink_channel="bar" sink_node_id="1"
source_channel="foo" source_node_id="0" />
<link enabled="false" id="1" sink_channel="bar1" sink_node_id="1"
source_channel="foo1" source_node_id="0" />
</links>
</scheme>
"""
| |
# -*- coding: utf-8 -*-
from collections import OrderedDict
# Hierarchy of feature categories -> (sub-categories ->) feature names/tags.
# Fix: a missing comma after 'Roundabout' previously concatenated it with
# 'Mini-roundabout' into the bogus entry 'RoundaboutMini-roundabout'.
# NOTE(review): 'Landill' and 'Hazadous' look like typos but are kept as-is
# since these strings may be matched verbatim elsewhere.
t = {
    'Building digitizing': [
        {'Building': 'building:yes'}
    ],
    'Building feature': [
        {'Housenumber': 'addr:housenumber'},
        {'Housename': 'addr:housename'},
        {'Street': 'addr:street'},
    ],
    'Place': {
        'Places': [
            {'name': 'City', 'tag': 'place:city', 'geom': ['node', 'area']},
            {'Town': 'place:town'},
            {'Suburb': 'place:suburb'},
            {'Neighborhood': 'place:neighbourhood'},
            {'Village': 'place:village'},
            {'Hamlet': 'place:hamlet'},
            {'Isolated Dwelling': 'place:isolated_dwelling'},
            {'Locality': 'place:locality'}
        ],
    },
    'Transportation': {
        'Roads': [
            'Roads and streets', 'Roads under construction', 'Paths'
        ],
        'Other road objects': [
            'Parking', 'Car Park', 'Parking entrance', 'Motorcycle Parking', 'Bicycle Parking', 'Roundabout',
            'Mini-roundabout', 'Turning Circle', 'Bridge', 'Tunnel', 'Ford', 'Fire Hydrant', 'Traffic Signal',
            'Stop', 'Pedestrian Crossing', 'Traffic Calming', 'Passing Place'
        ],
        'Transportation means': [
            'Bus Station', 'Bus Stop', 'Bus Platform', 'Taxi', 'Airport', 'Airport Ground', 'Helipad', 'Runway',
            'Taxiway', 'Apron', 'Hangar', 'Terminal', 'Gate', 'Road Restrictions', 'Road Practicability'
        ],
        'Barriers': [
            'Toll Booth', 'Border Control', 'Bollard', 'Spikes', 'Hedge', 'Fence', 'Wall', 'City Wall',
            'Retaining Wall', 'Entrance', 'Gate', 'Obstacles'
        ],
    },
    'Utilities': {
        'Water and sanitation': [
            'Drinking Water', 'Water Well', 'Spring', 'Pipeline', 'Water Storage', 'Water Tower',
            'Water Works', 'Showers', 'Toilets and latrines', 'Wastewater Plant', 'Watermill', 'Water Canals',
            'Wastewater canal', 'Damage prone'
        ],
        'Power': [
            'Power Generator', 'Power Station', 'Power Sub Station', 'Cable Distribution Cabinet',
            'Power Tower', 'Pole', 'Power Line', 'Street Lamp'
        ],
        'Solid waste management': [
            'Landill', 'Recycling', 'Waste Disposal', 'Waste Basket'
        ],
        'Telecommunication': [
            'Radio or TV studio', 'Telecommunication antenna'
        ]
    },
    'Cultural and social': {
        'Place of worship': [
            'Place of worship', 'Graveyard'
        ],
        'Community Center': [
            'Community Centre',
        ],
        'Culture': [
            'Museum', 'Cinema', 'Theatre', 'Library', 'Arts Centre', 'Artwork'
        ],
        'Historic Places': [
            'Castle', 'Monument', 'Memorial'
        ],
        'Sport Facilities': [
            'Pitch', 'Stadium', 'Sports Centre', 'Racetrack', 'Golf Course', 'Miniature Golf', 'Bench'
        ]
    },
    'Public services': {
        'Educational facilities': [
            'Kindergarten', 'School', 'College', 'University'
        ],
        'Health facilities': [
            'Hospital', 'Clinic', 'Doctors', 'Dentist', 'Pharmacy', 'Hearing Aids', 'Baby Hatch', 'Veterinary'
        ],
        'Social Facility': [
            'Ambulatory Care', 'Assisted Living', 'Workshop', 'Food bank', 'Healthcare', 'Shelter', 'Outreach', 'Group Home'
        ],
        'Public and government facilities': [
            'Town hall', 'Courthouse', 'Customs', 'Embassy', 'Prison', 'Police', 'Fire Station', 'Post Office', 'Post Box'
        ]
    },
    'Commercial and Economic': {
        'Accommodation': [
            'Hotel', 'Motel', 'Guest House', 'Hostel', 'Alpine Hut', 'Caravan Site', 'Camping Site'
        ],
        'Food+Drinks': [
            'Restaurant', 'Fast Food', 'Food Court', 'Cafe', 'Pub', 'Biergarten', 'Bar', 'Nightclub', 'Brothel'
        ],
        'Tourism': [
            'Beach resort', 'Attraction', 'Viewpoint', 'Information Office', 'Map', 'Information Board'
        ],
        'Food': [
            'Supermarket', 'Convenience Store', 'Kiosk', 'Baker', 'Butcher', 'Seafood',
            'Deli (Fine Food)', 'Confectionery', 'Marketplace', 'Greengrocer', 'Alcohol', 'Beverages'
        ],
        'Clothes': [
            'Clothes', 'Boutique', 'Shoes', 'Outdoor', 'Sports', 'Dry Cleaning', 'Laundry', 'Tailor', 'Fabric'
        ],
        'Electronic': [
            'Computer', 'Internet cafe', 'Electronics', 'Mobile Phone', 'Vacuum Cleaner', 'Hifi', 'Video'
        ],
        'Vehicles': [
            'Car', 'Fuel', 'Wash', 'Car Dealer', 'Repair', 'Car parts', 'Tyres', 'Rental', 'Sharing', 'Motorcycle',
            'Motorcycle Dealer', 'Bicycle', 'Bike Dealer', 'Rental'
        ],
        'Cash': [
            'Bank', 'Money Transfer', 'Money Exchange', 'Automated Teller Machine', 'Lottery kiosk', 'Pawnbroker'
        ],
        'Other': [
            'Department Store', 'Mall', 'Chemist', 'Hairdresser', 'Beauty', 'Optician', 'Jewelry', 'Erotic',
            'Florist', 'Garden Centre', 'Funeral directors', 'Hardware', 'Trade', 'Glaziery', 'Paint', 'Furniture',
            'Kitchen', 'Curtain', 'Frame', 'Stationery', 'Copyshop', 'Book Store', 'Photo Shop', 'Recording Studio',
            'Travel Agency', 'Musical Instrument', 'Toys', 'Variety Store'
        ],
        'Craft': [
            'Basket maker', 'Beekeeper', 'Blacksmith', 'Bookbinder', 'Brewery', 'Carpenter', 'Clockmaker',
            'Distillery', 'Key cutter', 'Pottery', 'Saddler', 'Shoemaker', 'Stonemason', 'Upholsterer'
        ],
        'Offices': [
            'Employment Agency', 'Architect', 'Government', 'Administration', 'Foundation', 'Research',
            'Estate Agent', 'IT Specialist', 'Non Government Organisation', 'Quango', 'Private Company',
            'Lawyer', 'Travel Agent', 'Accountant', 'Telecommunication', 'Insurance', 'Newspaper',
            'Information centre', 'Works'
        ]
    },
    # fix this..
    'Landuse': {
        'Various': [
            'Built urban', 'Marketplace', 'Agricultural', 'Salt Pond', 'Reservoir', 'Cemetery',
            'Fenced/walled area',
        ],
        'Leisure': [
            'Park', 'Garden', 'Playground'
        ],
    },
    'Physical environment': {
        'Nature': [
            'Peak', 'Tree', 'Wood', 'Forest', 'Nature Reserve', 'Scree', 'Fell', 'Scrub', 'Heath'
        ],
        'Water': [
            'Water', 'River', 'Stream', 'Spring', 'Waterfall', 'Wetland', 'Mud', 'Beach', 'Bay', 'Cliff'
        ]
    },
    'Hazards': {
        'Hazadous Areas': [
            'Flood prone', 'Landslide prone', 'Windstorm prone'
        ]
    }
}
# Flat (name-only) variant of the category hierarchy above.
# Fix: same missing-comma bug as in `t` — 'Roundabout' and 'Mini-roundabout'
# were implicitly concatenated into one bogus string.
tags = {
    'Building digitizing': [],
    'Building feature': [],
    'Place': {
        'Places': [
            'City', 'Town', 'Suburb', 'Neighborhood', 'Village', 'Hamlet', 'Isolated Dwelling', 'Locality'
        ],
    },
    'Transportation': {
        'Roads': [
            'Roads and streets', 'Roads under construction', 'Paths'
        ],
        'Other road objects': [
            'Parking', 'Car Park', 'Parking entrance', 'Motorcycle Parking', 'Bicycle Parking', 'Roundabout',
            'Mini-roundabout', 'Turning Circle', 'Bridge', 'Tunnel', 'Ford', 'Fire Hydrant', 'Traffic Signal',
            'Stop', 'Pedestrian Crossing', 'Traffic Calming', 'Passing Place'
        ],
        'Transportation means': [
            'Bus Station', 'Bus Stop', 'Bus Platform', 'Taxi', 'Airport', 'Airport Ground', 'Helipad', 'Runway',
            'Taxiway', 'Apron', 'Hangar', 'Terminal', 'Gate', 'Road Restrictions', 'Road Practicability'
        ],
        'Barriers': [
            'Toll Booth', 'Border Control', 'Bollard', 'Spikes', 'Hedge', 'Fence', 'Wall', 'City Wall',
            'Retaining Wall', 'Entrance', 'Gate', 'Obstacles'
        ],
    },
    'Utilities': {
        'Water and sanitation': [
            'Drinking Water', 'Water Well', 'Spring', 'Pipeline', 'Water Storage', 'Water Tower',
            'Water Works', 'Showers', 'Toilets and latrines', 'Wastewater Plant', 'Watermill', 'Water Canals',
            'Wastewater canal', 'Damage prone'
        ],
        'Power': [
            'Power Generator', 'Power Station', 'Power Sub Station', 'Cable Distribution Cabinet',
            'Power Tower', 'Pole', 'Power Line', 'Street Lamp'
        ],
        'Solid waste management': [
            'Landill', 'Recycling', 'Waste Disposal', 'Waste Basket'
        ],
        'Telecommunication': [
            'Radio or TV studio', 'Telecommunication antenna'
        ]
    },
    'Cultural and social': {
        'Place of worship': [
            'Place of worship', 'Graveyard'
        ],
        'Community Center': [
            'Community Centre',
        ],
        'Culture': [
            'Museum', 'Cinema', 'Theatre', 'Library', 'Arts Centre', 'Artwork'
        ],
        'Historic Places': [
            'Castle', 'Monument', 'Memorial'
        ],
        'Sport Facilities': [
            'Pitch', 'Stadium', 'Sports Centre', 'Racetrack', 'Golf Course', 'Miniature Golf', 'Bench'
        ]
    },
    'Public services': {
        'Educational facilities': [
            'Kindergarten', 'School', 'College', 'University'
        ],
        'Health facilities': [
            'Hospital', 'Clinic', 'Doctors', 'Dentist', 'Pharmacy', 'Hearing Aids', 'Baby Hatch', 'Veterinary'
        ],
        'Social Facility': [
            'Ambulatory Care', 'Assisted Living', 'Workshop', 'Food bank', 'Healthcare', 'Shelter', 'Outreach', 'Group Home'
        ],
        'Public and government facilities': [
            'Town hall', 'Courthouse', 'Customs', 'Embassy', 'Prison', 'Police', 'Fire Station', 'Post Office', 'Post Box'
        ]
    },
    'Commercial and Economic': {
        'Accommodation': [
            'Hotel', 'Motel', 'Guest House', 'Hostel', 'Alpine Hut', 'Caravan Site', 'Camping Site'
        ],
        'Food+Drinks': [
            'Restaurant', 'Fast Food', 'Food Court', 'Cafe', 'Pub', 'Biergarten', 'Bar', 'Nightclub', 'Brothel'
        ],
        'Tourism': [
            'Beach resort', 'Attraction', 'Viewpoint', 'Information Office', 'Map', 'Information Board'
        ],
        'Food': [
            'Supermarket', 'Convenience Store', 'Kiosk', 'Baker', 'Butcher', 'Seafood',
            'Deli (Fine Food)', 'Confectionery', 'Marketplace', 'Greengrocer', 'Alcohol', 'Beverages'
        ],
        'Clothes': [
            'Clothes', 'Boutique', 'Shoes', 'Outdoor', 'Sports', 'Dry Cleaning', 'Laundry', 'Tailor', 'Fabric'
        ],
        'Electronic': [
            'Computer', 'Internet cafe', 'Electronics', 'Mobile Phone', 'Vacuum Cleaner', 'Hifi', 'Video'
        ],
        'Vehicles': [
            'Car', 'Fuel', 'Wash', 'Car Dealer', 'Repair', 'Car parts', 'Tyres', 'Rental', 'Sharing', 'Motorcycle',
            'Motorcycle Dealer', 'Bicycle', 'Bike Dealer', 'Rental'
        ],
        'Cash': [
            'Bank', 'Money Transfer', 'Money Exchange', 'Automated Teller Machine', 'Lottery kiosk', 'Pawnbroker'
        ],
        'Other': [
            'Department Store', 'Mall', 'Chemist', 'Hairdresser', 'Beauty', 'Optician', 'Jewelry', 'Erotic',
            'Florist', 'Garden Centre', 'Funeral directors', 'Hardware', 'Trade', 'Glaziery', 'Paint', 'Furniture',
            'Kitchen', 'Curtain', 'Frame', 'Stationery', 'Copyshop', 'Book Store', 'Photo Shop', 'Recording Studio',
            'Travel Agency', 'Musical Instrument', 'Toys', 'Variety Store'
        ],
        'Craft': [
            'Basket maker', 'Beekeeper', 'Blacksmith', 'Bookbinder', 'Brewery', 'Carpenter', 'Clockmaker',
            'Distillery', 'Key cutter', 'Pottery', 'Saddler', 'Shoemaker', 'Stonemason', 'Upholsterer'
        ],
        'Offices': [
            'Employment Agency', 'Architect', 'Government', 'Administration', 'Foundation', 'Research',
            'Estate Agent', 'IT Specialist', 'Non Government Organisation', 'Quango', 'Private Company',
            'Lawyer', 'Travel Agent', 'Accountant', 'Telecommunication', 'Insurance', 'Newspaper',
            'Information centre', 'Works'
        ]
    },
    # fix this..
    'Landuse': {
        'Various': [
            'Built urban', 'Marketplace', 'Agricultural', 'Salt Pond', 'Reservoir', 'Cemetery',
            'Fenced/walled area',
        ],
        'Leisure': [
            'Park', 'Garden', 'Playground'
        ],
    },
    'Physical environment': {
        'Nature': [
            'Peak', 'Tree', 'Wood', 'Forest', 'Nature Reserve', 'Scree', 'Fell', 'Scrub', 'Heath'
        ],
        'Water': [
            'Water', 'River', 'Stream', 'Spring', 'Waterfall', 'Wetland', 'Mud', 'Beach', 'Bay', 'Cliff'
        ]
    },
    'Hazards': {
        'Hazadous Areas': [
            'Flood prone', 'Landslide prone', 'Windstorm prone'
        ]
    }
}
HOT_HDM = OrderedDict(sorted(t.items()))
| |
import logging
import random
import threading
import time
from collections import defaultdict
from typing import Any, Callable, Dict, List, Mapping, Optional, Set
import orjson
import pika
import pika.adapters.tornado_connection
from django.conf import settings
from pika.adapters.blocking_connection import BlockingChannel
from pika.spec import Basic
from tornado import ioloop
from zerver.lib.utils import statsd
# Number of attempts before a queued request is given up on.
MAX_REQUEST_RETRIES = 3

# Signature of a pika message callback: (channel, method, properties, body).
Consumer = Callable[[BlockingChannel, Basic.Deliver, pika.BasicProperties, bytes], None]
# This simple queuing library doesn't expose much of the power of
# rabbitmq/pika's queuing system; its purpose is to just provide an
# interface for external files to put things into queues and take them
# out from bots without having to import pika code all over our codebase.
class SimpleQueueClient:
    def __init__(self,
                 # Disable RabbitMQ heartbeats by default because BlockingConnection can't process them
                 rabbitmq_heartbeat: Optional[int] = 0,
                 ) -> None:
        self.log = logging.getLogger('zulip.queue')
        # Names of queues already declared on the current connection.
        self.queues: Set[str] = set()
        self.channel: Optional[BlockingChannel] = None
        # Registered consumers per queue; re-attached after a reconnect.
        self.consumers: Dict[str, Set[Consumer]] = defaultdict(set)
        self.rabbitmq_heartbeat = rabbitmq_heartbeat
        self.is_consuming = False
        self._connect()
def _connect(self) -> None:
start = time.time()
self.connection = pika.BlockingConnection(self._get_parameters())
self.channel = self.connection.channel()
self.log.info(f'SimpleQueueClient connected (connecting took {time.time() - start:.3f}s)')
def _reconnect(self) -> None:
self.connection = None
self.channel = None
self.queues = set()
self._connect()
    def _get_parameters(self) -> pika.ConnectionParameters:
        """Build pika connection parameters from the Django settings."""
        credentials = pika.PlainCredentials(settings.RABBITMQ_USERNAME,
                                            settings.RABBITMQ_PASSWORD)

        # With BlockingConnection, we are passed
        # self.rabbitmq_heartbeat=0, which asks to explicitly disable
        # the RabbitMQ heartbeat feature.  This is correct since that
        # heartbeat doesn't make sense with BlockingConnection (we do
        # need it for TornadoConnection).
        #
        # Where we've disabled RabbitMQ's heartbeat, the only
        # keepalive on this connection is the TCP keepalive (defaults:
        # `/proc/sys/net/ipv4/tcp_keepalive_*`).  On most Linux
        # systems, the default is to start sending keepalive packets
        # after TCP_KEEPIDLE (7200 seconds) of inactivity; after that
        # point, it send them every TCP_KEEPINTVL (typically 75s).
        # Some Kubernetes / Docker Swarm networks can kill "idle" TCP
        # connections after as little as ~15 minutes of inactivity.
        # To avoid this killing our RabbitMQ connections, we set
        # TCP_KEEPIDLE to something significantly below 15 minutes.
        tcp_options = None
        if self.rabbitmq_heartbeat == 0:
            tcp_options = dict(TCP_KEEPIDLE=60 * 5)

        return pika.ConnectionParameters(settings.RABBITMQ_HOST,
                                         heartbeat=self.rabbitmq_heartbeat,
                                         tcp_options=tcp_options,
                                         credentials=credentials)
def _generate_ctag(self, queue_name: str) -> str:
return f"{queue_name}_{str(random.getrandbits(16))}"
    def _reconnect_consumer_callback(self, queue: str, consumer: Consumer) -> None:
        """Re-attach a single saved consumer to its queue after a reconnect."""
        self.log.info(f"Queue reconnecting saved consumer {consumer} to queue {queue}")
        self.ensure_queue(
            queue,
            lambda channel: channel.basic_consume(
                queue,
                consumer,
                consumer_tag=self._generate_ctag(queue),
            ),
        )
def _reconnect_consumer_callbacks(self) -> None:
for queue, consumers in self.consumers.items():
for consumer in consumers:
self._reconnect_consumer_callback(queue, consumer)
def close(self) -> None:
if self.connection:
self.connection.close()
def ready(self) -> bool:
    """Whether a channel has been opened and the client can be used."""
    return self.channel is not None
def ensure_queue(self, queue_name: str, callback: Callable[[BlockingChannel], None]) -> None:
    '''Declare queue_name (durably, once per process) and then invoke
    the callback with the open channel.'''
    # Reconnect lazily if we have never connected or the broker dropped us.
    if self.connection is None or not self.connection.is_open:
        self._connect()
    assert self.channel is not None

    if queue_name in self.queues:
        callback(self.channel)
        return

    # First use of this queue in this process: declare it durable so it
    # survives broker restarts, and remember that we did.
    self.channel.queue_declare(queue=queue_name, durable=True)
    self.queues.add(queue_name)
    callback(self.channel)
def publish(self, queue_name: str, body: bytes) -> None:
    """Publish body to queue_name, declaring the queue if needed."""
    def do_publish(channel: BlockingChannel) -> None:
        # delivery_mode=2 marks the message persistent, so it survives a
        # broker restart (the queue itself is declared durable).
        properties = pika.BasicProperties(delivery_mode=2)
        channel.basic_publish(
            exchange='',
            routing_key=queue_name,
            properties=properties,
            body=body)

    statsd.incr(f"rabbitmq.publish.{queue_name}")
    self.ensure_queue(queue_name, do_publish)
def json_publish(self, queue_name: str, body: Mapping[str, Any]) -> None:
    """Serialize body as JSON and publish it to queue_name.

    If the AMQP connection turns out to be broken, reconnect from
    scratch and retry exactly once; a second failure propagates.
    """
    data = orjson.dumps(body)
    try:
        self.publish(queue_name, data)
        # Success: skip the reconnect-and-retry path below.
        return
    except pika.exceptions.AMQPConnectionError:
        self.log.warning("Failed to send to rabbitmq, trying to reconnect and send again")

    # Single retry after rebuilding the connection.
    self._reconnect()
    self.publish(queue_name, data)
def start_json_consumer(self,
                        queue_name: str,
                        callback: Callable[[List[Dict[str, Any]]], None],
                        batch_size: int=1,
                        timeout: Optional[int]=None) -> None:
    """Consume JSON messages from queue_name in batches of up to
    batch_size, invoking callback(events) for each batch and acking
    (or nacking, on callback failure) the whole batch at once.

    This blocks the calling thread until stop_consuming() is called.
    """
    # A batch of one never needs to wait for more events, so a timeout
    # would only add latency.
    if batch_size == 1:
        timeout = None

    def do_consume(channel: BlockingChannel) -> None:
        events: List[Dict[str, Any]] = []
        last_process = time.time()
        # Highest delivery tag collected so far; used for multiple-ack.
        max_processed: Optional[int] = None
        self.is_consuming = True

        # This iterator technique will iteratively collect up to
        # batch_size events from the RabbitMQ queue (if present)
        # before calling the callback with the batch.  If not
        # enough events are present, it will sleep for at most
        # timeout seconds before calling the callback with the
        # batch of events it has.
        for method, properties, body in channel.consume(queue_name, inactivity_timeout=timeout):
            # body is None when the inactivity timeout fired with no message.
            if body is not None:
                events.append(orjson.loads(body))
                max_processed = method.delivery_tag
            now = time.time()
            if len(events) >= batch_size or (timeout and now >= last_process + timeout):
                if events:
                    try:
                        callback(events)
                        # Ack everything up to the last collected tag only
                        # AFTER the callback succeeded ...
                        channel.basic_ack(max_processed, multiple=True)
                    except BaseException:
                        # ... otherwise nack the batch so the broker
                        # redelivers it, then re-raise.
                        channel.basic_nack(max_processed, multiple=True)
                        raise
                    events = []
                last_process = now

            # stop_consuming() flips this flag to request a clean exit.
            if not self.is_consuming:
                break

    self.ensure_queue(queue_name, do_consume)
def local_queue_size(self) -> int:
    """Count messages already delivered to this client but not yet processed."""
    channel = self.channel
    assert channel is not None
    # NOTE(review): _pending_events is a private pika attribute; this
    # may need adjusting on a pika upgrade — confirm.
    return channel.get_waiting_message_count() + len(channel._pending_events)
def stop_consuming(self) -> None:
    """Ask the start_json_consumer() loop to exit cleanly."""
    assert self.channel is not None
    assert self.is_consuming
    # Flip the flag first; the consume loop checks it after each
    # delivery (or timeout), and stop_consuming() wakes the iterator.
    self.is_consuming = False
    self.channel.stop_consuming()
# Patch pika.adapters.tornado_connection.TornadoConnection so that a socket error doesn't
# throw an exception and disconnect the tornado process from the rabbitmq
# queue. Instead, just re-connect as usual
class ExceptionFreeTornadoConnection(pika.adapters.tornado_connection.TornadoConnection):
    def _adapter_disconnect(self) -> None:
        """Disconnect, swallowing (but logging) pika's handshake errors."""
        try:
            super()._adapter_disconnect()
        except (pika.exceptions.ProbableAuthenticationError,
                pika.exceptions.ProbableAccessDeniedError,
                pika.exceptions.IncompatibleProtocolError):
            # Fix: use implicit string concatenation instead of a
            # backslash line-continuation *inside* the string literal,
            # which silently embeds the next line's text in the message.
            logging.warning(
                "Caught exception in ExceptionFreeTornadoConnection when "
                "calling _adapter_disconnect, ignoring",
                exc_info=True,
            )
class TornadoQueueClient(SimpleQueueClient):
    """Asynchronous RabbitMQ client used inside the Tornado process.

    All connection setup and consumption is callback-driven on the
    Tornado IOLoop, and lost connections are retried automatically.
    """
    # Based on:
    # https://pika.readthedocs.io/en/0.9.8/examples/asynchronous_consumer_example.html
    def __init__(self) -> None:
        super().__init__(
            # TornadoConnection can process heartbeats, so enable them.
            rabbitmq_heartbeat=None)
        # Callbacks to run once the channel has been (re)opened.
        self._on_open_cbs: List[Callable[[BlockingChannel], None]] = []
        self._connection_failure_count = 0

    def _connect(self) -> None:
        """Start an asynchronous connection attempt; the outcome is
        reported via the on_open / on_close callbacks below."""
        self.log.info("Beginning TornadoQueueClient connection")
        self.connection = ExceptionFreeTornadoConnection(
            self._get_parameters(),
            on_open_callback = self._on_open,
            on_open_error_callback = self._on_connection_open_error,
            on_close_callback = self._on_connection_closed,
        )

    def _reconnect(self) -> None:
        # Discard all cached state before dialing again.
        self.connection = None
        self.channel = None
        self.queues = set()
        self.log.warning("TornadoQueueClient attempting to reconnect to RabbitMQ")
        self._connect()

    CONNECTION_RETRY_SECS = 2

    # When the RabbitMQ server is restarted, it's normal for it to
    # take a few seconds to come back; we'll retry a few times and all
    # will be well. So for the first few failures, we report only at
    # "warning" level, avoiding an email to the server admin.
    #
    # A loss of an existing connection starts a retry loop just like a
    # failed connection attempt, so it counts as the first failure.
    #
    # On an unloaded test system, a RabbitMQ restart takes about 6s,
    # potentially causing 4 failures. We add some headroom above that.
    CONNECTION_FAILURES_BEFORE_NOTIFY = 10

    def _on_connection_open_error(self, connection: pika.connection.Connection,
                                  reason: Exception) -> None:
        """Connection attempt failed: log (escalating to CRITICAL after
        repeated failures) and schedule another attempt."""
        self._connection_failure_count += 1
        retry_secs = self.CONNECTION_RETRY_SECS
        self.log.log(
            logging.CRITICAL
            if self._connection_failure_count > self.CONNECTION_FAILURES_BEFORE_NOTIFY
            else logging.WARNING,
            "TornadoQueueClient couldn't connect to RabbitMQ, retrying in %d secs...",
            retry_secs,
        )
        ioloop.IOLoop.instance().call_later(retry_secs, self._reconnect)

    def _on_connection_closed(self, connection: pika.connection.Connection,
                              reason: Exception) -> None:
        # A dropped established connection starts a fresh retry loop,
        # so it counts as the *first* failure (see comment above).
        self._connection_failure_count = 1
        retry_secs = self.CONNECTION_RETRY_SECS
        self.log.warning(
            "TornadoQueueClient lost connection to RabbitMQ, reconnecting in %d secs...",
            retry_secs,
        )
        ioloop.IOLoop.instance().call_later(retry_secs, self._reconnect)

    def _on_open(self, connection: pika.connection.Connection) -> None:
        """Connection established: reset the failure counter, open a channel."""
        self._connection_failure_count = 0
        try:
            self.connection.channel(
                on_open_callback = self._on_channel_open)
        except pika.exceptions.ConnectionClosed:
            # The connection didn't stay open long enough for this code to get to it.
            # Let _on_connection_closed deal with trying again.
            self.log.warning("TornadoQueueClient couldn't open channel: connection already closed")

    def _on_channel_open(self, channel: BlockingChannel) -> None:
        """Channel ready: run deferred callbacks and re-register consumers."""
        self.channel = channel
        for callback in self._on_open_cbs:
            callback(channel)
        self._reconnect_consumer_callbacks()
        self.log.info('TornadoQueueClient connected')

    def ensure_queue(self, queue_name: str, callback: Callable[[BlockingChannel], None]) -> None:
        """Declare queue_name (once) and then call callback(channel),
        deferring the work until the channel exists if necessary."""
        def finish(frame: Any) -> None:
            assert self.channel is not None
            self.queues.add(queue_name)
            callback(self.channel)

        if queue_name not in self.queues:
            # If we're not connected yet, send this message
            # once we have created the channel
            if not self.ready():
                self._on_open_cbs.append(lambda channel: self.ensure_queue(queue_name, callback))
                return

            assert self.channel is not None
            self.channel.queue_declare(queue=queue_name, durable=True, callback=finish)
        else:
            assert self.channel is not None
            callback(self.channel)

    def start_json_consumer(self,
                            queue_name: str,
                            callback: Callable[[List[Dict[str, Any]]], None],
                            batch_size: int=1,
                            timeout: Optional[int]=None) -> None:
        """Register a consumer that decodes each message as JSON and acks
        it after the callback succeeds.  Only single-event batches are
        supported by this asynchronous client."""
        def wrapped_consumer(ch: BlockingChannel,
                             method: Basic.Deliver,
                             properties: pika.BasicProperties,
                             body: bytes) -> None:
            callback([orjson.loads(body)])
            ch.basic_ack(delivery_tag=method.delivery_tag)

        # Batching is not implemented for the Tornado client.
        assert batch_size == 1
        assert timeout is None

        self.consumers[queue_name].add(wrapped_consumer)

        if not self.ready():
            # Not connected yet; the consumer saved above will be
            # registered by _reconnect_consumer_callbacks() on open.
            return

        self.ensure_queue(
            queue_name,
            lambda channel: channel.basic_consume(
                queue_name,
                wrapped_consumer,
                consumer_tag=self._generate_ctag(queue_name),
            ),
        )
queue_client: Optional[SimpleQueueClient] = None

def get_queue_client() -> SimpleQueueClient:
    """Return the process-wide queue client singleton, creating it lazily."""
    global queue_client
    if queue_client is not None:
        return queue_client

    if not settings.USING_RABBITMQ:
        raise RuntimeError("Cannot get a queue client without USING_RABBITMQ")

    # Tornado processes need the asynchronous client; everything else
    # uses the blocking one.
    if settings.RUNNING_INSIDE_TORNADO:
        queue_client = TornadoQueueClient()
    else:
        queue_client = SimpleQueueClient()
    return queue_client
# We use a simple lock to prevent multiple RabbitMQ messages being
# sent to the SimpleQueueClient at the same time; this is a workaround
# for an issue with the pika BlockingConnection where using
# BlockingConnection for multiple queues causes the channel to
# randomly close.
# NOTE(review): an RLock (rather than Lock) implies publishing may
# re-enter from the same thread — confirm before changing the type.
queue_lock = threading.RLock()
def queue_json_publish(
    queue_name: str,
    event: Dict[str, Any],
    processor: Optional[Callable[[Any], None]] = None,
) -> None:
    """Publish event to queue_name, or — when RabbitMQ is not in use —
    process it synchronously via processor or the in-process worker."""
    with queue_lock:
        if settings.USING_RABBITMQ:
            get_queue_client().json_publish(queue_name, event)
            return
        if processor:
            processor(event)
            return
        # Must be imported here: A top section import leads to circular imports
        from zerver.worker.queue_processors import get_worker
        get_worker(queue_name).consume_single_event(event)
def retry_event(queue_name: str,
                event: Dict[str, Any],
                failure_processor: Callable[[Dict[str, Any]], None]) -> None:
    """Bump event's retry counter and requeue it, handing it to
    failure_processor once MAX_REQUEST_RETRIES has been exceeded."""
    event['failed_tries'] = event.get('failed_tries', 0) + 1
    if event['failed_tries'] > MAX_REQUEST_RETRIES:
        failure_processor(event)
    else:
        # The no-op processor prevents synchronous in-process handling.
        queue_json_publish(queue_name, event, lambda x: None)
| |
"""
Glue code for logic around calling associated backend
libraries to support the dcos installer.
"""
import json
import logging
import os
import botocore.exceptions
import gen
import gen.build_deploy.aws
import gen.calc
import release
import release.storage.aws
import release.storage.local
from dcos_installer import config_util, upgrade
from dcos_installer.config import (
Config,
normalize_config_validation,
normalize_config_validation_exception,
)
from dcos_installer.constants import CONFIG_PATH, GENCONF_DIR
from gen.exceptions import ExhibitorTLSBootstrapError, ValidationError
log = logging.getLogger()
def print_messages(messages):
    """Log each validation message as '<key>: <error>' at error level."""
    for name, detail in messages.items():
        log.error('{}: {}'.format(name, detail))
def do_configure(config_path=CONFIG_PATH):
    """Generate on-prem install artifacts from config.yaml.

    Returns error code (0 on success, 1 on validation/TLS failure).

    :param config_path: path to config.yaml
    :type config_path: string | CONFIG_PATH (genconf/config.yaml)
    """
    config = Config(config_path)

    try:
        gen_out = config_util.onprem_generate(config)
    except ValidationError as e:
        validation = normalize_config_validation_exception(e)
        print_messages(validation)
        return 1
    except ExhibitorTLSBootstrapError as e:
        log.error('Failed to bootstrap Exhibitor TLS')
        # Fix: the original `return log.error(...)` exited the loop on the
        # FIRST error and returned None (log.error's return value) rather
        # than 1, leaving the `return 1` below unreachable.
        for i, error in enumerate(e.errors):
            log.error("{}: {}".format(i + 1, error))
        return 1

    config_util.make_serve_dir(gen_out)
    return 0
def generate_node_upgrade_script(installed_cluster_version, config_path=CONFIG_PATH):
    """Generate install artifacts plus a node upgrade script.

    Returns error code (0 on success, 1 on missing version or failure).

    :param installed_cluster_version: DC/OS version currently installed
    :param config_path: path to config.yaml
    """
    if installed_cluster_version is None:
        print('Must provide the version of the cluster upgrading from')
        return 1

    config = Config(config_path)

    try:
        gen_out = config_util.onprem_generate(config)
    except ValidationError as e:
        validation = normalize_config_validation_exception(e)
        print_messages(validation)
        return 1
    except ExhibitorTLSBootstrapError as e:
        log.error('Failed to bootstrap Exhibitor TLS')
        # Fix: the original `return log.error(...)` exited on the first
        # error and returned None instead of 1 (same bug as do_configure).
        for i, error in enumerate(e.errors):
            log.error("{}: {}".format(i + 1, error))
        return 1

    config_util.make_serve_dir(gen_out)

    # generate the upgrade script
    upgrade.generate_node_upgrade_script(gen_out, installed_cluster_version)

    return 0
# Taken from: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
# In the same order as that document.
region_to_endpoint = {
    'us-east-1': 's3.amazonaws.com',
    'us-west-1': 's3-us-west-1.amazonaws.com',
    'us-west-2': 's3-us-west-2.amazonaws.com',
    'ap-south-1': 's3.ap-south-1.amazonaws.com',
    'ap-northeast-2': 's3.ap-northeast-2.amazonaws.com',
    'ap-southeast-1': 's3-ap-southeast-1.amazonaws.com',
    'ap-southeast-2': 's3-ap-southeast-2.amazonaws.com',
    'ap-northeast-1': 's3-ap-northeast-1.amazonaws.com',
    'eu-central-1': 's3.eu-central-1.amazonaws.com',
    'eu-west-1': 's3-eu-west-1.amazonaws.com',
    'sa-east-1': 's3-sa-east-1.amazonaws.com',
    'us-gov-west-1': 's3-us-gov-west-1.amazonaws.com'
}

def validate_aws_template_storage_region_name(aws_template_storage_region_name):
    """Assert that the configured region has a known S3 endpoint."""
    supported = region_to_endpoint.keys()
    assert aws_template_storage_region_name in region_to_endpoint, \
        "Unsupported AWS region {}. Only {} are supported".format(
            aws_template_storage_region_name, supported)
def validate_aws_bucket_access(aws_template_storage_region_name,
                               aws_template_storage_access_key_id,
                               aws_template_storage_secret_access_key,
                               aws_template_storage_bucket,
                               aws_template_storage_bucket_path,
                               aws_template_storage_bucket_path_autocreate):
    """Assert that the S3 bucket (and, unless autocreate is enabled, the
    configured path inside it) exists and is reachable with the given
    credentials.  Raises AssertionError — the error type gen validation
    reports — on any access problem.
    """
    session = release.storage.aws.get_aws_session(
        aws_template_storage_access_key_id,
        aws_template_storage_secret_access_key,
        aws_template_storage_region_name)
    bucket = session.resource('s3').Bucket(aws_template_storage_bucket)

    try:
        bucket.load()
    except botocore.exceptions.ClientError as ex:
        if ex.response['Error']['Code'] == '404':
            raise AssertionError("s3 bucket {} does not exist".format(aws_template_storage_bucket)) from ex
        raise AssertionError("Unable to access s3 bucket {} in region {}: {}".format(
            aws_template_storage_bucket, aws_template_storage_region_name, ex)) from ex

    # If autocreate is on, then skip ensuring the path exists
    if not aws_template_storage_bucket_path_autocreate:
        try:
            bucket.Object(aws_template_storage_bucket_path).load()
        except botocore.exceptions.ClientError as ex:
            if ex.response['Error']['Code'] == '404':
                # Fix: chain the original exception (`from ex`), matching
                # every other AssertionError raised in this module.
                raise AssertionError(
                    "path `{}` in bucket `{}` does not exist. Create it or set "
                    "aws_template_storage_bucket_path_autocreate to true".format(
                        aws_template_storage_bucket_path, aws_template_storage_bucket)) from ex
            raise AssertionError("Unable to access s3 path {} in bucket {}: {}".format(
                aws_template_storage_bucket_path, aws_template_storage_bucket, ex)) from ex
def calculate_base_repository_url(
        aws_template_storage_region_name,
        aws_template_storage_bucket,
        aws_template_storage_bucket_path):
    """Build the https URL of the bucket path used as template repository."""
    endpoint = region_to_endpoint[aws_template_storage_region_name]
    return 'https://{domain}/{bucket}/{path}'.format(
        domain=endpoint,
        bucket=aws_template_storage_bucket,
        path=aws_template_storage_bucket_path)
def calculate_aws_template_storage_region_name(
        aws_template_storage_access_key_id,
        aws_template_storage_secret_access_key,
        aws_template_storage_bucket):
    """Look up the region the bucket lives in via GetBucketLocation."""
    session = release.storage.aws.get_aws_session(
        aws_template_storage_access_key_id,
        aws_template_storage_secret_access_key)
    try:
        location_info = session.client('s3').get_bucket_location(Bucket=aws_template_storage_bucket)
    except botocore.exceptions.ClientError as ex:
        if ex.response['Error']['Code'] == '404':
            raise AssertionError("s3 bucket {} does not exist".format(aws_template_storage_bucket)) from ex
        raise AssertionError("Unable to determine region location of s3 bucket {}: {}".format(
            aws_template_storage_bucket, ex)) from ex

    loc = location_info["LocationConstraint"]
    if loc is None or loc.strip() == "":
        # If a buckets region is in fact 'us-east-1' the response from the api will actually be an empty value?!
        # Rather than returning the empty value on to we set it to 'us-east-1'.
        # See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html#RESTBucketGETlocation-responses-response-elements # noqa
        return "us-east-1"
    return loc
# gen.internals.Source describing the extra configuration surface needed to
# build and (optionally) upload the AWS Advanced CloudFormation templates.
aws_advanced_source = gen.internals.Source({
    # TODO(cmaloney): Add parameter validation for AWS Advanced template output.
    'validate': [
        lambda aws_template_upload: gen.calc.validate_true_false(aws_template_upload),
        lambda aws_template_storage_bucket_path_autocreate:
            gen.calc.validate_true_false(aws_template_storage_bucket_path_autocreate),
        validate_aws_template_storage_region_name,
        validate_aws_bucket_access
    ],
    'default': {
        'num_masters': '5',
        'aws_template_upload': 'true',
        'aws_template_storage_bucket_path_autocreate': 'true',
        'bootstrap_id': lambda: gen.calc.calculate_environment_variable('BOOTSTRAP_ID'),
        # TODO(cmaloney): Add defaults for getting AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY from the
        # environment to set as keys. Not doing for now since they would need to be passed through
        # the `docker run` inside dcos_generate_config.sh
        'aws_template_storage_access_key_id': '',
        'aws_template_storage_secret_access_key': '',
    },
    'must': {
        'provider': 'aws',
        'package_ids': lambda bootstrap_variant: json.dumps(
            config_util.installer_latest_complete_artifact(bootstrap_variant)['packages']
        ),
        'bootstrap_url': calculate_base_repository_url,
    },
    # Values that must never be echoed back into logs / rendered config.
    'secret': [
        'aws_template_storage_access_key_id',
        'aws_template_storage_secret_access_key',
    ],
    'conditional': {
        'aws_template_upload': {
            'true': {
                'default': {
                    # When uploading, the region can be derived from the bucket.
                    'aws_template_storage_region_name': calculate_aws_template_storage_region_name
                }
            },
            'false': {}
        }
    }
})
def get_aws_advanced_target():
    """Return the gen.internals.Target listing the variables the AWS
    Advanced template flow must resolve."""
    return gen.internals.Target(
        variables={
            # TODO(cmaloney): Namespacing would be really handy here...
            'aws_template_storage_bucket',
            'aws_template_storage_bucket_path',
            'aws_template_upload',
            'aws_template_storage_bucket_path_autocreate',
            'provider',
            'bootstrap_url',
            'bootstrap_variant',
            'package_ids'},
        sub_scopes={
            # Storage credentials and region are only required when the
            # templates are actually uploaded.
            'aws_template_upload': gen.internals.Scope(
                name='aws_template_upload',
                cases={
                    'true': gen.internals.Target({
                        'aws_template_storage_access_key_id',
                        'aws_template_storage_secret_access_key',
                        'aws_template_storage_region_name'
                    }),
                    'false': gen.internals.Target()
                }
            )
        }
    )
# TODO(cmaloney): Make it so validation happens using the provided AWS credentials.
def do_aws_cf_configure():
    """Returns error code

    Generates AWS templates using a custom config.yaml
    """

    # TODO(cmaloney): Move to Config class introduced in https://github.com/dcos/dcos/pull/623
    config = Config(CONFIG_PATH)

    # This process is usually ran from a docker container where default boto3 credential
    # method may fail and as such, we allow passing these creds explicitly
    if 'aws_template_storage_access_key_id' in config:
        os.environ['AWS_ACCESS_KEY_ID'] = config['aws_template_storage_access_key_id']
    if 'aws_template_storage_secret_access_key' in config:
        os.environ['AWS_SECRET_ACCESS_KEY'] = config['aws_template_storage_secret_access_key']
    if 'aws_template_storage_region_name' in config:
        os.environ['AWS_DEFAULT_REGION'] = config['aws_template_storage_region_name']

    gen_config = config.as_gen_format()

    # Extra config sources specific to the AWS Advanced flow.
    extra_sources = [
        gen.build_deploy.aws.aws_base_source,
        aws_advanced_source,
        gen.build_deploy.aws.groups['master'][1]]

    sources, targets, _ = gen.get_dcosconfig_source_target_and_templates(gen_config, [], extra_sources)
    targets.append(get_aws_advanced_target())
    resolver = gen.internals.resolve_configuration(sources, targets)
    # TODO(cmaloney): kill this function and make the API return the structured
    # results api as was always intended rather than the flattened / lossy other
    # format. This will be an API incompatible change. The messages format was
    # specifically so that there wouldn't be this sort of API incompatibility.
    messages = normalize_config_validation(resolver.status_dict)

    if messages:
        print_messages(messages)
        return 1

    # TODO(cmaloney): This is really hacky but a lot simpler than merging all the config flows into
    # one currently.
    # Get out the calculated arguments and manually move critical calculated ones to the gen_config
    # object.
    # NOTE: the copying across, as well as validation is guaranteed to succeed because we've already
    # done a validation run.
    full_config = {k: v.value for k, v in resolver.arguments.items()}

    # Calculate the config ID and values that depend on it.
    config_id = gen.get_config_id(full_config)
    reproducible_artifact_path = 'config_id/{}'.format(config_id)
    cloudformation_s3_url = '{}/config_id/{}'.format(full_config['bootstrap_url'], config_id)
    cloudformation_s3_url_full = '{}/cloudformation'.format(cloudformation_s3_url)

    # TODO(cmaloney): Switch to using the targets
    gen_config['bootstrap_url'] = full_config['bootstrap_url']
    gen_config['provider'] = full_config['provider']
    gen_config['bootstrap_id'] = full_config['bootstrap_id']
    gen_config['package_ids'] = full_config['package_ids']
    gen_config['cloudformation_s3_url_full'] = cloudformation_s3_url_full

    # Convert the bootstrap_Variant string we have back to a bootstrap_id as used internally by all
    # the tooling (never has empty string, uses None to say "no variant")
    bootstrap_variant = full_config['bootstrap_variant'] if full_config['bootstrap_variant'] else None

    # Collect every artifact produced by the CloudFormation build.
    artifacts = list()
    for built_resource in list(gen.build_deploy.aws.do_create(
            tag='dcos_generate_config.sh --aws-cloudformation',
            build_name='Custom',
            reproducible_artifact_path=reproducible_artifact_path,
            variant_arguments={bootstrap_variant: gen_config},
            commit=full_config['dcos_image_commit'],
            all_completes=None)):
        artifacts += release.built_resource_to_artifacts(built_resource)

    artifacts += list(release.make_bootstrap_artifacts(
        full_config['bootstrap_id'],
        json.loads(full_config['package_ids']),
        bootstrap_variant,
        'artifacts',
    ))
    for package_id in json.loads(full_config['package_ids']):
        package_filename = release.make_package_filename(package_id)
        artifacts.append({
            'reproducible_path': package_filename,
            'local_path': 'artifacts/' + package_filename,
        })

    # Upload all the artifacts to the config-id path and then print out what
    # the path that should be used is, as well as saving a local json file for
    # easy machine access / processing.
    repository = release.Repository(
        full_config['aws_template_storage_bucket_path'],
        None,
        'config_id/' + config_id)

    storage_commands = repository.make_commands({'core_artifacts': [], 'channel_artifacts': artifacts})

    cf_dir = GENCONF_DIR + '/cloudformation'
    log.warning("Writing local copies to {}".format(cf_dir))
    storage_provider = release.storage.local.LocalStorageProvider(cf_dir)
    release.apply_storage_commands({'local': storage_provider}, storage_commands)

    log.warning(
        "Generated templates locally available at %s",
        cf_dir + "/" + reproducible_artifact_path)
    # TODO(cmaloney): Print where the user can find the files locally

    # Skip the S3 upload entirely when the user opted out.
    if full_config['aws_template_upload'] == 'false':
        return 0

    storage_provider = release.storage.aws.S3StorageProvider(
        bucket=full_config['aws_template_storage_bucket'],
        object_prefix=None,
        download_url=cloudformation_s3_url,
        region_name=full_config['aws_template_storage_region_name'],
        access_key_id=full_config['aws_template_storage_access_key_id'],
        secret_access_key=full_config['aws_template_storage_secret_access_key'])

    log.warning("Uploading to AWS")
    release.apply_storage_commands({'aws': storage_provider}, storage_commands)
    log.warning("AWS CloudFormation templates now available at: {}".format(cloudformation_s3_url))

    # TODO(cmaloney): Print where the user can find the files in AWS
    # TODO(cmaloney): Dump out a JSON with machine paths to make scripting easier.
    return 0
def determine_config_type(config_path=CONFIG_PATH):
    """Returns the configuration type to the UI. One of either 'minimal' or
    'advanced'. 'advanced' blocks UI usage.

    :param config_path: path to config.yaml
    :type config_path: str | CONFIG_PATH (genconf/config.yaml)
    """
    # TODO(cmaloney): If the config has any arguments not in the set of possible parameters then
    # the config is always advanced.
    config = Config(config_path)

    adv_found = {}
    # A value of None means *any* value for that key marks the config as
    # advanced; a string means any value other than that string does.
    advanced_cluster_config = {
        "bootstrap_url": 'file:///opt/dcos_install_tmp',
        "docker_remove_delay": None,
        "exhibitor_storage_backend": 'static',
        "gc_delay": None,
        "master_discovery": 'static',
        "roles": None,
        "weights": None
    }
    for key, value in advanced_cluster_config.items():
        # Skip if the key isn't in config
        if key not in config:
            continue

        # None indicates any value means this is advanced config.
        # A string indicates the value must match.
        if value is None or value != config[key]:
            # Fix: the original format string was '{}: value', which printed
            # the literal word "value" and silently dropped the second
            # format() argument.
            log.error('Advanced configuration found in config.yaml: {}: {}'.format(key, config[key]))
            adv_found[key] = config[key]

    if adv_found:
        message = (
            "Advanced configuration detected in {config_path} ({adv_found}).\nPlease backup "
            "or remove {config_path} to use the UI installer.".format(
                # Fix: report the path actually inspected, not the constant.
                config_path=config_path,
                adv_found=adv_found,
            )
        )
        config_type = 'advanced'
    else:
        message = ''
        config_type = 'minimal'

    return {
        'message': message,
        'type': config_type
    }
def success(config: Config):
    """Returns the data for /success/ endpoint.

    :param config: installer Config; master_list/agent_list are read
    :returns: (messages dict, HTTP status code)
    """
    master_ips = config.hacky_default_get('master_list', [])
    agent_ips = config.hacky_default_get('agent_list', [])

    msgs = {
        'success': "",
        'master_count': 0,
        'agent_count': 0
    }
    # Both node lists must be non-empty for the install to be a success.
    if not (master_ips and agent_ips):
        return msgs, 400

    msgs['success'] = 'http://{}'.format(master_ips[0])
    msgs['master_count'] = len(master_ips)
    msgs['agent_count'] = len(agent_ips)
    return msgs, 200
| |
# Copyright(c) 2014, The LIMIX developers (Christoph Lippert, Paolo Francesco Casale, Oliver Stegle)
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
import scipy as sp
import numpy as np
import pdb
import pylab as pl
import matplotlib.pylab as plt
import scipy.stats as st
import copy
import os
import pickle
import glob
def plot_manhattan(posCum,pv,chromBounds=None,
                   thr=None,qv=None,lim=None,xticklabels=True,
                   alphaNS=0.1,alphaS=0.5,colorNS='DarkBlue',colorS='Orange',plt=None,thr_plotting=None,labelS=None,labelNS=None):
    """
    This script makes a manhattan plot
    -------------------------------------------
    posCum          cumulative position
    pv              pvalues
    chromBounds     chrom boundaries (optionally). If not supplied, everything will be plotted into a single chromosome
    qv              qvalues
                    if provided, threshold for significance is set on qvalues but pvalues are plotted
    thr             threshold for significance
                    default: 0.01 bonferroni correceted significance levels if qvs are not specified,
                    or 0.01 on qvs if qvs specified
    lim             top limit on y-axis
                    if not provided, -1.2*log(pv.min()) is taken
    xticklabels     if true, xtick labels are printed
    alphaNS         transparency of non-significant SNPs
    alphaS          transparency of significant SNPs
    plt             matplotlib.axes.AxesSubplot, the target handle for this figure (otherwise current axes)
    thr_plotting    plot only P-values that are smaller than thr_plotting to speed up plotting
    labelS          optional plotting label (significant loci)
    labelNS         optional plotting label (non significnat loci)
    """
    if plt is None:
        plt = pl.gca()

    # Bug fix: use `is None` instead of `== None` throughout.  With numpy
    # array arguments (qv in particular) `== None` broadcasts elementwise
    # and the `if` then raises "truth value of an array is ambiguous".
    # Also: np.* replaces the scipy aliases (sp.log10, sp.arange, ...)
    # that were removed from modern scipy releases; values are identical.
    if thr is None:
        thr = 0.01 / float(posCum.shape[0])

    if lim is None:
        lim = -1.2 * np.log10(np.minimum(pv.min(), thr))

    if chromBounds is None:
        chromBounds = np.array([[0, posCum.max()]])
    else:
        # NOTE(review): this concatenates the 2D chromBounds with a 1D
        # array; works only for 1D chromBounds input — confirm callers.
        chromBounds = np.concatenate([chromBounds, np.array([posCum.max()])])

    n_chroms = chromBounds.shape[0]
    # Shade every other chromosome band light gray.
    for chrom_i in range(0, n_chroms - 1, 2):
        pl.fill_between(posCum, 0, lim, where=(posCum > chromBounds[chrom_i]) & (posCum < chromBounds[chrom_i + 1]), facecolor='LightGray', linewidth=0, alpha=0.5)

    # Optionally restrict to small P/Q values to speed up rendering.
    if thr_plotting is not None:
        if pv is not None:
            i_small = pv < thr_plotting
        elif qv is not None:
            i_small = qv < thr_plotting

        if qv is not None:
            qv = qv[i_small]
        if pv is not None:
            pv = pv[i_small]
        if posCum is not None:
            posCum = posCum[i_small]

    # Significance is decided on qvalues when they are provided.
    if qv is None:
        Isign = pv < thr
    else:
        Isign = qv < thr

    # Plot non-significant and significant loci separately.
    pl.plot(posCum[~Isign], -np.log10(pv[~Isign]), '.', color=colorNS, ms=5, alpha=alphaNS, label=labelNS)
    pl.plot(posCum[Isign], -np.log10(pv[Isign]), '.', color=colorS, ms=5, alpha=alphaS, label=labelS)

    if qv is not None:
        pl.plot([0, posCum.max()], [-np.log10(thr), -np.log10(thr)], '--', color='Gray')

    pl.ylim(0, lim)
    pl.ylabel('-log$_{10}$pv')
    pl.xlim(0, posCum.max())
    xticks = np.array([chromBounds[i:i + 2].mean() for i in range(chromBounds.shape[0] - 1)])
    plt.set_xticks(xticks)
    pl.xticks(fontsize=6)

    if xticklabels:
        plt.set_xticklabels(np.arange(1, n_chroms + 1))
        pl.xlabel('genetic position')
    else:
        plt.set_xticklabels([])

    plt.spines["right"].set_visible(False)
    plt.spines["top"].set_visible(False)
    plt.xaxis.set_ticks_position('bottom')
    plt.yaxis.set_ticks_position('left')
def _qqplot_bar(M=1000000, alphaLevel = 0.05, distr = 'log10'):
"""calculate theoretical expectations for qqplot"""
mRange=10**(sp.arange(sp.log10(0.5),sp.log10(M-0.5)+0.1,0.1));#should be exp or 10**?
numPts=len(mRange);
betaalphaLevel=sp.zeros(numPts);#down in the plot
betaOneMinusalphaLevel=sp.zeros(numPts);#up in the plot
betaInvHalf=sp.zeros(numPts);
for n in range(numPts):
m=mRange[n]; #numPLessThanThresh=m;
betaInvHalf[n]=st.beta.ppf(0.5,m,M-m);
betaalphaLevel[n]=st.beta.ppf(alphaLevel,m,M-m);
betaOneMinusalphaLevel[n]=st.beta.ppf(1-alphaLevel,m,M-m);
betaDown=betaInvHalf-betaalphaLevel;
betaUp=betaOneMinusalphaLevel-betaInvHalf;
theoreticalPvals=mRange/M;
return betaUp, betaDown, theoreticalPvals
def qqplot(pv, distr = 'log10', alphaLevel = 0.05):
    """
    This script makes a Quantile-Quantile plot of the observed
    negative log P-value distribution against the theoretical one under the null.

    Input:
        pv          pvalues (numpy array)
        distr       scale of the distribution (log10 or chi2)
        alphaLevel  significance bounds

    Returns the matplotlib Line2D of the QQ scatter.
    """
    shape_ok = (len(pv.shape) == 1) or ((len(pv.shape) == 2) and pv.shape[1] == 1)
    assert shape_ok, 'qqplot requires a 1D array of p-values'

    tests = pv.shape[0]
    # Expected p-values under the null: midpoints of the ranks.
    pnull = (0.5 + np.arange(tests)) / tests
    Ipv = np.argsort(pv)

    # np.*/st.* replace the removed scipy numpy-aliases (sp.argsort,
    # sp.log10, sp.stats); identical results.
    if distr == 'chi2':
        qnull = st.chi2.isf(pnull, 1)
        qemp = (st.chi2.isf(pv[Ipv], 1))
        xl = 'LOD scores'
        # Raw string avoids the invalid '\c' escape sequence warning.
        yl = r'$\chi^2$ quantiles'

    if distr == 'log10':
        qnull = -np.log10(pnull)
        qemp = -np.log10(pv[Ipv])
        xl = '-log10(P) observed'
        yl = '-log10(P) expected'

    line = plt.plot(qnull, qemp, '.')[0]
    plt.plot([0, qnull.max()], [0, qnull.max()], 'r')
    # NOTE: the xl/yl variable names are swapped relative to their axes,
    # but the rendered labels are correct: the observed values (qemp)
    # are on the y-axis.
    plt.ylabel(xl)
    plt.xlabel(yl)
    if alphaLevel is not None:
        if distr == 'log10':
            betaUp, betaDown, theoreticalPvals = _qqplot_bar(M=tests, alphaLevel=alphaLevel, distr=distr)
            lower = -np.log10(theoreticalPvals - betaDown)
            upper = -np.log10(theoreticalPvals + betaUp)
            plt.fill_between(-np.log10(theoreticalPvals), lower, upper, color='grey', alpha=0.5)

    return line
def plot_normal(x=None, mean_x=None,std_x=None,color='red',linewidth=2,alpha=1,bins=20,xlim=False,plot_mean=True,plot_std=False,plot_2std=True,figure=None,annotate=True,histogram=True):
    """
    Plot a fit of a normal distribution to the data in x.

    x           samples (used for the ML fit and the optional histogram)
    mean_x      mean to use; fitted from x when None
    std_x       standard deviation to use; fitted from x when None
    plot_mean / plot_std / plot_2std   draw vertical markers at mu,
                mu+-sigma and mu+-2*sigma respectively
    Returns the pylab figure used.
    """
    import pylab
    if figure is None:
        figure = pylab.figure()
    if mean_x is None:
        # fit maximum likelihood Normal distribution mean to samples X
        mean_x = x.mean()  # sample mean
    if std_x is None:
        # fit maximum likelihood Normal distribution standard deviation to samples X
        std_x = x.std()  # sample standard deviation
    xvals = np.arange(mean_x - 5 * std_x, mean_x + 5 * std_x, .001)
    yvals = st.norm.pdf(xvals, mean_x, std_x)
    # plot normal distribution:
    ax = pylab.plot(xvals, yvals, color=color, linewidth=linewidth, alpha=alpha)
    if x is not None and histogram:
        # plot histogram of x-values.  Fix: `density=True` replaces the
        # `normed=True` keyword, which was removed in matplotlib 3.1.
        pylab.hist(x, bins, density=True)
    if plot_mean:
        # evaluate the density at the mean (peak of the curve):
        max_cdf = st.norm.pdf(mean_x, mean_x, std_x)
        pylab.plot([mean_x, mean_x], [0, max_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle="--")
        if annotate:
            # Raw strings avoid invalid '\m'/'\s' escape warnings.
            pylab.annotate(r'$\mu$', xy=(mean_x + 0.6 * std_x, 1.0 * max_cdf),
                           horizontalalignment='center', verticalalignment='center', fontsize=15, color=color)
    if plot_std:  # plot mean +- 1*standard deviation (64% interval)
        std_cdf = st.norm.pdf(mean_x + std_x, mean_x, std_x)
        pylab.plot([mean_x + std_x, mean_x + std_x], [0, std_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle="--")
        pylab.plot([mean_x - std_x, mean_x - std_x], [0, std_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle="--")
        if annotate:
            pylab.annotate(r'$\mu+\sigma$', xy=(mean_x + 1.6 * std_x, 1.5 * std_cdf),
                           horizontalalignment='center', verticalalignment='center', fontsize=15, color=color)
    if plot_2std:  # plot mean +- 2*standard deviations (95% interval)
        std2_cdf = st.norm.pdf(mean_x + 2 * std_x, mean_x, std_x)
        pylab.plot([mean_x + 2 * std_x, mean_x + 2 * std_x], [0, std2_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle="--")
        pylab.plot([mean_x - 2 * std_x, mean_x - 2 * std_x], [0, std2_cdf], color=color, linewidth=linewidth, alpha=alpha, linestyle="--")
        if annotate:
            pylab.annotate(r'$\mu+2\sigma$', xy=(mean_x + 2.6 * std_x, 1.5 * std2_cdf),
                           horizontalalignment='center', verticalalignment='center', fontsize=15, color=color)
    if xlim:  # cut off unused space on the x-axis
        pylab.xlim([mean_x - 4 * std_x, mean_x + 4 * std_x])
    return figure
| |
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from MaudeMiner.core.database.base import base
def create(engine, tables=()):
    """Create the given tables in the database.

    Parameters
    ----------
    engine : SQLAlchemy engine bound to the target database.
    tables : sequence of SQLAlchemy Table objects to create.  The default
        is an *empty* sequence, which creates nothing (this preserves the
        original behaviour, where the default was a mutable ``[]``).
    """
    # Single-argument print() is valid in both Python 2 and 3.
    print(" === Creating Table(s) === ")
    for t in tables:
        print(" " + str(t))
    # create_all() is a no-op for tables that already exist.
    base.metadata.create_all(engine, tables=tables)
    print("Done.")
def drop(engine, tables=()):
    """Drop the given tables from the database.

    Parameters
    ----------
    engine : SQLAlchemy engine bound to the target database.
    tables : sequence of SQLAlchemy Table objects to drop.  The default
        is an *empty* sequence, which drops nothing (this preserves the
        original behaviour, where the default was a mutable ``[]``).
    """
    # Single-argument print() is valid in both Python 2 and 3.
    print(" === Dropping Table(s) === ")
    for t in tables:
        print(" " + str(t))
    base.metadata.drop_all(engine, tables=tables)
    print("Done.")
class Event(base):
    """
    One MAUDE adverse-event master record.

    Comes from mdrfoi*.txt
    """
    __tablename__ = "Events"
    # Report identity.
    report_key = Column(Integer, primary_key=True)
    event_key = Column(Integer)
    report_number = Column(String)
    report_source_code = Column(String)
    manufacturer_link_flag = Column(String)
    number_devices_in_event = Column(Integer)
    number_patients_in_event = Column(Integer)
    # All dates are stored verbatim as strings, exactly as found in the
    # source text files (no parsing is done at the model level).
    date_received = Column(String)
    # Section B
    adverse_event_flag = Column(String)
    product_problem_flag = Column(String)
    date_report = Column(String)
    date_of_event = Column(String)
    single_use_flag = Column(String)
    reporter_occupation_code = Column(String)
    # Section E
    health_professional = Column(String)
    initial_report_to_fda = Column(String)
    # Section F
    distributor_id = Column(Integer, ForeignKey('Contacts.id'))
    date_facility_aware = Column(String)
    type_of_report = Column(String)
    report_date = Column(String)
    report_to_fda = Column(String)
    date_report_to_fda = Column(String)
    event_location = Column(String)
    report_to_manufacturer = Column(String)
    date_report_to_manufacturer = Column(String)
    manufacturer_id = Column(Integer, ForeignKey('Contacts.id'))
    # Section G
    manufacturer_contact_id = Column(Integer, ForeignKey('Contacts.id'))
    manufacturer_g1_id = Column(Integer, ForeignKey('Contacts.id'))
    source_type = Column(String)
    date_manufacturer_received = Column(String)
    # Section H
    device_date_of_manufacture = Column(String)
    # BUG(review): ``single_use_flag`` is also assigned in Section B above.
    # A second class-level assignment simply rebinds the attribute, so only
    # ONE column named single_use_flag is created.  One of the two was
    # probably meant to carry a different name — check the MAUDE file
    # layout and rename accordingly.
    single_use_flag = Column(String)
    remedial_action = Column(String)
    previous_use_code = Column(String)
    removal_correction_number = Column(Integer)
    event_type = Column(String)
class Contact(base):
    """
    A name/address/phone record referenced by Events via several
    foreign keys (distributor, manufacturer, manufacturer contact, ...).
    """
    __tablename__ = "Contacts"
    # NOTE(review): ``nextId`` is a plain class attribute, not a Column, and
    # nothing in this module reads or updates it — it looks like leftover
    # hand-rolled id bookkeeping; confirm before removing.
    nextId = 0
    # autoincrement='ignore_fk' — presumably to keep auto-increment
    # behaviour despite the foreign keys pointing at this table; verify
    # against the SQLAlchemy version in use.
    id = Column(Integer, primary_key=True, autoincrement='ignore_fk')
    title_name = Column(String)
    name = Column(String)
    street_1 = Column(String)
    street_2 = Column(String)
    city = Column(String)
    state_code = Column(String)
    zip_code = Column(String)
    zip_code_ext = Column(String)
    country_code = Column(String)
    postal_code = Column(String)
    phone_no_area_code = Column(String)
    phone_no_exchange = Column(String)
    phone_no = Column(String)
    phone_no_ext = Column(String)
    phone_no_country_code = Column(String)
    phone_no_city_code = Column(String)
    phone_no_local = Column(String)
    def __repr__(self):
        return "<Contact(%s,%s,%s)>" %(self.id, self.name, self.street_1)
class DeviceProblemCode(base):
    """
    Lookup table of MAUDE device problem codes.

    Comes from deviceproblemcodes.txt
    """
    __tablename__ = "DeviceProblemCodes"
    code = Column(Integer, primary_key=True, autoincrement=False)
    description = Column(String)

    def __init__(self, code=None, description=None):
        # Store the numeric code and its human-readable description.
        self.code = code
        self.description = description

    def __repr__(self):
        return "<DeviceProblemCode('{0}','{1}')>".format(self.code, self.description)
class Device(base):
    """
    One device involved in an event, plus its baseline registration data.

    Comes from foidev*.txt (except foidevproblem.txt)
    """
    __tablename__ = "Devices"
    # Surrogate key; report_key links the device back to its Event.
    id = Column(Integer, primary_key=True)
    report_key = Column(Integer, ForeignKey('Events.report_key'))
    device_event_key = Column(Integer)
    implant_flag = Column(String)
    date_removed_flag = Column(String)
    device_sequence_number = Column(String)
    # Dates and numbers are stored verbatim as strings from the source files.
    date_received = Column(String)
    # Section D
    brand_name = Column(String)
    generic_name = Column(String)
    manufacturer_name = Column(String)
    manufacturer_address_1 = Column(String)
    manufacturer_address_2 = Column(String)
    manufacturer_city = Column(String)
    manufacturer_state_code = Column(String)
    manufacturer_zip_code = Column(String)
    manufacturer_zip_code_ext = Column(String)
    manufacturer_country_code = Column(String)
    manufacturer_postal_code = Column(String)
    expiration_date_of_device = Column(String)
    model_number = Column(String)
    lot_number = Column(String)
    catalog_number = Column(String)
    other_id_number = Column(String)
    device_operator = Column(String)
    device_availability = Column(String)
    date_reported_to_manufacturer = Column(String)
    device_report_product_code = Column(String)
    device_age = Column(String)
    device_evaluated_by_manufacturer = Column(String)
    # Baseline Section
    baseline_brand_name = Column(String)
    baseline_generic_name = Column(String)
    baseline_model_number = Column(String)
    baseline_catalog_number = Column(String)
    baseline_other_id_number = Column(String)
    baseline_device_family = Column(String)
    baseline_shelf_life_contained_in_label = Column(String)
    baseline_shelf_life_in_months = Column(String)
    baseline_pma_flag = Column(String)
    baseline_pma_number = Column(String)
    baseline_510k_flag = Column(String)
    baseline_510k_number = Column(String)
    baseline_preamendment = Column(String)
    baseline_transitional = Column(String)
    baseline_510k_exempt_flag = Column(String)
    baseline_date_first_marketed = Column(String)
    baseline_date_ceased_marketing = Column(String)
class DeviceProblem(base):
    """
    Association between an Event and a DeviceProblemCode.

    Comes from foidevproblem.txt
    """
    __tablename__ = "DeviceProblems"
    # Composite primary key: one row per (event, problem code) pair.
    report_key = Column(Integer, ForeignKey('Events.report_key'), primary_key=True, autoincrement=False)
    code = Column(Integer, ForeignKey('DeviceProblemCodes.code'), primary_key=True, autoincrement=False)
    def __repr__(self):
        # BUG FIX: the original referenced self.device_problem_code, which is
        # not an attribute of this class (the column is named ``code``) and
        # raised AttributeError whenever an instance was repr()'d.
        return "<DeviceProblem('%s','%s')>" %(self.report_key, self.code)
class Narrative(base):
    """
    Free-text narrative attached to an event.

    Comes from foitext*.txt
    """
    __tablename__ = "Narratives"
    report_key = Column(Integer, ForeignKey('Events.report_key'))
    # BUG FIX: autoincrement was the *string* 'False', which is truthy and
    # therefore the opposite of the intended setting; use the boolean, as
    # the other models in this module do.
    text_key = Column(Integer, primary_key=True, autoincrement=False)
    text_type_code = Column(String)
    patient_sequence_number = Column(String)
    date_report = Column(String)
    text = Column(String)
    event = relationship("Event")
    def __repr__(self):
        # NOTE(review): the slice assumes ``text`` is never None — confirm
        # the loader always populates it before relying on repr().
        return "<Narrative('%s','%s')>" %(self.report_key, self.text[0:10])
class Patient(base):
    """
    One patient involved in an event.

    Comes from patient*.txt
    """
    __tablename__ = "Patients"
    # autoincrement='ignore_fk' — presumably to keep auto-increment
    # behaviour; verify against the SQLAlchemy version in use.
    id = Column(Integer, primary_key=True, autoincrement='ignore_fk')
    report_key = Column(Integer, ForeignKey('Events.report_key'))
    patient_sequence_number = Column(String)
    date_received = Column(String)
    sequence_number_treatment = Column(String)
    sequence_number_outcome = Column(String)
    def __repr__(self):
        return "<Patient('%s','%s', '%s')>" %(self.report_key, self.patient_sequence_number, self.date_received)
# Map and register models with the database
model_map = {
    "Event": Event,
    "DeviceProblemCode": DeviceProblemCode,
    "Device": Device,
    "DeviceProblem": DeviceProblem,
    "Narrative": Narrative,
    "Patient": Patient,
    "Contact": Contact,
}
# maude.database.registry.register_models(model_map)

# Table names are simply the pluralised model names ("Event" -> "Events").
table_map = dict((model_name + "s", model_cls)
                 for model_name, model_cls in model_map.items())
# maude.database.registry.register_tables(table_map)
| |
#!/usr/bin/env python
#
# Ubuntu Packages :
# apt-get install python-svn
# apt-get install python-elementtree
import sys
import os
import re
import gc

import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
import gnomeapplet
import gnome.ui
import gobject
import pysvn

import xml.etree.ElementTree as ET

import svn_applet_globals
# The rest of this file refers to the globals module as ``pglobals``.
import svn_applet_globals as pglobals
# BUG(review): ``svnApplet`` is not defined until the class statement below
# has executed, so this call raises NameError at import time.  The
# registration (needed for svnApplet.__gtype__ in the bonobo factory at the
# bottom of the file) must be moved to AFTER the class definition.
gobject.type_register(svnApplet)
class svnApplet(gnomeapplet.Applet):
    """Gnome panel applet that periodically checks a set of Subversion
    working copies and switches its icon when at least one is out of date."""

    #---------------------------------------------------------------------------
    # MAIN
    #---------------------------------------------------------------------------
    def __init__(self, applet, iid):
        """Build the applet and all graphical elements.

        applet -- the gnomeapplet.Applet container supplied by the factory
        iid    -- the bonobo interface id (unused here)
        """
        # Initializing application
        self.__gobject_init__()
        gnome.init(pglobals.name, pglobals.version)
        # gui_build() needs the container, so keep a reference first.  (The
        # original only assigned self.applet inside gui_build(), where the
        # name ``applet`` was not in scope.)
        self.applet = applet
        self.config_init()
        self.gui_build()
        # Periodic update from the filesystem.  BUG FIX: the original
        # referenced self.timeout_interval and self.handler_timeout, neither
        # of which exists; use the configured interval and the real handler.
        gobject.timeout_add(self.check_interval, self.core_timeout_handler, self)
        # Connecting the "destroy" signal and show the applet.  BUG FIX:
        # self.handler_shutdown does not exist; the handler is
        # core_shutdown_handler.
        applet.connect("destroy", self.core_shutdown_handler)
        applet.show_all()

    #---------------------------------------------------------------------------
    # CONFIG
    #---------------------------------------------------------------------------
    def config_init(self):
        """Initialize configuration parameters."""
        self.check = False          # a check has been requested
        self.check_in = False       # a check is currently in progress
        self.check_interval = 1000  # timer period, in milliseconds
        self.dir_root = os.getcwd()
        self.dir_icon = os.path.join(self.dir_root, 'icons')
        self.file_conf = os.path.join(self.dir_root, 'svn_applet.conf')
        self.file_icon_updateon = os.path.join(self.dir_icon, 'svn_applet_icon_24.png')
        self.file_icon_updateoff = os.path.join(self.dir_icon, 'svn_applet_icon_24_active.png')
        self.file_logo = os.path.join(self.dir_icon, 'svn_applet_icon.png')

    def config_get_directories(self, path):
        """Read the XML configuration file at *path* and return the list of
        svn directories to check (text of every svn/directory element)."""
        tree = ET.parse(path)
        directories = []
        for directory in tree.findall("svn/directory"):
            directories.append(directory.text)
        return directories

    #---------------------------------------------------------------------------
    # CORE
    #---------------------------------------------------------------------------
    def core_shutdown_handler(self, event):
        """Kill this applet."""
        del self.applet

    def core_ssl_server_trust_prompt_handler(self, trust_dict):
        """pysvn callback that auto-accepts all non-trusted certificates."""
        return True, trust_dict['failures'], False

    def core_timeout_handler(self, event):
        """Clock timer: run a pending check, guarding against re-entry.

        Returns 1 so gobject keeps the timeout scheduled.
        """
        # BUG FIX: the original referenced the never-assigned
        # ``self.checkin`` and re-set ``self.check`` (already true) instead
        # of the re-entrancy flag initialized in config_init().
        if self.check and not self.check_in:
            self.check_in = True
            if self.svn_checkall() != 0:
                self.gui_set_icon(self.file_icon_updateon)
            self.check_in = False
        return 1

    #---------------------------------------------------------------------------
    # SVN
    #---------------------------------------------------------------------------
    def svn_check(self, directory):
        """Check one SVN repository.

        Return a tuple (directory, local_rev, remote_rev).
        """
        client = pysvn.Client()
        client.callback_ssl_server_trust_prompt = self.core_ssl_server_trust_prompt_handler
        r_path, r_dict = client.info2(directory, recurse = False)[0]
        local_url = r_dict['URL']
        local_rev = r_dict['rev'].number
        #r_path, r_dict = client.info2(local_url, recurse = False)[0]
        #remote_rev = r_dict['rev'].number
        # NOTE(review): the remote lookup above is commented out, so the
        # remote revision is a hard-coded placeholder — every working copy
        # below revision 10000 is reported as out of date.  Confirm intent.
        remote_rev = 10000
        return (r_path, local_rev, remote_rev)

    def svn_checkall(self):
        """Check all configured SVN repositories.

        Returns the number of repositories that need an update.
        """
        directories_notUpdated = 0
        # BUG FIX: the original called self.read_configuration(), which does
        # not exist; the configuration reader is config_get_directories().
        directories = self.config_get_directories(self.file_conf)
        for directory in directories:
            r_path, l_rev, r_rev = self.svn_check(directory)
            if l_rev < r_rev:
                directories_notUpdated = directories_notUpdated + 1
        return directories_notUpdated

    #---------------------------------------------------------------------------
    # GUI
    #---------------------------------------------------------------------------
    def gui_build(self):
        """Build the Gnome Applet widgets inside self.applet."""
        # Build main images.
        # self.logo should be used for big image.
        # self.icon should be used as icon in Gnome Deskbar.
        self.logo = gtk.gdk.pixbuf_new_from_file(self.file_logo)
        self.icon = gtk.Image()
        self.gui_set_icon(self.file_icon_updateoff)
        self.icon.show()
        # Popup menu shown when clicking the applet icon; each verb is
        # associated with the method that shows the matching window.
        # BUG FIX: the Refresh item reused the bonobo name "prefs", which
        # clashed with the Preferences item; give it its own name.
        propxml="""
        <popup name="button3">
                <menuitem name="refresh" verb="Refresh" label="Force"
                          pixtype="stock" pixname="gtk-refresh" />
                <separator/>
                <menuitem name="prefs" verb="Preferences" label="Preferences"
                          pixtype="stock" pixname="gtk-properties" />
                <menuitem name="about" verb="About" label="_About"
                          pixtype="stock" pixname="gtk-about"/>
        </popup>"""
        verbs = [
            ("About", self.gui_window_about),
            ("Preferences", self.gui_window_configure),
            ("Refresh", self.gui_window_refresh)
        ]
        # Now, build the applet.  BUG FIX: the original referenced an
        # undefined local ``applet`` here; the container is stored on self
        # by __init__.
        self.hbox = gtk.HBox()
        self.hbox.pack_start(self.icon)
        self.applet.add(self.hbox)
        self.applet.setup_menu(propxml, verbs, None)
        # Tooltip shown when the user hovers over the applet icon.
        tooltips = gtk.Tooltips()
        tooltips.set_tip(self.applet, "Subversion Applet", tip_private=None)

    def gui_set_icon(self, path):
        """Update the applet icon."""
        self.icon.clear()
        gc.collect()
        self.icon.set_from_file(path)

    def gui_window_about(self, *arguments, **keywords):
        """Show a Gnome About window."""
        about = gtk.AboutDialog()
        about.set_name("Subversion Applet")
        about.set_version("0.1")
        about.set_license(pglobals.license)
        about.set_wrap_license(True)
        about.set_website("https://projects.aepik.net/p/gnome-svn-applet/")
        about.set_authors(["Thomas Chemineau : Project Leader"])
        about.set_logo(self.logo)
        about.connect("response", self.gui_window_about_handler)
        about.show()

    def gui_window_about_handler(self, window, response):
        """Response handler of gui_window_about: close the dialog."""
        window.hide()
        window.destroy()

    def gui_window_configure(self, *arguments, **keywords):
        """Show the configuration window (directories to monitor, check
        interval).  Not implemented yet."""
        return

    def gui_window_refresh(self, *arguments, **keywords):
        """Show a small confirmation dialog before re-checking all
        repositories."""
        # BUG FIX: the original passed gtk.BUTTONS_OK + gtk.BUTTONS_CANCEL.
        # ButtonsType values are enum constants, not flags, so their sum is
        # a different ButtonsType entirely; use the dedicated OK/CANCEL pair.
        refresh = gtk.MessageDialog(
            parent = None,
            flags = 0,
            type = gtk.MESSAGE_INFO,
            buttons = gtk.BUTTONS_OK_CANCEL,
            message_format = "Checking all defined subversion directories ?"
        )
        refresh.set_default_response(gtk.RESPONSE_CANCEL)
        refresh.connect("response", self.gui_window_refresh_handler)
        refresh.show()

    def gui_window_refresh_handler(self, window, response):
        """Response handler of gui_window_refresh: queue a check if the
        user confirmed."""
        window.hide()
        # BUG FIX: with OK/CANCEL buttons the confirmation response is
        # RESPONSE_OK; the original tested RESPONSE_YES, which no button in
        # this dialog ever emits.
        if response == gtk.RESPONSE_OK:
            self.check = True
        window.destroy()
#-------------------------------------------------------------------------------
# Bonobo handler, for Gnome integration.
#-------------------------------------------------------------------------------
def svnAppletFactory(applet, iid):
    """Bonobo factory callback: build an svnApplet inside *applet*.

    Must return a true value so bonobo knows the applet was created.
    """
    print("Building")
    svnApplet(applet, iid)
    # gtk.TRUE is a long-deprecated alias removed from later PyGTK
    # releases; the plain boolean is equivalent.
    return True
#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------
# With a single "debug" argument, host the applet in a plain toplevel window
# so it can be exercised outside the Gnome panel.
if len(sys.argv) == 2 and sys.argv[1] == "debug":
    # Here, it is debug.
    main_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    main_window.set_title("Subversion Applet")
    main_window.connect("destroy", gtk.main_quit)
    app = gnomeapplet.Applet()
    svnAppletFactory(app, None)
    app.reparent(main_window)
    main_window.show_all()
    gtk.main()
    sys.exit()
else:
    # Normal functionality: register as a bonobo factory with the panel.
    # NOTE(review): ``pglobals`` is referenced here but never bound in this
    # file — presumably svn_applet_globals was meant to be imported under
    # that alias; confirm the import.
    gnomeapplet.bonobo_factory(
        "OAFIID:GNOME_SvnApplet_Factory",
        svnApplet.__gtype__,
        pglobals.name,
        pglobals.version,
        svnAppletFactory )
| |
import socket
import threading
import time
import warnings
from selenium.webdriver import Chrome
from selenium.common.exceptions import NoSuchWindowException, TimeoutException, WebDriverException
from six.moves import BaseHTTPServer
from six.moves.urllib.parse import urlparse
import requests
import six
import tld
FIND_WINDOW_HANDLE_WARNING = (
'Created window handle could not be found reliably. Using less reliable '
'alternative method. JavaScript redirects are not supported and an '
'additional GET request might be made for the requested URL.'
)
headers = None
update_headers_mutex = threading.Semaphore()
update_headers_mutex.acquire()
# Using a global value to pass around the headers dictionary reference seems to
# be the easiest way to get access to it, since the HTTPServer doesn't keep an
# object of the instance of the HTTPRequestHandler
class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    # Handler for the throwaway local server used to capture the headers the
    # WebDriver sends; the captured headers are handed back through the
    # module-global ``headers`` (see the comment above).
    def do_GET(self):
        global headers
        headers = requests.structures.CaseInsensitiveDict(self.headers if six.PY3 else self.headers.dict)
        # Wake up get_webdriver_request_headers(), which is blocked on this
        # semaphore until the headers have been captured.
        update_headers_mutex.release()
        self.send_response(200)
        self.end_headers()
        # Immediately close the window as soon as it is loaded
        self.wfile.write(six.b('<script type="text/javascript">window.close();</script>'))

    # Suppress unwanted logging to stderr
    def log_message(self, format, *args):
        pass
def get_unused_port():
    """Ask the OS for a currently unused TCP port and return its number.

    The probe socket is closed before returning, so another process could
    grab the port in the meantime; callers retry on bind failure.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(('', 0))
    port = probe.getsockname()[1]
    probe.close()
    return port
def get_webdriver_request_headers(webdriver):
    """Return the default request headers the given WebDriver sends.

    Spins up a one-shot local HTTP server, makes the browser open it in a
    new window, and captures the headers of the resulting GET request.
    Blocks on ``update_headers_mutex`` until that request has arrived.
    """
    # There's a small chance that the port was taken since the call of
    # get_unused_port(), so make sure we try as often as needed
    while True:
        port = get_unused_port()
        try:
            server = BaseHTTPServer.HTTPServer(('', port), HTTPRequestHandler)
            break
        except socket.error:
            pass
    # handle_request() serves exactly one request, then the thread exits.
    threading.Thread(target=server.handle_request).start()
    original_window_handle = webdriver.current_window_handle
    webdriver.execute_script("window.open('http://127.0.0.1:%d/');" % port)
    # Blocks until HTTPRequestHandler.do_GET releases the semaphore.
    update_headers_mutex.acquire()
    # Possibly optional: Make sure that the webdriver didn't switch the window
    # handle to the newly opened window. Behaviors of different webdrivers seem
    # to differ greatly here
    if webdriver.current_window_handle != original_window_handle:
        webdriver.switch_to.window(original_window_handle)
    global headers
    headers_ = headers
    headers = None
    # Remove the host header, which will simply contain the localhost address
    # of the HTTPRequestHandler instance
    del headers_['host']
    return headers_
def prepare_requests_cookies(webdriver_cookies):
    """Convert Selenium cookie dicts into a plain name -> value mapping."""
    cookies = {}
    for cookie in webdriver_cookies:
        cookies[str(cookie['name'])] = str(cookie['value'])
    return cookies
def get_tld(url):
    """Return the top-level domain of *url*, or the URL itself when none
    can be extracted (bad URL or unknown domain)."""
    try:
        return tld.get_tld(url)
    except (tld.exceptions.TldBadUrl, tld.exceptions.TldDomainNotFound):
        return url
def find_window_handle(webdriver, callback):
    """Return the first window handle for which *callback(webdriver)* is
    true, leaving the driver switched to that window.

    If no window matches, switch back to the original window and return
    None.
    """
    original_handle = webdriver.current_window_handle
    if callback(webdriver):
        return original_handle
    # Newest handles first: the wanted window is most likely a recent one.
    for handle in reversed(webdriver.window_handles):
        if handle == original_handle:
            continue
        try:
            webdriver.switch_to.window(handle)
        except NoSuchWindowException:
            # The window was closed in the meantime.
            continue
        if callback(webdriver):
            return handle
    # Nothing matched: restore the original window and fall through to None.
    webdriver.switch_to.window(original_handle)
def make_find_domain_condition(webdriver, requested_domain):
    """Build a window-matching callback that is true when the driver's
    current URL belongs to *requested_domain*."""
    def condition(driver):
        try:
            return get_tld(driver.current_url) == requested_domain
        except NoSuchWindowException:
            # The window handle was closed under us: simply no match.
            pass
    return condition
class RequestMixin(object):
    """Mixin for Selenium WebDriver classes adding a ``request`` method
    backed by the Requests library, sharing the driver's default headers
    and cookies with the HTTP session."""
    def request(self, method, url, find_window_handle_timeout=-1, page_load_timeout=-1, **kwargs):
        """Send an HTTP request through Requests using this WebDriver's
        session state, and copy response cookies back into the driver.

        method, url -- as for requests.Session.request.
        find_window_handle_timeout -- seconds to spend locating a window
            handle on the requested domain; negative waits forever.
        page_load_timeout -- seconds to wait before giving up on setting a
            cookie in a still-loading window; negative waits forever.
        Returns the requests.Response.
        """
        # Lazily create the session the first time a request is made, seeded
        # with the headers the WebDriver itself would send.
        if not hasattr(self, '_seleniumrequests_session'):
            self._seleniumrequests_session = requests.Session()
            # Workaround for https://github.com/cryzed/Selenium-Requests/issues/2
            if isinstance(self, Chrome):
                window_handles_before = len(self.window_handles)
                self._seleniumrequests_session.headers = get_webdriver_request_headers(self)
                # Wait until the newly opened window handle is closed again, to
                # prevent switching to it just as it is about to be closed
                while len(self.window_handles) > window_handles_before:
                    pass
            else:
                self._seleniumrequests_session.headers = get_webdriver_request_headers(self)
            # Delete cookies from the request headers, to prevent overwriting
            # manually set cookies later. This should only happen when the
            # webdriver has cookies set for the localhost
            if 'cookie' in self._seleniumrequests_session.headers:
                del self._seleniumrequests_session.headers['cookie']
        original_window_handle = None
        opened_window_handle = None
        # Cookies are domain-scoped, so make sure the driver is on a window
        # whose top-level domain matches the requested URL.
        requested_tld = get_tld(url)
        if not get_tld(self.current_url) == requested_tld:
            original_window_handle = self.current_window_handle
            # Try to find an existing window handle that matches the requested
            # top-level domain
            condition = make_find_domain_condition(self, requested_tld)
            window_handle = find_window_handle(self, condition)
            # Create a new window handle manually in case it wasn't found
            if not window_handle:
                components = urlparse(url)
                previous_window_handles = set(self.window_handles)
                self.execute_script("window.open('%s://%s/');" % (components.scheme, components.netloc))
                difference = set(self.window_handles) - set(previous_window_handles)
                if len(difference) == 1:
                    opened_window_handle = tuple(difference)[0]
                    # Most WebDrivers will automatically wait until the
                    # switched-to window handle has finished loading
                    self.switch_to.window(opened_window_handle)
                else:
                    warnings.warn(FIND_WINDOW_HANDLE_WARNING)
                    opened_window_handle = find_window_handle(self, condition)
                    # Window handle could not be found during first pass.
                    # Either the WebDriver didn't wait for the page to load
                    # completely (PhantomJS) or there was a redirect and the
                    # top-level domain changed
                    if not opened_window_handle:
                        response = self._seleniumrequests_session.get(url, stream=True)
                        current_tld = get_tld(response.url)
                        if current_tld != requested_tld:
                            condition = make_find_domain_condition(self, current_tld)
                    # Some WebDrivers (PhantomJS) take some time until the new
                    # window handle has loaded
                    start = time.time()
                    while not opened_window_handle:
                        opened_window_handle = find_window_handle(self, condition)
                        if find_window_handle_timeout >= 0 and time.time() - start > find_window_handle_timeout:
                            raise TimeoutException('window handle could not be found')
        # Acquire WebDriver's cookies and merge them with potentially passed
        # cookies
        cookies = prepare_requests_cookies(self.get_cookies())
        if 'cookies' in kwargs:
            cookies.update(kwargs['cookies'])
        kwargs['cookies'] = cookies
        response = self._seleniumrequests_session.request(method, url, **kwargs)
        # Set cookies received from the HTTP response in the WebDriver
        for cookie in response.cookies:
            cookie_dict = {'name': cookie.name, 'value': cookie.value, 'secure': cookie.secure}
            if cookie.expires:
                cookie_dict['expiry'] = cookie.expires
            if cookie.path_specified:
                cookie_dict['path'] = cookie.path
            # Some WebDrivers (PhantomJS) take some time until the new window
            # handle has loaded and cookies can be set
            start = time.time()
            while page_load_timeout < 0 or time.time() - start <= page_load_timeout:
                try:
                    self.add_cookie(cookie_dict)
                    break
                except WebDriverException:
                    pass
            else:
                raise TimeoutException('page took too long to load')
        # Don't keep cookies in the Requests session, only use the WebDriver's
        self._seleniumrequests_session.cookies.clear()
        # Restore the driver's window state if we opened/switched windows.
        if opened_window_handle:
            self.close()
        if original_window_handle:
            self.switch_to.window(original_window_handle)
        return response
| |
"""
Users
=====
"""
from pipes import quote
import posixpath
import random
import string
from fabric.api import hide, run, settings, sudo, local
from fabtools.group import (
exists as _group_exists,
create as _group_create,
)
from fabtools.files import uncommented_lines
from fabtools.utils import run_as_root
# Python2 and 3 compatibility
from past.builtins import basestring
def exists(name):
    """
    Check if a user exists.
    """
    with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
        # Quote the name so a crafted value cannot inject shell syntax
        # (the original interpolated it unquoted via locals()).
        return run('getent passwd %s' % quote(name)).succeeded
_SALT_CHARS = string.ascii_letters + string.digits + './'


def _crypt_password(password):
    """Hash *password* with crypt(3) using a random two-character salt.

    NOTE(review): a two-character salt selects the classic DES crypt
    scheme, which is very weak by modern standards — confirm whether the
    target systems accept a ``$6$`` (SHA-512) salt instead.
    """
    from crypt import crypt
    # Draw the salt from the OS entropy pool instead of reseeding the
    # module-level PRNG (random.seed() provided no security benefit).
    rng = random.SystemRandom()
    salt = ''.join(rng.choice(_SALT_CHARS) for _ in range(2))
    return crypt(password, salt)
def create(name, comment=None, home=None, create_home=None, skeleton_dir=None,
           group=None, create_group=True, extra_groups=None, password=None,
           system=False, shell=None, uid=None, ssh_public_keys=None,
           non_unique=False):
    """
    Create a new user and its home directory.

    If *create_home* is ``None`` (the default), a home directory will be
    created for normal users, but not for system users.
    You can override the default behaviour by setting *create_home* to
    ``True`` or ``False``.

    If *system* is ``True``, the user will be a system account. Its UID
    will be chosen in a specific range, and it will not have a home
    directory, unless you explicitely set *create_home* to ``True``.

    If *shell* is ``None``, the user's login shell will be the system's
    default login shell (usually ``/bin/sh``).

    *ssh_public_keys* can be a (local) filename or a list of (local)
    filenames of public keys that should be added to the user's SSH
    authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).

    Example::

        import fabtools

        if not fabtools.user.exists('alice'):
            fabtools.user.create('alice')

        with cd('/home/alice'):
            # ...

    """
    # Note that we use useradd (and not adduser), as it is the most
    # portable command to create users across various distributions:
    # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/useradd.html
    args = []
    if comment:
        args.append('-c %s' % quote(comment))
    if home:
        args.append('-d %s' % quote(home))
    if group:
        args.append('-g %s' % quote(group))
        # BUG FIX: only attempt group creation when a group was actually
        # requested; the original ran _group_exists(None) (i.e. a
        # ``getent`` on the literal string "None") whenever no group was
        # given.
        if create_group and not _group_exists(group):
            _group_create(group)
    if extra_groups:
        groups = ','.join(quote(group) for group in extra_groups)
        args.append('-G %s' % groups)
    if create_home is None:
        create_home = not system
    if create_home is True:
        args.append('-m')
    elif create_home is False:
        args.append('-M')
    if skeleton_dir:
        args.append('-k %s' % quote(skeleton_dir))
    if password:
        crypted_password = _crypt_password(password)
        args.append('-p %s' % quote(crypted_password))
    if system:
        args.append('-r')
    if shell:
        args.append('-s %s' % quote(shell))
    if uid:
        # uid may be an int; %s renders it and useradd takes it verbatim.
        args.append('-u %s' % uid)
    if non_unique:
        args.append('-o')
    args.append(name)
    args = ' '.join(args)
    run_as_root('useradd %s' % args)
    if ssh_public_keys:
        if isinstance(ssh_public_keys, basestring):
            ssh_public_keys = [ssh_public_keys]
        add_ssh_public_keys(name, ssh_public_keys)
def modify(name, comment=None, home=None, move_current_home=False, group=None,
           extra_groups=None, login_name=None, password=None, shell=None,
           uid=None, ssh_public_keys=None, non_unique=False):
    """
    Modify an existing user.

    *ssh_public_keys* can be a (local) filename or a list of (local)
    filenames of public keys that should be added to the user's SSH
    authorized keys (see :py:func:`fabtools.user.add_ssh_public_keys`).

    Example::

        import fabtools

        if fabtools.user.exists('alice'):
            fabtools.user.modify('alice', shell='/bin/sh')

    """
    args = []
    if comment:
        args.append('-c %s' % quote(comment))
    if home:
        args.append('-d %s' % quote(home))
    if move_current_home:
        args.append('-m')
    if group:
        args.append('-g %s' % quote(group))
    if extra_groups:
        groups = ','.join(quote(group) for group in extra_groups)
        args.append('-G %s' % groups)
    if login_name:
        args.append('-l %s' % quote(login_name))
    if password:
        crypted_password = _crypt_password(password)
        args.append('-p %s' % quote(crypted_password))
    if shell:
        args.append('-s %s' % quote(shell))
    if uid:
        # BUG FIX: the original used quote(uid), which raises TypeError
        # when uid is an int (pipes.quote iterates its argument).  Render
        # it with %s instead, matching create().
        args.append('-u %s' % uid)
    if non_unique:
        args.append('-o')
    if args:
        args.append(name)
        args = ' '.join(args)
        run_as_root('usermod %s' % args)
    if ssh_public_keys:
        if isinstance(ssh_public_keys, basestring):
            ssh_public_keys = [ssh_public_keys]
        add_ssh_public_keys(name, ssh_public_keys)
def home_directory(name):
    """
    Get the absolute path to the user's home directory

    Example::

        import fabtools

        home = fabtools.user.home_directory('alice')

    """
    with settings(hide('running', 'stdout')):
        # The remote shell expands ~name to the user's home directory.
        return run('echo ~%s' % name)
def local_home_directory(name=''):
    """
    Get the absolute path to the local user's home directory

    Example::

        import fabtools

        local_home = fabtools.user.local_home_directory()

    """
    with settings(hide('running', 'stdout')):
        # With the default empty name this expands plain ~ (current user).
        return local('echo ~%s' % name, capture=True)
def authorized_keys(name):
    """
    Get the list of authorized SSH public keys for the user
    """
    keys_path = posixpath.join(home_directory(name), '.ssh', 'authorized_keys')
    return uncommented_lines(keys_path, use_sudo=True)
def add_ssh_public_key(name, filename):
    """
    Add a single public key to the user's authorized SSH keys.

    *filename* is the local filename of the public key to append to the
    user's SSH authorized keys.

    Example::

        import fabtools

        fabtools.user.add_ssh_public_key('alice', '~/.ssh/id_rsa.pub')

    """
    # Delegate to the multi-key variant with a one-element list.
    add_ssh_public_keys(name, [filename])
def add_ssh_public_keys(name, filenames):
    """
    Add multiple public keys to the user's authorized SSH keys.

    *filenames* must be a list of local filenames of public keys that
    should be added to the user's SSH authorized keys.

    Example::

        import fabtools

        fabtools.user.add_ssh_public_keys('alice', [
            '~/.ssh/id1_rsa.pub',
            '~/.ssh/id2_rsa.pub',
        ])

    """
    from fabtools.require.files import (
        directory as _require_directory,
        file as _require_file,
    )
    ssh_dir = posixpath.join(home_directory(name), '.ssh')
    _require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
    authorized_keys_filename = posixpath.join(ssh_dir, 'authorized_keys')
    _require_file(authorized_keys_filename, mode='600', owner=name,
                  use_sudo=True)
    # PERF FIX: fetch the remote key list once instead of re-reading the
    # file over SSH for every filename, and keep the local copy up to date
    # as keys are appended so duplicates within *filenames* are still
    # skipped.
    known_keys = set(authorized_keys(name))
    for filename in filenames:
        with open(filename) as public_key_file:
            public_key = public_key_file.read().strip()
        # we don't use fabric.contrib.files.append() as it's buggy
        if public_key not in known_keys:
            sudo('echo %s >>%s' % (quote(public_key),
                                   quote(authorized_keys_filename)))
            known_keys.add(public_key)
def add_host_keys(name, hostname):
    """
    Add all public keys of a host to the user's SSH known hosts file
    """
    from fabtools.require.files import (
        directory as _require_directory,
        file as _require_file,
    )
    ssh_dir = posixpath.join(home_directory(name), '.ssh')
    _require_directory(ssh_dir, mode='700', owner=name, use_sudo=True)
    known_hosts_filename = posixpath.join(ssh_dir, 'known_hosts')
    _require_file(known_hosts_filename, mode='644', owner=name, use_sudo=True)
    known_hosts = uncommented_lines(known_hosts_filename, use_sudo=True)
    with hide('running', 'stdout'):
        # Scan the host's RSA and DSA keys; stderr is discarded.
        scanned = run('ssh-keyscan -t rsa,dsa %s 2>/dev/null' % hostname)
    for host_key in scanned.splitlines():
        # Append only keys not already present in the known_hosts file.
        if host_key not in known_hosts:
            sudo('echo %s >>%s' % (quote(host_key),
                                   quote(known_hosts_filename)))
| |
"""
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg.decomp_schur import schur, rsf2csf
from scipy.linalg.matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
class LogmRankWarning(UserWarning):
    """Base category for rank-related warnings raised while computing logm."""
    pass
class LogmExactlySingularWarning(LogmRankWarning):
    """Warned when the logm input matrix is exactly singular."""
    pass
class LogmNearlySingularWarning(LogmRankWarning):
    """Warned when the logm input matrix is nearly singular."""
    pass
class LogmError(np.linalg.LinAlgError):
    """Raised when the matrix logarithm computation fails."""
    pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
    """Raised when the fractional matrix power computation fails."""
    pass
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
    """
    A representation of the linear operator (A - I)^p.
    """

    def __init__(self, A, p):
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0 or p != int(p):
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        self.ndim = A.ndim
        self.shape = A.shape

    def matvec(self, x):
        # Apply (A - I) to the vector p times.
        for _ in range(self._p):
            x = self._A.dot(x) - x
        return x

    def rmatvec(self, x):
        # Adjoint action: apply x -> x (A - I) p times.
        for _ in range(self._p):
            x = x.dot(self._A) - x
        return x

    def matmat(self, X):
        # Same as matvec but for a block of column vectors.
        for _ in range(self._p):
            X = self._A.dot(X) - X
        return X

    @property
    def T(self):
        # Transposition distributes over the power: ((A-I)^p)^T = (A^T-I)^p.
        return _MatrixM1PowerOperator(self._A.T, self._p)
#TODO renovate or move this function when scipy operators are more mature
def _onenormest_m1_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of (A - I)^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    """
    # Wrap (A - I)^p as a matrix-free operator and delegate the estimate.
    operator = _MatrixM1PowerOperator(A, p)
    return onenormest(operator, t=t, itmax=itmax,
                      compute_v=compute_v, compute_w=compute_w)
def _unwindk(z):
"""
Compute the scalar unwinding number.
Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z)) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] Robert M. Corless and David J. Jeffrey,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
David J. Jeffrey and Stephen M. Watt,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
A complex number preferably belonging to the closed negative real axis.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as written in the publication does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
This function is intended to not allow `a` to belong to the closed
negative real axis, but this is constraint is relaxed.
References
----------
.. [1] Awad H. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is Eq. (11.28) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Some amount of care has been taken to return a real number
if all of the inputs are real.
References
----------
.. [1] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l1) < abs(l2) / 2 or abs(l2) < abs(l1) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
ua = _unwindk(np.log(l2) - np.log(l1))
ub = _unwindk(np.log(1+z) - np.log(1-z))
u = ua + ub
if u:
f12 = t12 * (2*np.arctanh(z) + 2*np.pi*1j*(ua + ub)) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
    """
    A helper function for inverse scaling and squaring for Pade approximation.

    Parameters
    ----------
    T0 : (N, N) array_like upper triangular
        Matrix involved in inverse scaling and squaring.
    theta : indexable
        The values theta[1] .. theta[7] must be available.
        They represent bounds related to Pade approximation, and they depend
        on the matrix function which is being computed.
        For example, different values of theta are required for
        matrix logarithm than for fractional matrix power.

    Returns
    -------
    R : (N, N) array_like upper triangular
        Composition of zero or more matrix square roots of T0, minus I.
    s : non-negative integer
        Number of square roots taken.
    m : positive integer
        The degree of the Pade approximation.

    Notes
    -----
    This subroutine appears as a chunk of lines within
    a couple of published algorithms; for example it appears
    as lines 4--35 in algorithm (3.1) of [1]_, and
    as lines 3--34 in algorithm (4.1) of [2]_.
    The instances of 'goto line 38' in algorithm (3.1) of [1]_
    probably mean 'goto line 36' and have been interpreted accordingly.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."
    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197
    """
    if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T0.shape
    T = T0

    # Find s0, the smallest s such that the spectral radius
    # of a certain diagonal matrix is at most theta[7].
    # Note that because theta[7] < 1,
    # this search will not terminate if any diagonal entry of T is zero.
    s0 = 0
    tmp_diag = np.diag(T)
    if np.count_nonzero(tmp_diag) != n:
        raise Exception('internal inconsistency')
    while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
        tmp_diag = np.sqrt(tmp_diag)
        s0 += 1

    # Take matrix square roots of T.
    for i in range(s0):
        T = _sqrtm_triu(T)

    # Flow control in this section is a little odd.
    # This is because I am translating algorithm descriptions
    # which have GOTOs in the publication.
    s = s0
    k = 0
    # d_p estimates ||(T - I)^p||_1^(1/p); the a_i values combine
    # consecutive estimates as prescribed by the published algorithms.
    d2 = _onenormest_m1_power(T, 2) ** (1/2)
    d3 = _onenormest_m1_power(T, 3) ** (1/3)
    a2 = max(d2, d3)
    m = None
    # Try the two cheapest Pade degrees first.
    for i in (1, 2):
        if a2 <= theta[i]:
            m = i
            break
    while m is None:
        if s > s0:
            # d3 must be refreshed after any additional square root.
            d3 = _onenormest_m1_power(T, 3) ** (1/3)
        d4 = _onenormest_m1_power(T, 4) ** (1/4)
        a3 = max(d3, d4)
        if a3 <= theta[7]:
            # A degree in 3..7 is feasible; pick the smallest one.
            j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
            if j1 <= 6:
                m = j1
                break
            elif a3 / 2 <= theta[5] and k < 2:
                # One more square root is expected to allow degree <= 6;
                # at most two of these extra square roots are attempted.
                k += 1
                T = _sqrtm_triu(T)
                s += 1
                continue
        d5 = _onenormest_m1_power(T, 5) ** (1/5)
        a4 = max(d4, d5)
        eta = min(a3, a4)
        for i in (6, 7):
            if eta <= theta[i]:
                m = i
                break
        if m is not None:
            break
        # No suitable degree found; take another square root and retry.
        T = _sqrtm_triu(T)
        s += 1

    # The subtraction of the identity is redundant here,
    # because the diagonal will be replaced for improved numerical accuracy,
    # but this formulation should help clarify the meaning of R.
    R = T - np.identity(n)

    # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
    # using formulas that have less subtractive cancellation.
    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:
        for j in range(n):
            a = T0[j, j]
            r = _briggs_helper_function(a, s)
            R[j, j] = r
        p = np.exp2(-s)
        for j in range(n-1):
            l1 = T0[j, j]
            l2 = T0[j+1, j+1]
            t12 = T0[j, j+1]
            f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
            R[j, j+1] = f12

    # Return the T-I matrix, the number of square roots, and the Pade degree.
    if not np.array_equal(R, np.triu(R)):
        raise Exception('internal inconsistency')
    return R, s, m
def _fractional_power_pade_constant(i, t):
# A helper function for matrix fractional power.
if i < 1:
raise ValueError('expected a positive integer i')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
if i == 1:
return -t
elif i % 2 == 0:
j = i // 2
return (-j + t) / (2 * (2*j - 1))
elif i % 2 == 1:
j = (i - 1) // 2
return (-j - t) / (2 * (2*j + 1))
else:
raise Exception('internal error')
def _fractional_power_pade(R, t, m):
    """
    Evaluate the Pade approximation of a fractional matrix power.

    Evaluate the degree-m Pade approximation of R
    to the fractional matrix power t using the continued fraction
    in bottom-up fashion using algorithm (4.1) in [1]_.

    Parameters
    ----------
    R : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.
    m : positive integer
        Degree of Pade approximation.

    Returns
    -------
    U : (N, N) array_like
        The degree-m Pade approximation of R to the fractional power t.
        This matrix will be upper triangular.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    if m < 1 or int(m) != m:
        raise ValueError('expected a positive integer m')
    if not (-1 < t < 1):
        raise ValueError('expected -1 < t < 1')
    R = np.asarray(R)
    if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = R.shape
    eye = np.identity(n)
    # Bottom-up evaluation of the continued fraction.
    approx = R * _fractional_power_pade_constant(2*m, t)
    for degree in reversed(range(1, 2*m)):
        numerator = R * _fractional_power_pade_constant(degree, t)
        approx = solve_triangular(eye + approx, numerator)
    U = eye + approx
    if not np.array_equal(U, np.triu(U)):
        raise Exception('internal inconsistency')
    return U
def _remainder_matrix_power_triu(T, t):
    """
    Compute a fractional power of an upper triangular matrix.

    The fractional power is restricted to fractions -1 < t < 1.
    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    # Degree-dependent Pade bounds for the fractional power.
    m_to_theta = {
        1: 1.51e-5,
        2: 2.24e-3,
        3: 1.88e-2,
        4: 6.04e-2,
        5: 1.24e-1,
        6: 2.00e-1,
        7: 2.79e-1,
    }
    n, n = T.shape
    T0 = T
    T0_diag = np.diag(T0)
    if np.array_equal(T0, np.diag(T0_diag)):
        # Diagonal matrices can be powered entrywise.
        U = np.diag(T0_diag ** t)
    else:
        R, s, m = _inverse_squaring_helper(T0, m_to_theta)

        # Evaluate the Pade approximation.
        # Note that this function expects the negative of the matrix
        # returned by the inverse squaring helper.
        U = _fractional_power_pade(-R, t, m)

        # Undo the inverse scaling and squaring.
        # Be less clever about this
        # if the principal branch does not exist at T0;
        # this happens when a diagonal entry of T0
        # is negative with imaginary part 0.
        eivals = np.diag(T0)
        has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
        for i in range(s, -1, -1):
            if i < s:
                U = U.dot(U)
            else:
                if has_principal_branch:
                    # Overwrite the diagonal and first superdiagonal with
                    # cancellation-resistant formulas before the squarings.
                    p = t * np.exp2(-i)
                    U[np.diag_indices(n)] = T0_diag ** p
                    for j in range(n-1):
                        l1 = T0[j, j]
                        l2 = T0[j+1, j+1]
                        t12 = T0[j, j+1]
                        f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
                        U[j, j+1] = f12
    if not np.array_equal(U, np.triu(U)):
        raise Exception('internal inconsistency')
    return U
def _remainder_matrix_power(A, t):
    """
    Compute the fractional power of a matrix, for fractions -1 < t < 1.

    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    # This code block is copied from numpy.matrix_power().
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('input must be a square array')

    # Get the number of rows and columns.
    n, n = A.shape

    # Triangularize the matrix if necessary,
    # attempting to preserve dtype if possible.
    if np.array_equal(A, np.triu(A)):
        Z = None
        T = A
    else:
        if np.isrealobj(A):
            T, Z = schur(A)
            if not np.array_equal(T, np.triu(T)):
                # The real Schur form has 2x2 blocks; convert to the
                # complex upper triangular Schur form.
                T, Z = rsf2csf(T, Z)
        else:
            T, Z = schur(A, output='complex')

    # Zeros on the diagonal of the triangular matrix are forbidden,
    # because the inverse scaling and squaring cannot deal with it.
    T_diag = np.diag(T)
    if np.count_nonzero(T_diag) != n:
        raise FractionalMatrixPowerError(
            'cannot use inverse scaling and squaring to find '
            'the fractional matrix power of a singular matrix')

    # If the triangular matrix is real and has a negative
    # entry on the diagonal, then force the matrix to be complex.
    if np.isrealobj(T) and np.min(T_diag) < 0:
        T = T.astype(complex)

    # Get the fractional power of the triangular matrix,
    # and de-triangularize it if necessary.
    U = _remainder_matrix_power_triu(T, t)
    if Z is not None:
        # Undo the Schur similarity transform: A^t = Z U Z^H.
        ZH = np.conjugate(Z).T
        return Z.dot(U).dot(ZH)
    else:
        return U
def _fractional_matrix_power(A, p):
"""
Compute the fractional power of a matrix.
See the fractional_matrix_power docstring in matfuncs.py for more info.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
if p == int(p):
return np.linalg.matrix_power(A, int(p))
# Compute singular values.
s = svdvals(A)
# Inverse scaling and squaring cannot deal with a singular matrix,
# because the process of repeatedly taking square roots
# would not converge to the identity matrix.
if s[-1]:
# Compute the condition number relative to matrix inversion,
# and use this to decide between floor(p) and ceil(p).
k2 = s[0] / s[-1]
p1 = p - np.floor(p)
p2 = p - np.ceil(p)
if p1 * k2 ** (1 - p1) <= -p2 * k2:
a = int(np.floor(p))
b = p1
else:
a = int(np.ceil(p))
b = p2
try:
R = _remainder_matrix_power(A, b)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
except np.linalg.LinAlgError as e:
pass
# If p is negative then we are going to give up.
# If p is non-negative then we can fall back to generic funm.
if p < 0:
X = np.empty_like(A)
X.fill(np.nan)
return X
else:
p1 = p - np.floor(p)
a = int(np.floor(p))
b = p1
R, info = funm(A, lambda x: pow(x, b), disp=False)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
def _logm_triu(T):
    """
    Compute matrix logarithm of an upper triangular matrix.

    The matrix logarithm is the inverse of
    expm: expm(logm(`T`)) == `T`

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose logarithm to evaluate

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `T`

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197
    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7
    .. [3] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    T = np.asarray(T)
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T.shape

    # Construct T0 with the appropriate type,
    # depending on the dtype and the spectrum of T.
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    if keep_it_real:
        T0 = T
    else:
        T0 = T.astype(complex)

    # Define bounds given in Table (2.1).
    theta = (None,
            1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
            1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
            4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
            6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
    R, s, m = _inverse_squaring_helper(T0, theta)

    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
    # This requires the nodes and weights
    # corresponding to degree-m Gauss-Legendre quadrature.
    # These quadrature arrays need to be transformed from the [-1, 1] interval
    # to the [0, 1] interval.
    nodes, weights = scipy.special.p_roots(m)
    nodes = nodes.real
    if nodes.shape != (m,) or weights.shape != (m,):
        raise Exception('internal error')
    nodes = 0.5 + 0.5 * nodes
    weights = 0.5 * weights
    ident = np.identity(n)
    U = np.zeros_like(R)
    for alpha, beta in zip(weights, nodes):
        # Each quadrature term contributes alpha * (I + beta R)^-1 R.
        U += solve_triangular(ident + beta*R, alpha*R)
    U *= np.exp2(s)

    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:
        # Recompute diagonal entries of U.
        U[np.diag_indices(n)] = np.log(np.diag(T0))

        # Recompute superdiagonal entries of U.
        # This indexing of this code should be renovated
        # when newer np.diagonal() becomes available.
        for i in range(n-1):
            l1 = T0[i, i]
            l2 = T0[i+1, i+1]
            t12 = T0[i, i+1]
            U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)

    # Return the logm of the upper triangular matrix.
    if not np.array_equal(U, np.triu(U)):
        raise Exception('internal inconsistency')
    return U
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
return T
def _logm(A):
    """
    Compute the matrix logarithm.

    See the logm docstring in matfuncs.py for more info.

    Notes
    -----
    In this function we look at triangular matrices that are similar
    to the input matrix. If any diagonal entry of such a triangular matrix
    is exactly zero then the original matrix is singular.
    The matrix logarithm does not exist for such matrices,
    but in such cases we will pretend that the diagonal entries that are zero
    are actually slightly positive by an ad-hoc amount, in the interest
    of returning something more useful than NaN. This will cause a warning.
    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    # If the input matrix dtype is integer then copy to a float dtype matrix.
    if issubclass(A.dtype.type, np.integer):
        A = np.asarray(A, dtype=float)

    keep_it_real = np.isrealobj(A)
    try:
        if np.array_equal(A, np.triu(A)):
            # Already triangular: no Schur decomposition needed.
            A = _logm_force_nonsingular_triangular_matrix(A)
            if np.min(np.diag(A)) < 0:
                # A negative real eigenvalue forces a complex logarithm.
                A = A.astype(complex)
            return _logm_triu(A)
        else:
            if keep_it_real:
                T, Z = schur(A)
                if not np.array_equal(T, np.triu(T)):
                    # Real Schur form has 2x2 blocks; convert to the
                    # complex triangular Schur form.
                    T, Z = rsf2csf(T, Z)
            else:
                T, Z = schur(A, output='complex')
            T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
            U = _logm_triu(T)
            # Undo the Schur similarity transform: logm(A) = Z U Z^H.
            ZH = np.conjugate(Z).T
            return Z.dot(U).dot(ZH)
    except (SqrtmError, LogmError):
        # Triangular algorithm failed; return a NaN matrix instead of raising.
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
# --- end of matrix-function helpers; osfstorage provider module follows ---
import os
import json
import uuid
import typing
import hashlib
import logging
from waterbutler.core import utils
from waterbutler.core import signing
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.core.metadata import BaseMetadata
from waterbutler.core.utils import RequestHandlerContext
from waterbutler.providers.osfstorage import settings
from waterbutler.providers.osfstorage.metadata import OsfStorageFileMetadata
from waterbutler.providers.osfstorage.metadata import OsfStorageFolderMetadata
from waterbutler.providers.osfstorage.metadata import OsfStorageRevisionMetadata
# Module-level logger.
logger = logging.getLogger(__name__)

# HTTP methods whose signed payload travels in the query string
# rather than in a JSON body (see build_signed_url).
QUERY_METHODS = ('GET', 'DELETE')
class OSFStorageProvider(provider.BaseProvider):
"""Provider for the Open Science Framework's cloud storage service.
``osfstorage`` is actually a pair of providers. One is the metadata provider, the other is the
actual storage provider, where the data is kept. The OSF is the metadata provider. Any
metadata queries about objects in ``osfstorage`` are routed to the OSF to be answered. For
https://osf.io, the storage provider is ``cloudfiles``. For local testing the ``filesystem``
provider is used instead. Uploads and downloads are routed to and from the storage provider,
with additional queries to the metadata provider to set and get metadata about the object.
"""
__version__ = '0.0.1'
NAME = 'osfstorage'
    def __init__(self, auth, credentials, settings):
        """Initialize the provider from OSF-supplied settings.

        ``nid`` is the node id, ``rootId`` the id of the provider root
        folder, ``baseUrl`` the metadata (OSF) endpoint, and
        ``settings['storage']['provider']`` names the inner storage provider.
        """
        super().__init__(auth, credentials, settings)
        self.nid = settings['nid']
        self.root_id = settings['rootId']
        self.BASE_URL = settings['baseUrl']
        self.provider_name = settings['storage'].get('provider')
async def validate_v1_path(self, path, **kwargs):
if path == '/':
return WaterButlerPath('/', _ids=[self.root_id], folder=True)
implicit_folder = path.endswith('/')
obj_id = path.strip('/')
resp = await self.make_signed_request(
'GET',
self.build_url(obj_id, 'lineage'),
expects=(200,)
)
data = await resp.json()
explicit_folder = data['data'][0]['kind'] == 'folder'
if explicit_folder != implicit_folder:
raise exceptions.NotFoundError(str(path))
names, ids = zip(*[(x['name'], x['id']) for x in reversed(data['data'])])
return WaterButlerPath('/'.join(names), _ids=ids, folder=explicit_folder)
    async def validate_path(self, path, **kwargs):
        """Resolve a (possibly not-yet-existing) path into a WaterButlerPath.

        Unlike ``validate_v1_path``, the final path component is allowed to
        be missing on the server: its id is recorded as ``None`` so it can
        be created later.
        """
        if path == '/':
            return WaterButlerPath('/', _ids=[self.root_id], folder=True)

        ends_with_slash = path.endswith('/')

        # Split off the final component; ``name`` stays None when the path
        # has only a single component.
        try:
            path, name = path.strip('/').split('/')
        except ValueError:
            path, name = path, None

        async with self.signed_request(
            'GET',
            self.build_url(path, 'lineage'),
            expects=(200, 404)
        ) as resp:
            if resp.status == 404:
                # NOTE(review): ``path`` was stripped of slashes above, so
                # ``path.endswith('/')`` looks like it is always False here;
                # confirm whether ``folder=ends_with_slash`` was intended.
                return WaterButlerPath(path, _ids=(self.root_id, None), folder=path.endswith('/'))

            data = await resp.json()

        is_folder = data['data'][0]['kind'] == 'folder'
        names, ids = zip(*[(x['name'], x['id']) for x in reversed(data['data'])])

        if name is not None:
            # The final component did not exist in the lineage; append it
            # with no id and trust the caller's trailing slash for its kind.
            ids += (None, )
            names += (name, )
            is_folder = ends_with_slash

        return WaterButlerPath('/'.join(names), _ids=ids, folder=is_folder)
async def revalidate_path(self, base, path, folder=False):
assert base.is_dir
try:
data = next(
x for x in
await self.metadata(base)
if x.name == path and
x.kind == ('folder' if folder else 'file')
)
return base.child(data.name, _id=data.path.strip('/'), folder=folder)
except StopIteration:
return base.child(path, folder=folder)
    def make_provider(self, settings):
        """Requests on different files may need to use different provider
        instances, e.g. when different files live in different containers
        within a provider. This helper creates a single-use provider instance
        that optionally overrides the settings.

        :param dict settings: Overridden settings
        """
        # NOTE(review): the ``settings`` argument is not actually consulted;
        # the inner provider is always built (and cached) from
        # ``self.credentials['storage']`` / ``self.settings['storage']`` —
        # confirm whether per-call overrides were intended.
        if not getattr(self, '_inner_provider', None):
            self._inner_provider = utils.make_provider(
                self.provider_name,
                self.auth,
                self.credentials['storage'],
                self.settings['storage'],
            )
        return self._inner_provider
    def can_duplicate_names(self):
        """osfstorage permits a file and a folder to share the same name."""
        return True
    def is_same_region(self, other):
        """Return True when ``other`` stores its data in the same region,
        approximated by sharing the same storage bucket."""
        assert isinstance(other, self.__class__), 'Cannot compare region for providers of ' \
            'different provider classes.'
        # Region does not apply to local development with filesystem as storage backend.
        if self.settings['storage']['provider'] == 'filesystem':
            return True
        # For 1-to-1 bucket-region mapping, bucket is the same if and only if region is the same
        return self.settings['storage']['bucket'] == other.settings['storage']['bucket']
    def can_intra_copy(self, other, path=None):
        """Intra-copy is possible only between osfstorage providers in the same region."""
        return isinstance(other, self.__class__) and self.is_same_region(other)
    def can_intra_move(self, other, path=None):
        """Intra-move is possible only between osfstorage providers in the same region."""
        return isinstance(other, self.__class__) and self.is_same_region(other)
    async def intra_move(self, dest_provider, src_path, dest_path):
        """Delegate an intra-provider move to ``_do_intra_move_or_copy``."""
        return await self._do_intra_move_or_copy('move', dest_provider, src_path, dest_path)
    async def intra_copy(self, dest_provider, src_path, dest_path):
        """Delegate an intra-provider copy to ``_do_intra_move_or_copy``."""
        return await self._do_intra_move_or_copy('copy', dest_provider, src_path, dest_path)
def build_signed_url(self, method, url, data=None, params=None, ttl=100, **kwargs):
signer = signing.Signer(settings.HMAC_SECRET, settings.HMAC_ALGORITHM)
if method.upper() in QUERY_METHODS:
signed = signing.sign_data(signer, params or {}, ttl=ttl)
params = signed
else:
signed = signing.sign_data(signer, json.loads(data or {}), ttl=ttl)
data = json.dumps(signed)
# Ensure url ends with a /
if not url.endswith('/'):
if '?' not in url:
url += '/'
elif url[url.rfind('?') - 1] != '/':
url = url.replace('?', '/?')
return url, data, params
    async def make_signed_request(self, method, url, data=None, params=None, ttl=100, **kwargs):
        """Sign the request payload (see ``build_signed_url``) and dispatch it."""
        url, data, params = self.build_signed_url(method, url, data=data, params=params, ttl=ttl, **kwargs)
        return await self.make_request(method, url, data=data, params=params, **kwargs)
    def signed_request(self, *args, **kwargs):
        """Context-manager wrapper around ``make_signed_request``."""
        return RequestHandlerContext(self.make_signed_request(*args, **kwargs))
    async def download(self, path, version=None, revision=None, mode=None, **kwargs):
        """Download a file by delegating to the inner storage provider.

        The OSF is first queried for the storage-provider location of
        ``path`` (optionally at a given version); the byte stream is then
        fetched from the storage provider it names.

        :param path: file to download; must have a resolved identifier
        :param version: file version, if any
        :param revision: legacy alias for ``version``
        :param mode: forwarded to the OSF download endpoint
        """
        if not path.identifier:
            raise exceptions.NotFoundError(str(path))

        self.metrics.add('download', {
            'mode_provided': mode is not None,
            'version_from': 'revision' if version is None else 'version',
            'user_logged_in': self.auth.get('id', None) is not None,
        })
        if version is None:
            # TODO Clean this up
            # version could be 0 here
            version = revision

        # Capture user_id for analytics if user is logged in
        user_param = {}
        if self.auth.get('id', None):
            user_param = {'user': self.auth['id']}

        # osf storage metadata will return a virtual path within the provider
        async with self.signed_request(
            'GET',
            self.build_url(path.identifier, 'download', version=version, mode=mode),
            expects=(200, ),
            params=user_param,
            throws=exceptions.DownloadError,
        ) as resp:
            data = await resp.json()

        # Build the inner provider from the settings the OSF returned and
        # translate the OSF's virtual path into that provider's path.
        provider = self.make_provider(data['settings'])
        name = data['data'].pop('name')
        data['data']['path'] = await provider.validate_path('/' + data['data']['path'])

        download_kwargs = {}
        download_kwargs.update(kwargs)
        download_kwargs.update(data['data'])
        download_kwargs['displayName'] = kwargs.get('displayName', name)
        return await provider.download(**download_kwargs)
    async def upload(self, stream, path, **kwargs):
        """Upload a new file to osfstorage

        When a file is uploaded to osfstorage, WB does a bit of a dance to make sure it gets there
        reliably. First we take the stream and add several hash calculators that can determine the
        hash of the file as it streams through. We then upload the file to a uuid-named file on the
        remote storage provider. Once that's complete, we determine the file's final name, which
        will be its sha256 hash. We then check to see if a file already exists at that path on the
        remote storage provider. If it does, we can skip moving the file (since it has already been
        uploaded) and instead delete the pending file. If it does not, we move the file on the
        remote storage provider from the pending path to its final path.

        Once this is done the file metadata is sent back to the metadata provider to be recorded.
        Finally, WB constructs its metadata response and sends that back to the original request
        issuer.
        """
        # Store the bytes with the inner storage provider first ...
        metadata = await self._send_to_storage_provider(stream, path, **kwargs)
        metadata = metadata.serialized()

        # ... then record the new version with the metadata provider (OSF).
        data, created = await self._send_to_metadata_provider(stream, path, metadata, **kwargs)

        name = path.name
        # Merge the OSF-recorded fields into the storage provider's metadata.
        metadata.update({
            'name': name,
            'md5': data['data']['md5'],
            'path': data['data']['path'],
            'sha256': data['data']['sha256'],
            'version': data['data']['version'],
            'downloads': data['data']['downloads'],
            'checkout': data['data']['checkout'],
            'modified': data['data']['modified'],
            'modified_utc': utils.normalize_datetime(data['data']['modified']),
        })

        # Save the new file's id into the WaterButlerPath object;
        # logs will need it later.
        path._parts[-1]._id = metadata['path'].strip('/')
        return OsfStorageFileMetadata(metadata, str(path)), created
async def delete(self, path, confirm_delete=0, **kwargs):
"""Delete file, folder, or provider root contents
:param OsfStoragePath path: path to delete
:param int confirm_delete: Must be 1 to confirm root folder delete
"""
if path.identifier is None:
raise exceptions.NotFoundError(str(path))
self.metrics.add('delete.is_root_delete', path.is_root)
if path.is_root:
self.metrics.add('delete.root_delete_confirmed', confirm_delete == 1)
if confirm_delete == 1:
await self._delete_folder_contents(path)
return
else:
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400,
)
await (await self.make_signed_request(
'DELETE',
self.build_url(path.identifier),
params={'user': self.auth['id']},
expects=(200, )
)).release()
async def metadata(self, path, **kwargs):
if path.identifier is None:
raise exceptions.MetadataError('{} not found'.format(str(path)), code=404)
if not path.is_dir:
return await self._item_metadata(path)
return await self._children_metadata(path)
async def revisions(self, path, view_only=None, **kwargs):
if path.identifier is None:
raise exceptions.MetadataError('File not found', code=404)
self.metrics.add('revisions', {'got_view_only': view_only is not None})
async with self.signed_request(
'GET',
self.build_url(path.identifier, 'revisions', view_only=view_only),
expects=(200, )
) as resp:
return [
OsfStorageRevisionMetadata(item)
for item in (await resp.json())['revisions']
]
async def create_folder(self, path, **kwargs):
async with self.signed_request(
'POST',
self.build_url(path.parent.identifier, 'children'),
data=json.dumps({
'kind': 'folder',
'name': path.name,
'user': self.auth['id'],
}),
headers={'Content-Type': 'application/json'},
expects=(201, )
) as resp:
resp_json = await resp.json()
# save new folder's id into the WaterButlerPath object. logs will need it later.
path._parts[-1]._id = resp_json['data']['path'].strip('/')
return OsfStorageFolderMetadata(resp_json['data'], str(path))
async def move(self,
               dest_provider: provider.BaseProvider,
               src_path: WaterButlerPath,
               dest_path: WaterButlerPath,
               rename: str=None,
               conflict: str='replace',
               handle_naming: bool=True) -> typing.Tuple[BaseMetadata, bool]:
    """Override parent's move to support cross-region osfstorage moves while preserving guids
    and versions.  Delegates to :meth:`.BaseProvider.move` when destination is not osfstorage.
    If both providers are in the same region (i.e. `.can_intra_move` is true), then calls that.
    Otherwise, will grab a download stream from the source region, send it to the destination
    region, *then* execute an `.intra_move` to update the file metadata in-place.

    :return: (metadata of the moved entity, True when it was newly created at the destination)
    """
    # when moving to non-osfstorage, default move is fine
    if dest_provider.NAME != 'osfstorage':
        return await super().move(dest_provider, src_path, dest_path, rename=rename,
                                  conflict=conflict, handle_naming=handle_naming)
    args = (dest_provider, src_path, dest_path)
    kwargs = {'rename': rename, 'conflict': conflict}
    self.provider_metrics.add('move', {
        'got_handle_naming': handle_naming,
        'conflict': conflict,
        'got_rename': rename is not None,
    })
    if handle_naming:
        # handle_naming resolves renames/conflicts into the final dest_path, so
        # rename/conflict must not be passed a second time below.
        dest_path = await dest_provider.handle_naming(
            src_path,
            dest_path,
            rename=rename,
            conflict=conflict,
        )
        args = (dest_provider, src_path, dest_path)
        kwargs = {}
    # files and folders shouldn't overwrite themselves
    if (
        self.shares_storage_root(dest_provider) and
        src_path.materialized_path == dest_path.materialized_path
    ):
        raise exceptions.OverwriteSelfError(src_path)
    self.provider_metrics.add('move.can_intra_move', False)
    if self.can_intra_move(dest_provider, src_path):
        # Same region: a pure metadata update on the OSF is enough.
        self.provider_metrics.add('move.can_intra_move', True)
        return await self.intra_move(*args)
    # Cross-region: copy data over, then relocate the metadata.
    if src_path.is_dir:
        meta_data, created = await self._folder_file_op(self.move, *args, **kwargs)  # type: ignore
        await self.delete(src_path)
    else:
        download_stream = await self.download(src_path)
        if getattr(download_stream, 'name', None):
            dest_path.rename(download_stream.name)
        await dest_provider._send_to_storage_provider(download_stream,  # type: ignore
                                                      dest_path, **kwargs)
        # intra_move rewires the OSF metadata so guids/versions are preserved.
        meta_data, created = await self.intra_move(dest_provider, src_path, dest_path)
    return meta_data, created
async def copy(self,
               dest_provider: provider.BaseProvider,
               src_path: WaterButlerPath,
               dest_path: WaterButlerPath,
               rename: str=None,
               conflict: str='replace',
               handle_naming: bool=True) -> typing.Tuple[BaseMetadata, bool]:
    """Override parent's copy to support cross-region osfstorage copies.  Delegates to
    :meth:`.BaseProvider.copy` when destination is not osfstorage.  If both providers are in the
    same region (i.e. `.can_intra_copy` is true), call `.intra_copy`.  Otherwise, grab a
    download stream from the source region, send it to the destination region, *then* execute
    an `.intra_copy` to make new file metadata entries in the OSF.

    This is needed because a same-region osfstorage copy will duplicate *all* the versions of
    the file, but `.BaseProvider.copy` will only copy the most recent version.

    :return: (metadata of the copied entity, True when it was newly created at the destination)
    """
    # when copying to non-osfstorage, the default copy is fine
    if dest_provider.NAME != 'osfstorage':
        return await super().copy(dest_provider, src_path, dest_path, rename=rename,
                                  conflict=conflict, handle_naming=handle_naming)
    args = (dest_provider, src_path, dest_path)
    kwargs = {'rename': rename, 'conflict': conflict}
    self.provider_metrics.add('copy', {
        'got_handle_naming': handle_naming,
        'conflict': conflict,
        'got_rename': rename is not None,
    })
    if handle_naming:
        # handle_naming resolves renames/conflicts into the final dest_path, so
        # rename/conflict must not be passed a second time below.
        dest_path = await dest_provider.handle_naming(
            src_path,
            dest_path,
            rename=rename,
            conflict=conflict,
        )
        args = (dest_provider, src_path, dest_path)
        kwargs = {}
    # files and folders shouldn't overwrite themselves
    if (
        self.shares_storage_root(dest_provider) and
        src_path.materialized_path == dest_path.materialized_path
    ):
        raise exceptions.OverwriteSelfError(src_path)
    self.provider_metrics.add('copy.can_intra_copy', False)
    if self.can_intra_copy(dest_provider, src_path):
        # Same region: the OSF duplicates all versions server-side.
        self.provider_metrics.add('copy.can_intra_copy', True)
        return await self.intra_copy(*args)
    # Cross-region: copy data over, then create new metadata entries.
    # (Unlike move, the source is kept.)
    if src_path.is_dir:
        meta_data, created = await self._folder_file_op(self.copy, *args, **kwargs)  # type: ignore
    else:
        download_stream = await self.download(src_path)
        if getattr(download_stream, 'name', None):
            dest_path.rename(download_stream.name)
        await dest_provider._send_to_storage_provider(download_stream,  # type: ignore
                                                      dest_path, **kwargs)
        meta_data, created = await self.intra_copy(dest_provider, src_path, dest_path)
    return meta_data, created
# ========== private ==========
async def _item_metadata(self, path, revision=None):
    """Fetch metadata for a single entry, optionally at a specific ``revision``."""
    async with self.signed_request(
        'GET',
        self.build_url(path.identifier, revision=revision),
        expects=(200, )
    ) as resp:
        return OsfStorageFileMetadata((await resp.json()), str(path))
async def _children_metadata(self, path):
    """List the children of ``path``, wrapping each entry in the matching metadata class."""
    async with self.signed_request(
        'GET',
        self.build_url(path.identifier, 'children', user_id=self.auth.get('id')),
        expects=(200, )
    ) as resp:
        resp_json = await resp.json()
    return [
        OsfStorageFolderMetadata(item, str(path.child(item['name'], folder=True)))
        if item['kind'] == 'folder'
        else OsfStorageFileMetadata(item, str(path.child(item['name'])))
        for item in resp_json
    ]
async def _delete_folder_contents(self, path, **kwargs):
    """Delete the contents of a folder. For use against provider root.

    Children are deleted one at a time; each child's string path is re-validated
    into a full OsfStoragePath (with identifier) before deletion.

    :param OsfStoragePath path: OsfStoragePath path object for folder
    """
    meta = (await self.metadata(path))
    for child in meta:
        osf_path = await self.validate_path(child.path)
        await self.delete(osf_path)
async def _do_intra_move_or_copy(self, action: str, dest_provider, src_path, dest_path):
    """Update files and folders on osfstorage with a single request.

    If the data of the file or the folder's children doesn't need to be copied to another
    bucket, then doing an intra-move or intra-copy is just a matter of updating the entity
    metadata in the OSF.  If something already exists at ``dest_path``, it must be deleted
    before relocating the source to the new path.

    :param action: name of the OSF hook to invoke (e.g. 'move' or 'copy')
    :return: (metadata of the entity at its new location, True when newly created there)
    """
    created = True
    if dest_path.identifier:
        # Destination already exists: clobber it first, and report "not created".
        created = False
        await dest_provider.delete(dest_path)
    async with self.signed_request(
        'POST',
        self.build_url('hooks', action),
        data=json.dumps({
            'user': self.auth['id'],
            'source': src_path.identifier,
            'destination': {
                'name': dest_path.name,
                'node': dest_provider.nid,
                'parent': dest_path.parent.identifier
            }
        }),
        headers={'Content-Type': 'application/json'},
        expects=(200, 201)
    ) as resp:
        data = await resp.json()
    if data['kind'] == 'file':
        # FIX: return the `created` flag computed above (as the folder branch does)
        # instead of re-deriving it via `dest_path.identifier is None`, which would
        # disagree for a falsy-but-not-None identifier.
        return OsfStorageFileMetadata(data, str(dest_path)), created
    folder_meta = OsfStorageFolderMetadata(data, str(dest_path))
    # Re-validate so the children listing uses the folder's *new* identifier.
    dest_path = await dest_provider.validate_v1_path(data['path'])
    folder_meta.children = await dest_provider._children_metadata(dest_path)
    return folder_meta, created
async def _send_to_storage_provider(self, stream, path, **kwargs):
    """Send uploaded file data to the storage provider, where it will be stored w/o metadata
    in a content-addressable format.

    The stream is first uploaded under a random (uuid4) pending name while md5/sha1/sha256
    writers hash it in flight.  The object is then renamed to its sha256 digest; if an
    object with that digest already exists, the pending upload is deleted (deduplication).

    NOTE(review): ``path`` is not used here — naming is purely content-addressed; confirm
    callers rely on that.

    :param stream: upload stream; hash writers are attached to it as a side effect
    :return: metadata of the file as it exists on the storage provider
    """
    pending_name = str(uuid.uuid4())
    provider = self.make_provider(self.settings)
    remote_pending_path = await provider.validate_path('/' + pending_name)
    logger.debug('upload: remote_pending_path::{}'.format(remote_pending_path))
    stream.add_writer('md5', streams.HashStreamWriter(hashlib.md5))
    stream.add_writer('sha1', streams.HashStreamWriter(hashlib.sha1))
    stream.add_writer('sha256', streams.HashStreamWriter(hashlib.sha256))
    await provider.upload(stream, remote_pending_path, check_created=False,
                          fetch_metadata=False, **kwargs)
    complete_name = stream.writers['sha256'].hexdigest
    remote_complete_path = await provider.validate_path('/' + complete_name)
    try:
        metadata = await provider.metadata(remote_complete_path)
    except exceptions.MetadataError as e:
        if e.code != 404:
            raise
        # No object with this digest yet: promote the pending upload in place.
        metadata, _ = await provider.move(provider, remote_pending_path, remote_complete_path)
    else:
        # Identical content already stored: discard the duplicate pending upload.
        await provider.delete(remote_pending_path)
    return metadata
async def _send_to_metadata_provider(self, stream, path, metadata, **kwargs):
    """Send metadata about the uploaded file (including its location on the storage provider) to
    the OSF.

    :param stream: the (already consumed) upload stream; its hash writers are read here
    :param path: destination OsfStoragePath (parent must have an identifier)
    :param metadata: storage-provider metadata describing where the bytes live
    :return: metadata of the file and a bool indicating if the file was newly created
    """
    async with self.signed_request(
        'POST',
        self.build_url(path.parent.identifier, 'children'),
        expects=(200, 201),
        data=json.dumps({
            'name': path.name,
            'user': self.auth['id'],
            'settings': self.settings['storage'],
            'metadata': metadata,
            'hashes': {
                'md5': stream.writers['md5'].hexdigest,
                'sha1': stream.writers['sha1'].hexdigest,
                'sha256': stream.writers['sha256'].hexdigest,
            },
            'worker': {
                'host': os.uname()[1],
                # TODO: Include additional information
                'address': None,
                'version': self.__version__,
            },
        }),
        headers={'Content-Type': 'application/json'},
    ) as response:
        # 201 means the OSF created a new record; 200 means it updated an existing one.
        created = response.status == 201
        data = await response.json()
    return data, created
| |
#import warnings
#warnings.filterwarnings("ignore", message="using a non-integer number instead of an integer will result in an error in the future")
def template_names():
    """Return the paths of all MILES model templates (miles_models/Mun*.fits)."""
    from glob import glob
    return glob('miles_models/Mun*.fits')
def choose_templates(templates, age_lim = 20.0, max_nonzero = 5):
    """Build a table of SSP templates with random weights.

    Parses spectral range, IMF type, IMF slope, metallicity (Z) and age (t) out
    of each MILES template filename, drops templates older than ``age_lim``,
    then gives random weights (normalized to unit sum) to at most
    ``max_nonzero`` randomly chosen templates; all other weights are zero.

    :param templates: iterable of template paths like 'miles_models/<name>.fits'
    :param age_lim: maximum template age to keep
    :param max_nonzero: maximum number of templates with nonzero weight
    :return: astropy Table with columns name/spectral range/IMF type/IMF slope/Z/t/weight
    """
    import numpy as np
    import astropy.table as table
    import SPaCT
    ssp_rows = []
    for template in templates:
        # BUG FIX: str.rstrip('.fits') strips *any* trailing chars from the set
        # {'.','f','i','t','s'} and could eat the end of the model name; slice
        # off the literal '.fits' suffix instead, then take the basename.
        template = template[:-len('.fits')].split('/')[1]
        spectral_range = template[0]
        IMF_type = template[1:3]
        IMF_slope = float(template[3:7])
        Z = SPaCT.plusminus(template[8]) * float(template[9:13])
        T = float(template[14:])
        ssp_rows.append([template, spectral_range, IMF_type, IMF_slope, Z, T])
    # transpose rows -> columns (list() keeps this py2/py3 compatible)
    ssps = table.Table(list(map(list, zip(*ssp_rows))),
                       names = ['name', 'spectral range', 'IMF type', 'IMF slope', 'Z', 't'])
    ssps = ssps[ssps['t'] <= age_lim]
    # then pick up to `max_nonzero` number of templates to be nonzero
    nonzero_templates = np.random.choice(ssps['name'], np.random.randint(1, max_nonzero + 1), replace = False)
    template_weights = np.random.rand(len(ssps['name'])) * [1. if i in nonzero_templates else 0. for i in ssps['name']]
    template_weights /= template_weights.sum()
    ssps.add_column(table.Column(name = 'weight', data = template_weights))
    return ssps
def generate_spectrum(ssps):
    '''
    generate a pristine spectrum based on weights given in an astropy table of templates

    :param ssps: astropy Table with 'name' and 'weight' columns
    :return: (median-normalized weighted-sum spectrum, wavelength array)
    '''
    import numpy as np
    from astropy.io import fits
    # Read the wavelength grid once from the first template; assumes all MILES
    # models share the same grid (the original code relied on this too).
    hdr = fits.open('miles_models/' + ssps['name'][0] + '.fits')[0].header
    NAXIS1, CRVAL1, CDELT1 = hdr['NAXIS1'], hdr['CRVAL1'], hdr['CDELT1']
    # now load in each template as a row in an array
    all_templates = np.empty([len(ssps['name']), NAXIS1])
    for i in range(len(ssps['name'])):
        # BUG FIX: previously indexed ssps['name'][0], so every row loaded the
        # *first* template and the weighted sum was just that one spectrum.
        all_templates[i] = ssps['weight'][i] * fits.open('miles_models/' + ssps['name'][i] + '.fits')[0].data
    clean_spectrum = all_templates.sum(axis = 0)
    l_full = CRVAL1 + np.linspace(0., NAXIS1 * CDELT1, NAXIS1)
    clean_spectrum /= np.median(clean_spectrum)
    return clean_spectrum, l_full
def generate_LOSVD(spectrum, v_res, moments, plots = False):
    '''
    Convolve `spectrum` with a Gaussian-like filter, except with nonzero higher-order moments.
    This reproduces a velocity field that pPXF will fit

    NOTE: nonzero higher-order moments not supported at this time
    NOTE: nonzero m1 is not supported (and is a very bad idea) - always use redshift routine to apply this!

    :param spectrum: 1d array-like flux values
    :param v_res: instrument velocity resolution (same units as the moments)
    :param moments: sequence of six Gauss-Hermite moments (m1..m6); only m2 may be nonzero
    :return: the convolved spectrum
    '''
    import numpy as np
    import scipy.ndimage as ndimage
    m1, m2, m3, m4, m5, m6 = moments
    if m1 != 0.:
        # BUG FIX: `a` was read by the while-condition before ever being assigned
        a = ''
        while a not in ['y', 'n']:
            a = raw_input('Warning! non-zero-centered LOSVDs are not recommended! Proceed? (y/n)')
        if a == 'n':
            exit()
    if list(moments[2:]) != [0., 0., 0., 0.]:
        # BUG FIX: the original message said the opposite of what is enforced
        raise ValueError('nonzero higher-order G-H moments are not supported!')
    # m2 is a velocity dispersion; convert it to pixels via the velocity resolution
    spectrum_LOSVD = ndimage.gaussian_filter1d(spectrum, m2 / v_res)
    if plots == True:
        # import lazily so the no-plot path has no matplotlib dependency
        import matplotlib.pyplot as plt
        plt.figure(figsize = (6, 4))
        plt.plot(spectrum, c = 'b', label = 'rest-frame')
        plt.plot(spectrum_LOSVD, c = 'g', label = 'LOSVD spectrum')
        plt.plot(np.abs(spectrum - spectrum_LOSVD), label = 'residual')
        plt.legend(loc = 'best')
        plt.show()
    return spectrum_LOSVD
def redshift_spectrum(l_0, z = None, dz = None):
    """Redshift a wavelength array, optionally with random z and measurement error.

    :param l_0: rest-frame wavelength value or array
    :param z: redshift; drawn uniformly from [0.01, 0.025) when None
    :param dz: redshift error; when None, drawn with magnitude 1%-10% of z and random sign
    :return: (z, dz, shifted wavelengths l_0 * (1 + z + dz))
    """
    import numpy as np
    if z is None:  # `is None` instead of `== None` (safe for array inputs too)
        z = np.random.uniform(0.01, 0.025)
    if dz is None:
        # random error between 1% and 10% of z, equal probability of + and -
        dz = np.sign(np.random.random() - 0.5) * (10**(np.random.uniform(-1.0, -0.5))) * z
    l_1 = l_0 * (1. + z + dz)
    return z, dz, l_1
def adjust_FWHM(sharp_spectrum, res_old, res_new, FWHM_old, FWHM_new):
    """Degrade a spectrum to a coarser instrumental FWHM.

    Convolves with a Gaussian whose FWHM is the quadrature difference of the
    two instrument FWHMs, expressed in pixels of the original grid.
    """
    import numpy as np
    import scipy.ndimage as ndimage
    assert FWHM_new >= FWHM_old
    fwhm_diff = np.sqrt(FWHM_new ** 2. - FWHM_old ** 2.)
    sigma_pix = fwhm_diff / 2.355 / res_old  # sigma difference in pixels
    return ndimage.gaussian_filter1d(sharp_spectrum, sigma_pix)
def downsample_spectrum(l_dense, dense_spectrum, l_sparse):
    """Linearly interpolate `dense_spectrum` (sampled at `l_dense`) onto `l_sparse`."""
    import scipy.interpolate as interp
    interpolator = interp.interp1d(l_dense, dense_spectrum, kind = 'linear')
    return l_sparse, interpolator(l_sparse)
def noisify_ifu(spectrum, n, SNR):
    """Build a fake IFU: row 0 is `spectrum` plus noise, rows 1..n are noise-only fibers.

    Noise in every fiber is Gaussian with scale 1/SNR, modulated by the spectrum.
    Returns the (n+1, len(spectrum)) IFU array and the noise added to row 0.
    """
    import numpy as np
    n_pix = len(spectrum)
    raw_noise = np.random.normal(loc = 0.0, scale = 1. / SNR, size = (n, n_pix))
    sky_fibers = raw_noise * np.tile(spectrum, reps = (n, 1))
    IFU = np.vstack((spectrum, sky_fibers))
    galaxy_noise = np.random.normal(loc = 0.0, scale = 1. / SNR, size = n_pix) * spectrum
    IFU[0] = spectrum + galaxy_noise
    return IFU, galaxy_noise
def population_sum_models(ssps):
    """Sum all templates weighted by the 'weight' column ("reality" spectrum).

    :param ssps: astropy Table with 'name' and 'weight' columns
    :return: (median-normalized weighted-sum spectrum, wavelength array)
    """
    import numpy as np
    from astropy.io import fits
    # Read the wavelength grid once; assumes all MILES models share it.
    hdr = fits.open('miles_models/' + ssps['name'][0] + '.fits')[0].header
    NAXIS1, CRVAL1, CDELT1 = hdr['NAXIS1'], hdr['CRVAL1'], hdr['CDELT1']
    # now load in each template as a row in an array
    all_templates = np.empty([len(ssps['name']), NAXIS1])
    for i in range(len(ssps['name'])):
        # BUG FIX: previously indexed ssps['name'][0], so every row loaded the
        # *first* template instead of template i.
        all_templates[i] = ssps['weight'][i] * fits.open('miles_models/' + ssps['name'][i] + '.fits')[0].data
    real_spectrum = all_templates.sum(axis = 0)
    l_full = CRVAL1 + np.linspace(0., NAXIS1 * CDELT1, NAXIS1)
    real_spectrum /= np.median(real_spectrum)
    return real_spectrum, l_full
def population_sum_fit(ssps):
    """Sum all templates weighted by the 'best-fit weights' column (pPXF result).

    :param ssps: astropy Table with 'name' and 'best-fit weights' columns
    :return: (median-normalized weighted-sum spectrum, wavelength array)
    """
    import numpy as np
    from astropy.io import fits
    # Read the wavelength grid once; assumes all MILES models share it.
    hdr = fits.open('miles_models/' + ssps['name'][0] + '.fits')[0].header
    NAXIS1, CRVAL1, CDELT1 = hdr['NAXIS1'], hdr['CRVAL1'], hdr['CDELT1']
    # now load in each template as a row in an array
    all_templates = np.empty([len(ssps['name']), NAXIS1])
    for i in range(len(ssps['name'])):
        # BUG FIX: previously indexed ssps['name'][0], so every row loaded the
        # *first* template instead of template i.
        all_templates[i] = ssps['best-fit weights'][i] * fits.open('miles_models/' + ssps['name'][i] + '.fits')[0].data
    derived_spectrum = all_templates.sum(axis = 0)
    l_full = CRVAL1 + np.linspace(0., NAXIS1 * CDELT1, NAXIS1)
    derived_spectrum /= np.median(derived_spectrum)
    return derived_spectrum, l_full
def pPXF_summary_plots(ssps, instrument_info, pp, lam_sparse, vel, verbose = False):
    '''
    Compare the pPXF fit against the known input population.

    Prints the nonzero fit/real templates (when verbose), then scatter-plots the
    fitted vs. real template weights on the (Z, t) plane.  Most of the spectral
    comparison plotting below is currently disabled (kept as string-literal
    blocks).

    NOTE: make sure `vel` is the sum of the redshift and the kinematic velocity fit.
    '''
    #make sure `vel` is the sum of the redshift and the kinematic velocity fit
    import numpy as np
    import matplotlib.pyplot as plt
    import astropy.table as table
    import colorpy.ciexyz as ciexyz
    import colorpy.colormodels as cmodels
    import warnings
    c = 299792.458  # speed of light, km/s
    if verbose == True:
        print 'non-zero fit templates:'
        print ssps[ssps['best-fit weights'] != 0.]['Z', 't', 'best-fit weights']
        print 'non-zero real solution templates:'
        print ssps[ssps['weight'] != 0.]['Z', 't', 'weight']
    #first plot the original and resultant populations
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex = True, figsize = (8, 6))
    ax1.set_title('fit')
    # left panel: pPXF weights; right panel: the true input weights
    a = ax1.scatter(ssps['Z'], ssps['t'], c = pp.weights, cmap = 'gnuplot', s = 40, vmin = 0.0, vmax = 1.0, edgecolor = 'grey')
    ax2.set_title('reality')
    ax2.scatter(ssps['Z'], ssps['t'], c = ssps['weight'], cmap = 'gnuplot', s = 40, vmin = 0.0, vmax = 1.0, edgecolor = 'grey')
    plt.colorbar(a)
    plt.suptitle('population fit comparison', size = 16)
    plt.show()
    #now plot the result with the input
    instrument_lam_lims = (instrument_info['CRVAL1'], instrument_info['CRVAL1'] + instrument_info['NAXIS1'] * instrument_info['CDELT1'])
    # rest-frame wavelengths (Angstroms) of notable absorption/emission lines
    lines = [
        ['Ca H', 3968.5], ['Ca K', 3933.7], ['H-alpha', 6562.8], ['H-beta', 4861.], ['Mg I', 5175.], ['Ca I', 4307.]
    ]
    #plt.figure(figsize = (10, 6))
    #ax = plt.subplot(111)
    #ax.plot(lam_sparse, pp.bestfit)
    print 'vel:', vel, 'km/s'
    #now plot relevant spectral lines (disabled)
    '''
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category = DeprecationWarning)
        for i, line in enumerate(lines):
            line_c = cmodels.irgb_string_from_xyz(ciexyz.xyz_from_wavelength(line[1]/10.))
            #print line_c
            ax.axvline(line[1] * (1. + vel / c), color = line_c)
            ax.annotate(line[0], xy = (line[1], 1.2), xytext = (line[1]+10., 1.1 - 0.1 * i%2), size = 14)
    '''
    #plt.show()
    # full-resolution vs. downsampled comparison plots (disabled)
    '''
    real_spectrum, l_full = population_sum_models(ssps = ssps)
    derived_spectrum, l_full = population_sum_fit(ssps = ssps)
    plt.figure(figsize = (10, 6))
    ax1 = plt.subplot(211)
    #first at full resolution
    ax1real = ax1.plot(l_full, real_spectrum, c = 'g', label = 'Reality', linewidth = 0.25)
    ax1der = ax1.plot(l_full, derived_spectrum, c = 'b', linestyle = '--', label = 'Fit', linewidth = 0.25)
    for val in instrument_lam_lims:
        ax1.axvline(val, c = 'r', linestyle = ':')
    ax1.set_title('Full-Resolution spectra', size = 16)
    ax1_1 = ax1.twinx()
    ax1err = ax1_1.plot(l_full, np.abs(real_spectrum - derived_spectrum), linewidth = 0.25, c = 'tomato', label = 'Error')
    for tl in ax1_1.get_yticklabels(): tl.set_color('tomato')
    ax1_l = ax1_1.legend(ax1real + ax1der + ax1err, [l.get_label() for l in (ax1real + ax1der + ax1err)], loc = 'best')
    ax1_l.set_zorder(5)
    ax2 = plt.subplot(212, sharex = ax1)
    ax2.set_title('Downsampled spectra', size = 16)
    ax2.set_xlabel(r'$\lambda[\AA]$', size = 16)
    #now after blurring and downsampling
    l_sparse = np.linspace(instrument_info['CRVAL1'], instrument_info['CRVAL1'] + instrument_info['NAXIS1'] * instrument_info['CDELT1'], instrument_info['NAXIS1'])
    l_sparse, sparse_spectrum_real = downsample_spectrum(l_dense = l_full, dense_spectrum = real_spectrum, l_sparse = l_sparse) #this accomplishes both downsampling and paring!!
    l_sparse, sparse_spectrum_der = downsample_spectrum(l_dense = l_full, dense_spectrum = derived_spectrum, l_sparse = l_sparse) #this accomplishes both downsampling and paring!!
    ax2real = ax2.plot(l_sparse, sparse_spectrum_real, c = 'g', label = 'Reality', linewidth = 0.25)
    ax2der = ax2.plot(l_sparse, sparse_spectrum_der, c = 'b', label = 'Fit', linewidth = 0.25, linestyle = '--')
    for val in instrument_lam_lims:
        ax2.axvline(val, c = 'r', linestyle = ':')
    ax2.set_title('Downsampled spectra', size = 16)
    ax2.set_xlabel(r'$\lambda[\AA]$', size = 16)
    ax2_1 = ax2.twinx()
    ax2err = ax2_1.plot(l_sparse, np.abs(sparse_spectrum_real - sparse_spectrum_der), linewidth = 0.25, c = 'tomato', label = 'Error')
    for tl in ax2_1.get_yticklabels(): tl.set_color('tomato')
    ax2_l = ax2_1.legend(ax2real + ax2der + ax2err, [l.get_label() for l in (ax2real + ax2der + ax2err)], loc = 'best')
    ax2_l.set_zorder(5)
    plt.tight_layout()
    plt.show()
    '''
def simulate_noise(sparse_spectrum, SNR, n_skyfiber_range = [1, 20, 3]):
    '''
    generate synthetic noise spectra for a given input spectrum, and test the required number of sky fibers (with similar noise profiles) to accurately get the SNR

    For each fiber count in range(start, stop + 1, step) given by `n_skyfiber_range`,
    builds a noisy IFU, recovers the SNR with SPaCT.noise_edgefibers, and overlays
    a normalized histogram of the recovered SNR per fiber count.
    '''
    import numpy as np
    import SPaCT
    import matplotlib.pyplot as plt
    plt.figure(figsize = (6, 4))
    for n_skyfibers in range(n_skyfiber_range[0], n_skyfiber_range[1] + 1, n_skyfiber_range[2]):
        ifu, galaxy_noise = noisify_ifu(sparse_spectrum, n = n_skyfibers, SNR = SNR)
        fiberlist = range(1, n_skyfibers + 1)
        # recovered per-pixel SNR using only the noise-only fibers
        SNR_calc = ifu[0] / SPaCT.noise_edgefibers(ifu, width = 3, fiberlist = fiberlist, verbose = False)
        bins, edges = np.histogram(SNR_calc, 50, normed = 1)
        # draw the histogram as a step curve (pairs of left/right bin edges)
        left, right = edges[:-1],edges[1:]
        X = np.array([left,right]).T.flatten()
        Y = np.array([bins,bins]).T.flatten()
        plt.plot(X, Y/Y.max(), label = str(n_skyfibers) + ' fibers')
    # mark the true SNR for reference
    plt.axvline(SNR, c = 'k', linestyle = ':')
    SNR_annotation = plt.text(SNR, 0.35, '$S/N=' + str(SNR) + '$')
    SNR_annotation.set_rotation('vertical')
    plt.title('Effect of # of sky fibers on SNR', size = 18)
    plt.xscale('log')
    plt.ylim([-0.05, 1.05])
    plt.xlabel('SNR', size = 18)
    plt.ylabel('normed fraction', size = 18)
    plt.legend(loc = 'best', prop = {'size':6})
    plt.tight_layout()
    plt.show()
def simulate_single_spectrum():
    '''
    End-to-end simulation of one observed spectrum, then a pPXF fit of it.

    STEPS:
    1. choose templates
    2. make spectrum
    2.1. convolve with a LOSVD
    3. blur to correct FWHM
    4. redshift to within some error
    5. downsample to correct wavelengths
    6. noisify and create an IFU with the same noise characteristics
    7. run pPXF
    '''
    from astropy.io import fits
    from astropy import table
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import SPaCT
    import scipy.stats as stats
    from ppxf import robust_sigma
    import warnings
    # instrument header taken from a real observation
    SPSPK_info = fits.open('NGC2558.msobj.fits')[0].header
    template_files = template_names()
    ssps = choose_templates(templates = template_files, max_nonzero = 4)
    clean_spectrum, l_full = generate_spectrum(ssps = ssps)
    #now redshift the spectrum
    MILES_res = fits.open(template_files[0])[0].header['CDELT1']
    SPSPK_res = 1.4
    #FWHMs should be in Angstroms
    FWHM_MILES = 1.36
    FWHM_SPSPK = 4.877 #this is specific to one particular configuration, so handle with care!
    SNR = 100.
    n_skyfibers = 8
    n_moments = 4 #how many moments to fit
    '''
    This is a temporary solution to the problem of generating moments.
    Basically, just set the first one equal to zero (since that rolls out of redshift)
    and set 2 - 4 equal to some reasonable values
    '''
    moments = [0., 45.]
    moments += [0. for _ in range(6 - len(moments))] #pad moments out to the length that the LOSVD function accepts
    c = 299792.458  # speed of light, km/s
    l_sparse = np.linspace(SPSPK_info['CRVAL1'], SPSPK_info['CRVAL1'] + SPSPK_info['NAXIS1'] * SPSPK_info['CDELT1'], SPSPK_info['NAXIS1'])
    v_res = np.mean(c / (l_sparse / FWHM_SPSPK))
    #print 'Instrument velocity resolution:', v_res
    # NOTE(review): the return value of generate_LOSVD is discarded here, so the
    # LOSVD is never actually applied to clean_spectrum — confirm intent.
    generate_LOSVD(spectrum = clean_spectrum, v_res = v_res, moments = moments, plots = False)
    blurred_spectrum = adjust_FWHM(sharp_spectrum = clean_spectrum, res_old = MILES_res, res_new = SPSPK_res, FWHM_old = FWHM_MILES, FWHM_new = FWHM_SPSPK)
    #now redshift the new blurred (but still full-resolution) spectrum into the observer frame
    z, dz, l_full = redshift_spectrum(l_0 = l_full, dz = 0.)
    l_sparse, sparse_spectrum = downsample_spectrum(l_dense = l_full, dense_spectrum = blurred_spectrum, l_sparse = l_sparse) #this accomplishes both downsampling and paring!!
    #now construct a fake IFU with 8 rows of pure noise at some SNR
    # NOTE(review): n = 2 is used here despite n_skyfibers = 8 above — confirm.
    ifu, galaxy_noise = noisify_ifu(sparse_spectrum, n = 2, SNR = SNR)
    #simulate_noise(sparse_spectrum, SNR = SNR)
    #more debugs
    '''plt.plot(l_sparse, sparse_spectrum, linewidth = 0.25, label = 'original')
    plt.plot(l_sparse, ifu[0], linewidth = 0.25, label = 'noisy')
    plt.plot(l_sparse, galaxy_noise, linewidth = 0.25, label = 'sample noise')
    plt.legend(loc = 'best')
    plt.show()
    '''
    edgefibers = range(1, len(ifu))
    pp = SPaCT.SP_pPXF(ifu/np.median(ifu[0]), fiber = 0, l_summ = (3907., 1.4, 1934),
        z = z + dz, verbose = False, noise_plots = False, fit_plots = False,
        edgefibers = edgefibers, age_lim = 20., n_moments = n_moments, bias = 100.)
    #now compare the resulting redshift
    print 'Best-fitting redshift:\t\t', z + dz + pp.sol[0]/c
    print 'Real redshift:\t\t\t\t', z
    print 'Guess redshift:\t\t\t\t', z + dz
    print 'Reduced chi2:', pp.chi2
    print ' # | guess | real'
    for (i, fit_guess, real_value) in zip(range(1, n_moments + 1), pp.sol, moments[:n_moments]):
        print 'moment', str(i), ':', str(np.round(fit_guess, 2)), ':', str(np.round(real_value, 2))
    #compare the resulting population fits
    #print pp.weights
    ssps.add_column(table.Column(name = 'best-fit weights', data = pp.weights/pp.weights.sum()))
    pPXF_summary_plots(ssps = ssps, instrument_info = SPSPK_info, pp = pp, lam_sparse = l_sparse, vel = (z + dz) * c + pp.sol[0], verbose = True)
    #now return the chi2 parameter for the best-fit, as opposed to the "reality"
    print 'Chi-square test'
# Guard the entry point so importing this module does not trigger a full simulation.
if __name__ == '__main__':
    simulate_single_spectrum()
| |
import os
import copy
import traceback
import codecs
import stat
import shutil
from collections import deque
from .log import LoggerFactory
from .gossip import gossiper
from .library import libstore
from .evaluater import evaluater
from .util import unified_diff, exec_command, PY3
# Global logger for this part
logger = LoggerFactory.create_logger('generator')
# Possible generator states and the colors used to display them in the UI/CLI.
GENERATOR_STATES = ['COMPLIANT', 'ERROR', 'UNKNOWN', 'NOT-ELIGIBLE']
GENERATOR_STATE_COLORS = {'COMPLIANT': 'green', 'ERROR': 'red', 'UNKNOWN': 'grey', 'NOT-ELIGIBLE': 'grey'}
class NoElementsExceptions(Exception):
    """Raised by ok_nodes(if_none='raise') when no node matches the requested group."""
    pass
# Get all nodes that are defining a service sname and where the service is OK
# TODO: give a direct link to object, must copy it?
def ok_nodes(group='', if_none=''):
    """Return alive nodes, optionally restricted to a gossip group.

    :param group: when non-empty, return the members of this group instead of
                  all alive nodes (group members are not state-filtered here)
    :param if_none: 'raise' -> raise NoElementsExceptions when nothing matches
    :return: list of node dicts sorted by uuid
    """
    # FIX: drop the dead `res = deque()` that was immediately shadowed; a plain
    # list is what both branches (and sorted()) produce anyway.
    res = []
    if group == '':
        for n in gossiper.nodes.values():  # note: nodes is a static dict
            if n['state'] != 'alive':
                continue
            res.append(n)
    else:
        nodes_uuids = gossiper.find_group_nodes(group)
        for node_uuid in nodes_uuids:
            n = gossiper.get(node_uuid)
            if n is not None:
                res.append(n)
    if if_none == 'raise' and len(res) == 0:
        raise NoElementsExceptions()
    # Be sure to always give nodes in the same order; if not, files would be
    # regenerated too often
    res = sorted(res, key=lambda node: node['uuid'])
    return res
class Generator(object):
def __init__(self, g):
    # Raw generator definition (dict loaded from the pack configuration)
    self.g = g
    self.name = g['name']
    self.pack_name = g['pack_name']
    self.pack_level = g['pack_level']
    self.buf = None        # raw template file content
    self.template = None   # compiled jinja2 template object
    self.output = None     # rendered output (possibly wrapped by partial markers)
    self.jinja2 = None     # lazily loaded jinja2 module
    self.generate_if = g['generate_if']
    self.cur_value = ''    # current on-disk file content, used for diffing
    self.current_diff = []  # unified diff produced by the last write
    self.log = ''
    # State machine (see GENERATOR_STATES); starts UNKNOWN until first evaluation
    self.__state = 'UNKNOWN'
    self.__old_state = 'UNKNOWN'
    self.__did_change = False
def __set_state(self, state):
    """Switch to `state`, remembering the previous one; no-op when unchanged."""
    if state == self.__state:
        return
    self.__old_state = self.__state
    self.__state = state
    self.__did_change = True
    logger.debug('Compliance rule %s switch from %s to %s' % (self.name, self.__old_state, self.__state))
def get_state(self):
    # Current state, one of GENERATOR_STATES
    return self.__state
def set_error(self, log):
    # Switch to ERROR and keep the message for display/history
    self.__set_state('ERROR')
    self.log = log
def set_compliant(self, log):
    # Switch to COMPLIANT; the message is also logged at info level
    self.log = log
    logger.info(log)
    self.__set_state('COMPLIANT')
def set_not_eligible(self):
    # The generate_if rule was falsy: nothing to do on this node
    self.log = ''
    self.__set_state('NOT-ELIGIBLE')
def get_json_dump(self):
    """Serialize the generator's current status (for the HTTP API / history)."""
    return {
        'name': self.name,
        'state': self.__state,
        'old_state': self.__old_state,
        'log': self.log,
        'pack_level': self.pack_level,
        'pack_name': self.pack_name,
        'diff': self.current_diff,
        'path': self.g['path'],
    }
def get_history_entry(self):
    """Return the status dump when the state changed during the last run, else None."""
    return self.get_json_dump() if self.__did_change else None
def must_be_launched(self):
    """Evaluate the `generate_if` rule for this node.

    Resets the per-run change flag first.  Returns the rule's truthiness; on
    evaluation failure the generator is set to ERROR and False is returned,
    and a falsy rule sets NOT-ELIGIBLE.
    """
    self.__did_change = False
    try:
        b = evaluater.eval_expr(self.generate_if)
    except Exception as exp:
        err = ' (%s) if rule (%s) evaluation did fail: %s' % (self.name, self.generate_if, exp)
        self.set_error(err)
        logger.error(err)
        return False
    if not b:
        self.set_not_eligible()
    return b
# Open the template file and generate the output
def generate(self):
    """Render the jinja2 template into self.output.

    On any failure (jinja2 missing, unreadable template, template syntax error,
    render error, or no matching nodes) the generator is put in ERROR state and
    buf/template/output are reset.  For partial generators, the rendered output
    is wrapped between the partial_start/partial_end markers.
    """
    if self.jinja2 is None:
        self.jinja2 = libstore.get_jinja2()
    # If not jinja2, bailing out
    if self.jinja2 is None:
        self.set_error('Generator: Error, no jinja2 librairy defined, please install it')
        return
    try:
        f = codecs.open(self.g['template'], 'r', 'utf8')
        self.buf = f.read()
        f.close()
    except IOError as exp:
        self.set_error('Cannot open template file %s : %s' % (self.g['template'], exp))
        self.buf = None
        self.template = None
        return
    # NOTE: nodes is a static object, node too (or atomic change)
    node = gossiper.nodes[gossiper.uuid]
    # Now try to make it a jinja template object
    try:
        env = self.jinja2.Environment(trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True)
    except TypeError:  # old jinja2 version do not manage keep_trailing_newline nor
        # lstrip_blocks (like in redhat6)
        env = self.jinja2.Environment(trim_blocks=True)
    try:
        self.template = env.from_string(self.buf)
    except Exception as exp:
        self.set_error('Template file %s did raise an error with jinja2 : %s' % (self.g['template'], exp))
        self.output = None
        self.template = None
        self.buf = None
        return
    # Now try to render all of this with real objects
    try:
        # ok_nodes is exposed to templates so they can iterate over cluster members
        self.output = self.template.render(nodes=gossiper.nodes, node=node, ok_nodes=ok_nodes)
    except NoElementsExceptions:
        self.set_error('No nodes did match filters for template : %s %s' % (self.g['template'], self.name))
        self.output = None
        self.template = None
        self.buf = None
        return
    except Exception:
        self.set_error('Template rendering %s did raise an error with jinja2 : %s' % (self.g['template'], traceback.format_exc()))
        self.output = None
        self.template = None
        self.buf = None
        return
    # if we have a partial generator prepare the output we must check for
    if self.output is not None and self.g['partial_start'] and self.g['partial_end']:
        self.output = '%s\n%s\n%s\n' % (self.g['partial_start'], self.output, self.g['partial_end'])
    logger.debug('Generator %s did generate output:\n%s' % (self.g['name'], self.output))
# If we did generate, try to see if we must write the file, and if so we will have to
# launch the command
    def write_if_need(self):
        """Compare the generated output with the file on disk and rewrite the
        file only when needed.

        Returns True when the file was (re)written, False in every other case
        (no output, file already up to date, or any I/O error -- every error
        path also reports through self.set_error()).
        """
        # Maybe there is no output, if so bail out now :)
        if self.output is None:
            return False
        self.cur_value = ''
        # first try to load the current file if exist and compare to the generated file
        if os.path.exists(self.g['path']):
            try:
                f = codecs.open(self.g['path'], "r", "utf-8")
                self.cur_value = f.read()
                f.close()
            except IOError as exp:
                # Reading failed: report the error and reset all generation
                # state so a later run starts from scratch.
                self.set_error('Cannot open path file %s : %s' % (self.g['path'], exp))
                self.output = None
                self.template = ''
                self.buf = ''
                self.current_diff = []
                return False
        need_regenerate_full = False
        need_regenerate_partial = False
        if not os.path.exists(self.g['path']):
            need_regenerate_full = True
        # if partial, just look for inclusion in the file data
        if (self.g['partial_start'] and self.g['partial_end']):
            # Partial mode: the (already delimited) output must appear
            # verbatim somewhere in the current file content.
            if self.output not in self.cur_value:
                logger.info('Need to regenerate file %s with new data from generator %s' % (self.g['path'], self.g['name']))
                need_regenerate_partial = True
        else:  # not partial, must be equal to file
            if self.output != self.cur_value:
                need_regenerate_full = True
        # If not exists or the value did change, regenerate it :)
        if need_regenerate_full:
            logger.debug('Generator %s generate a new value, writing it to %s' % (self.g['name'], self.g['path']))
            try:
                # NOTE(review): unified_diff is handed whole strings here;
                # confirm the project helper accepts strings (difflib's
                # unified_diff expects sequences of lines).
                self.current_diff = unified_diff(self.cur_value, self.output, self.g['path'])
                logger.info(u'FULL diff: %s' % u'\n'.join(self.current_diff))
                f = codecs.open(self.g['path'], "w", "utf-8")
                f.write(self.output)
                f.close()
                logger.info('Regenerate result: %s' % self.output)
                self.set_compliant('Generator %s did generate a new file at %s' % (self.g['name'], self.g['path']))
                return True
            except IOError as exp:
                self.set_error('Cannot write path file %s : %s' % (self.g['path'], exp))
                self.output = None
                self.template = ''
                self.buf = ''
                self.current_diff = []
                return False
        # If not exists or the value did change, regenerate it :)
        if need_regenerate_partial:
            logger.debug('Generator %s generate partial file writing it to %s' % (self.g['name'], self.g['path']))
            try:
                f = codecs.open(self.g['path'], "r", "utf-8")
                orig_content = f.read()
                # As we will split lines (and so lose the \n), remember whether
                # the file ended with a newline so it can be preserved.
                orig_content_finish_with_new_line = (orig_content[-1] == '\n')
                lines = orig_content.splitlines()
                logger.debug('ORIGINLL CONTENT: %s' % orig_content)
                del orig_content
                f.close()
                # find the part to remove between start and end of the partial
                try:
                    idx_start = lines.index(self.g['partial_start'])
                except ValueError:  # not found?
                    idx_start = None
                try:
                    idx_end = lines.index(self.g['partial_end'])
                except ValueError:  # not found?
                    idx_end = None
                # Manage partial part not found, so maybe in the end
                if idx_start is None or idx_end is None:
                    if self.g['if_partial_missing'] == 'append':
                        # Append mode: keep the whole file and add the partial
                        # after it.
                        part_before = lines
                        part_after = []
                        logger.debug('APPEND MODE: force a return line? %s' % orig_content_finish_with_new_line)
                        # if the file did not finish with \n, force one
                        # NOTE(review): appending '\n' as a list element makes
                        # the later '\n'.join() produce a blank line -- confirm
                        # the double newline is intended.
                        if not orig_content_finish_with_new_line:
                            part_before.append('\n')
                    else:
                        self.set_error('The generator %s do not have a valid if_partial_missing property' % (self.g['name']))
                        return False
                else:  # partial found, look at part before/after
                    # Maybe there is a bad order in the index?
                    if idx_start > idx_end:
                        self.set_error('The partial_start "%s" and partial_end "%s" in the file "%s" for the generator %s are not in the good order' % (self.g['partial_start'], self.g['partial_end'], self.g['path'], self.g['name']))
                        self.output = None
                        self.template = ''
                        self.buf = ''
                        self.current_diff = []
                        return False
                    part_before = lines[:idx_start]
                    part_after = lines[idx_end + 1:]
                # Preserve the original trailing-newline behaviour of the file.
                last_char = '' if not orig_content_finish_with_new_line else '\n'
                new_content = '%s\n%s%s%s' % ('\n'.join(part_before), self.output, '\n'.join(part_after), last_char)
                self.current_diff = unified_diff(self.cur_value, new_content, self.g['path'])
                logger.debug('Temporary file for partial replacement: %s and %s %s=>%s' % (part_before, part_after, idx_start, idx_end))
                logger.debug('New content: %s' % new_content)
                logger.info(u'DIFF content: %s' % u'\n'.join(self.current_diff))
                # Write to a sibling temp file first, then move it into place
                # with the original owner/group and permissions.
                tmp_path = '%s.temporary-generator' % self.g['path']
                f2 = codecs.open(tmp_path, 'w', 'utf-8')
                f2.write(new_content)
                logger.info('DID GENERATE NEW CONTENT: %s' % new_content)
                f2.close()
                # now the second file is ok, move it to the first one place, but with:
                # * same user/group
                # * same permissions
                prev_stats = os.stat(self.g['path'])
                prev_uid = prev_stats.st_uid
                prev_gid = prev_stats.st_gid
                os.chown(tmp_path, prev_uid, prev_gid)
                prev_permissions = prev_stats[stat.ST_MODE]
                logger.debug('PREV UID GID PERMISSIONS: %s %s %s' % (prev_uid, prev_gid, prev_permissions))
                os.chmod(tmp_path, prev_permissions)
                shutil.move(tmp_path, self.g['path'])
                self.set_compliant('Generator %s did generate a new file at %s' % (self.g['name'], self.g['path']))
                return True
            except IOError as exp:
                self.set_error('Cannot write path file %s : %s' % (self.g['path'], exp))
                self.output = None
                self.template = ''
                self.buf = ''
                self.current_diff = []
                return False
# If need launch the restart command, shoul not block too long of
# course
def launch_command(self):
cmd = self.g.get('command', '')
if not cmd:
return
try:
rc, output, err = exec_command(cmd)
except Exception as exp:
self.set_error('Generator %s command launch (%s) fail : %s' % (self.g['name'], cmd, exp))
return
if rc != 0:
self.set_error('Generator %s command launch (%s) error (rc=%s): %s' % (self.g['name'], cmd, rc, '\n'.join([output, err])))
return
logger.info('Generator %s command launch (%s) SUCCESS (rc=%s): %s' % (self.g['name'], cmd, rc, '\n'.join([output, err])))
| |
__author__ = 'ehiller@css.edu'
__author__ = 'ram8647@gmail.com'
import datetime
import logging
import random
import traceback
from models import transforms
from models.models import Student
from models.models import EventEntity
from models import utils as models_utils
from models import jobs
from models import event_transforms
from models.models import QuestionDAO
from models.models import QuestionGroupDAO
from models.models import MemcacheManager
from models.progress import UnitLessonCompletionTracker
GLOBAL_DEBUG = False
class ActivityScoreParser(jobs.MapReduceJob):
    """
    Class to parse the data returned from query of Event activities.

    A tag-assessment Event is recorded every time a student attempts
    a GCB question or Quizly exercise. The only way to determine
    student performance on questions and exercises is to
    query the Events database, extract tag-assessment events and
    process them.
    """

    # Events recorded before this date are ignored by the queries below.
    CUTOFF_DATE = datetime.datetime(2016,8,1)

    def __init__(self):
        """Initialize the per-run accumulators.

        activity_scores: student email -> unit -> lesson -> sequence ->
        question_answer_dict. num_attempts_dict: student email ->
        question/instance id -> attempt count. params: mapper lookup tables,
        filled in by build_additional_mapper_params().
        """
        self.activity_scores = { }
        self.params = {}
        self.num_attempts_dict = { }
        # This is a table of all the Quizly exercises currently in the course. It is used to provide a
        # description in the Student Dashboard and also to validate that an instance_id is still
        # currently valid.
        self.quizly_desc = { # instance_id:description
            'LXgF4NO50hNM':{'desc':'Quizly, Pause the Player','name':'quiz_pause_the_player'}, # Unit 2
            'BtQ8hSoGkeml':{'desc':'Quizly, Stop the Player','name':'quiz_button_click_stop_player'},
            'Dstsv7VuDQb5':{'desc':'Quizly, Stop Player if playing','name':'quiz_stop_player_if_playing'},
            'twxBgieSEwqs':{'desc':'Quizly, If/else stop/start Player','name':'quiz_if_else_stop_start_player'},
            'a3uBZXYSOJee':{'desc':'Quizly, Set background color','name':'quiz_background_color'}, # Unit 3
            'pnhvzarYPPW1':{'desc':'Quizly, Set text color','name':'quiz_text_color'},
            'G3qzTftPYKTe':{'desc':'Quizly, Increment a variable','name':'quiz_incrementing'},
            '4kITN7u5hdsO':{'desc':'Quizly, Initialize global variable','name':'quiz_initialize_variable'},
            'pCZugPUxlHeb':{'desc':'Quizly, Initializing','name':'quiz_incrementing'},
            '8T30OkUf5r1r':{'desc':'Quizly, Simple if/else','name':'quiz_simple_if_else'},
            'KQctST8skmaC':{'desc':'Quizly, Procedure to double a variable','name':'quiz_proc_double'},
            'v2m4Ks25S1MX':{'desc':'Quizly, Procedure to add globals','name':'quiz_add_globals'},
            'rCgLbJRceEbn':{'desc':'Quizly, Procedure to reset the score','name':'quiz_reset_score'}, # Unit 4
            '7uowepixSjT4':{'desc':'Quizly, Procedure to calculate the hit rate','name':'quiz_calculate_hit_rate'},
            'w18q4UWKxvlM':{'desc':'Quizly, Fix a bug in updateScore procedure','name':'quiz_procedure_bug'},
            'rvjUJMaLZ56s':{'desc':'Quizly, If/else greater than','name':'quiz_if_x_greater_than_y'},
            'scgF2VSCjUv8':{'desc':'Quizly, Simple if/else','name':'quiz_simple_if_else'}, # Unit 5
            'JatcV7u6GOer':{'desc':'Quizly, If x greater than y','name':'quiz_if_x_greater_than_y'},
            # No Quizly exercises beyond Unit 5 --> we should create more!
        }
@staticmethod
def get_description():
return 'activity answers parser'
@staticmethod
def entity_class():
return EventEntity
def get_num_attempts_dict():
return _num_attempts_dict
@classmethod
def _get_questions_by_question_id(cls, questions_by_usage_id):
''' Retrieves every question in the course returning
them in a dict: { id:questionDAO, ... }
@param questions_by_usage_id.values() is a dict:
{unit, lesson, sequence, weight, quid}
'''
ret = {}
ret['single'] = {}
ret['grouped'] = {}
for question in questions_by_usage_id.values():
question_single = QuestionDAO.load(question['id'])
if question_single:
ret['single'][question['id']] = question_single
else:
question_group = QuestionGroupDAO.load(question['id'])
if question_group:
ret['grouped'][question['id']] = {}
for item in question_group.items:
ret['grouped'][question['id']][item['question']] = QuestionDAO.load(item['question'])
return ret
def build_additional_mapper_params(self, app_context):
questions_by_usage_id = event_transforms.get_questions_by_usage_id(app_context)
return {
'questions_by_usage_id':
questions_by_usage_id,
'valid_question_ids': (
event_transforms.get_valid_question_ids()),
'group_to_questions': (
event_transforms.get_group_to_questions()),
'assessment_weights':
event_transforms.get_assessment_weights(app_context),
'unscored_lesson_ids':
event_transforms.get_unscored_lesson_ids(app_context),
'questions_by_question_id':
ActivityScoreParser._get_questions_by_question_id(questions_by_usage_id)
}
def parse_quizly_scores(self, data, instance_id, timestamp, student, student_answers):
"""
Processes Quizly exercises extracted from the Events query using the
instance_id for the quid.
Because Quizly exercises are not officially questions -- something that
needs to be fixed -- they don't have a quid and need to be processed in
an ad-hoc way. There is no question_info dict that can be used to get
information about Quizly exercises.
Also Quizly exercises don't have a 'sequence' number within the lesson.
To get around that we assign random numbers starting at 10. One problem
with this approach is that it doesn't preserve the sequence of Quizly
exercises within the lesson.
"""
# Check that the question is still a valid question (things move around)
if not instance_id in self.quizly_desc:
return
if GLOBAL_DEBUG:
logging.debug('***RAM*** A question with instance_id = ' + str(instance_id) +
' and no quid and location = ' + str(data['location']))
url = data['location']
quizly_unit_id = int(url[url.find('unit=') + len('unit=') : url.find('&lesson=')])
quizly_lesson_id = int(url[ url.find('&lesson=') + len('&lesson=') : ])
quizly_score = data['score']
if 'answer' in data: # Takes care of legacy events that are missing answer?
quizly_answer = data['answer']
else:
quizly_answer = False
quizly_type = 'Quizly' # reported in data as SaQuestion
quizly_sequence = random.randint(10,30) #### Make up a random sequence #
# Create a dict for this Quizly exercise
question_answer_dict = {}
question_answer_dict['unit_id'] = quizly_unit_id
question_answer_dict['lesson_id'] = quizly_lesson_id
question_answer_dict['sequence'] = quizly_sequence # Not given
question_answer_dict['question_id'] = instance_id # Use instance_id as quid
if instance_id in self.quizly_desc:
question_answer_dict['description'] = self.quizly_desc[instance_id]
else:
question_answer_dict['description'] = 'Quizly ' + instance_id
question_answer_dict['question_type'] = quizly_type
question_answer_dict['timestamp'] = timestamp
question_answer_dict['answers'] = [quizly_answer] # T or F
question_answer_dict['score'] = quizly_score
question_answer_dict['weighted_score'] = quizly_score
question_answer_dict['tallied'] = False
question_answer_dict['choices'] = [{'text':'T','score':1},{'text':'F','score': 0}] # Quizly's are T/F
# Use the instance_id to count the number of attempts for each Quizly exercise by a given student
if not student.email in self.num_attempts_dict:
self.num_attempts_dict[student.email] = {}
if not instance_id in self.num_attempts_dict[student.email]:
self.num_attempts_dict[student.email][instance_id] = 1
else:
self.num_attempts_dict[student.email][instance_id] += 1
# Either add the dict to the lesson_answers or update an existing one
unit_answers = student_answers.get(quizly_unit_id, {})
lesson_answers = unit_answers.get(quizly_lesson_id, {})
found = False
for seq in lesson_answers:
if lesson_answers[seq]['question_id'] == instance_id: # Quizly already there
found = True
if GLOBAL_DEBUG:
logging.debug('***RAM*** Quizly found answer for seq = ' + str(seq))
if lesson_answers[seq]['timestamp'] < timestamp: # Already there check time
question_answer_dict['sequence'] = seq
lesson_answers[seq] = question_answer_dict # Replace it
if not found:
lesson_answers[quizly_sequence] = question_answer_dict # Add Quizly
if GLOBAL_DEBUG:
logging.debug('***RAM*** Q ' + str(quizly_unit_id) + ' ' + str(quizly_lesson_id) + ' answers after ' + str(lesson_answers))
# Add the Quizly exercise into the student's activity_scores
unit_answers[quizly_lesson_id] = lesson_answers
student_answers[quizly_unit_id] = unit_answers
self.activity_scores[student.email] = student_answers
    def parse_question_scores(self, instance_id, questions, student_answers, answers, student, timestamp):
        """
        Processes question scores within a given lesson.

        Questions within each lesson contain a quid (question_id) and a sequence
        number.
        """
        question_info = questions[instance_id]
        unit_answers = student_answers.get(question_info['unit'], {})
        lesson_answers = unit_answers.get(question_info['lesson'], {})
        # NOTE(review): question_desc is never reassigned in this method, so
        # every dict built below carries question_desc == None -- confirm
        # whether a description lookup was meant to happen here.
        question_desc = None  # Get's filled in later
        # answers is the unpacked answers from the Event query
        for answer in answers:
            if GLOBAL_DEBUG:
                logging.debug('***RAM*** answer.question.id = ' + str(answer.question_id) + ' type= ' + str(answer.question_type) + ' s= ' + student.email)
            # Count the number of attempts for each answer by the student
            if not student.email in self.num_attempts_dict:
                self.num_attempts_dict[student.email] = {}
            if not answer.question_id in self.num_attempts_dict[student.email]:
                self.num_attempts_dict[student.email][answer.question_id] = 1
            else:
                self.num_attempts_dict[student.email][answer.question_id] += 1
            # Create a dict for this answer
            question_answer_dict = {}
            question_answer_dict['unit_id'] = answer.unit_id
            question_answer_dict['lesson_id'] = answer.lesson_id
            question_answer_dict['sequence'] = answer.sequence
            question_answer_dict['question_id'] = answer.question_id
            question_answer_dict['question_desc'] = question_desc
            question_answer_dict['question_type'] = answer.question_type
            question_answer_dict['timestamp'] = answer.timestamp
            question_answer_dict['answers'] = answer.answers
            question_answer_dict['score'] = answer.score
            question_answer_dict['weighted_score'] = answer.weighted_score
            question_answer_dict['tallied'] = answer.tallied
            if GLOBAL_DEBUG:
                logging.debug('***RAM*** Q ' + str(answer.unit_id) + ' ' + str(answer.lesson_id) + ' unit answer before ' + str(unit_answers))
                logging.debug('***RAM*** McQ ' + str(answer.unit_id) + ' ' + str(answer.lesson_id) + ' answers before ' + str(lesson_answers))
            # If the timestamp on this event is after the timestamp on a previous score do an update
            if answer.sequence in lesson_answers and lesson_answers[answer.sequence]['timestamp'] < timestamp:
                if GLOBAL_DEBUG:
                    logging.debug('***RAM*** lesson answers timestamp ' + str(lesson_answers[answer.sequence]) + ' < ' + str(timestamp))
                lesson_answers[answer.sequence] = question_answer_dict
            elif answer.sequence not in lesson_answers:
                lesson_answers[answer.sequence] = question_answer_dict
            if GLOBAL_DEBUG:
                logging.debug('***RAM*** McQ ' + str(answer.unit_id) + ' ' + str(answer.lesson_id) + ' answers after ' + str(lesson_answers))
        # Add scores for this question into the student's activity_scores
        unit_answers[question_info['lesson']] = lesson_answers
        student_answers[question_info['unit']] = unit_answers
        self.activity_scores[student.email] = student_answers
def parse_activity_scores(self, activity_attempt):
'''
Processes activity scores recieved from the mapper.
This is called in the mapper callback function. Each time a student attempts
a GCB question or a Quizly exercise, a tag-assessment Event is created.
This processes such events to extract the number of attempts the student
made and the answers.
Events are time-stamped and recorded by user_id. They include the instance_id
of the Component that triggered the Event. Both GCB questions and Quizly
exercises have an instance_id.
However, Quizly exercises don't have question_id and need special processing.
Use the Dashboard to see what the data looks like for Events:
https://console.cloud.google.com/datastore/entities/query?
project=ram8647&ns=ns_mobileCSP&kind=EventEntity
'''
if activity_attempt.source == 'tag-assessment':
data = transforms.loads(activity_attempt.data)
instance_id = data['instanceid']
if GLOBAL_DEBUG:
logging.debug('***********RAM************** data[instanceid] = ' + instance_id)
timestamp = int(
(activity_attempt.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
# Get information about the course's questions (doesn't include Quizly exercises yet)
questions = self.params['questions_by_usage_id']
valid_question_ids = self.params['valid_question_ids']
assessment_weights = self.params['assessment_weights']
group_to_questions = self.params['group_to_questions']
student = Student.get_by_user_id(activity_attempt.user_id)
# Get this student's answers so far
student_answers = self.activity_scores.get(student.email, {})
if GLOBAL_DEBUG:
logging.debug('***RAM*** student answers = ' + str(student_answers))
answers = event_transforms.unpack_check_answers( # No Quizly answers in here
data, questions, valid_question_ids, assessment_weights,
group_to_questions, timestamp)
# Add the score to right lesson
# NOTE: This was throwing an exception on Quizly exercises. Shouldn't happen now
try:
# If the event is tag-assessment and has no quid, it's a Quizly exercise
if not 'quid' in data:
self.parse_quizly_scores(data, instance_id, timestamp, student, student_answers)
else:
self.parse_question_scores(instance_id, questions, student_answers, answers, student, timestamp)
except Exception as e:
logging.error('***********RAM************** bad instance_id: %s %s\n%s', str(instance_id), e, traceback.format_exc())
if GLOBAL_DEBUG:
logging.debug('***RAM*** activity_scores ' + str(self.activity_scores))
return self.activity_scores
def build_missing_score(self, question, question_info, student_id, unit_id, lesson_id, sequence=-1):
''' Builds a partial question_answer_dict
This is called for each student immediately after launching the mapper query.
'''
if sequence == -1:
sequence = question['sequence']
question_answer = None
# If (unit,lesson) already in this student's activity_scores then
# question_answer is the next question_answer that matches the sequence.
if unit_id in self.activity_scores[student_id] and lesson_id in \
self.activity_scores[student_id][unit_id]:
question_answer = next((x for x in self.activity_scores[student_id][unit_id][lesson_id].values()
if x['sequence'] == sequence), None)
question_desc = None
if question_info:
question_desc = question_info.dict['description']
possible_score = 0
choices = None
choices_scores_only = []
if question_info:
if 'choices' in question_info.dict:
choices = question_info.dict['choices']
if GLOBAL_DEBUG:
logging.debug('***RAM*** choices = ' + str(choices))
# Calculate total possible points for questions by iterating
# through the answer choices and summing their individual values.
# For multiple choice questions, one choice will be 1.0 (correct)
# and the others 0.0. For multiple answer questions, each correct
# is worth 1/n of the value, where n is the number of correct choices.
# In any case, the total should typically sum to 1.0.
# Q: Is possible score always 1? If so why do we need this?
i = 0
for choice in choices:
if float(choice['score']) > 0:
possible_score += float(choice['score'])
# Calculating an abbreviated choices array that is passed back.
# We don't need the questions and answers text.
choices_scores_only.append( {'score': choice['score'], 'text': chr(ord('A') + i) } )
i = i + 1
if GLOBAL_DEBUG:
logging.debug('***RAM*** scores only = ' + str(choices_scores_only))
elif 'graders' in question_info.dict:
choices = question_info.dict['graders']
for grader in choices:
possible_score += float(grader['score'])
if 'weight' in question and float(question['weight']) is not 0.0:
possible_score = possible_score * float(question['weight'])
else:
possible_score = 1
# If there is no question_answer yet in activity_scores for this student
# construct a partial question_answer_dict with default values. Otherwise
# fill in the existing dict with values from the student's question_answer.
if not question_answer:
if GLOBAL_DEBUG:
logging.debug('***RAM*** Initializing dict for ' +
str(unit_id) + ' ' + str(lesson_id) + ' ' + str(sequence))
question_answer_dict = {}
question_answer_dict['unit_id'] = unit_id
question_answer_dict['lesson_id'] = lesson_id
question_answer_dict['sequence'] = sequence
question_answer_dict['question_id'] = question['id']
question_answer_dict['description'] = question_desc
question_answer_dict['question_type'] = 'NotCompleted'
question_answer_dict['timestamp'] = 0
question_answer_dict['answers'] = ''
question_answer_dict['score'] = 0
question_answer_dict['weighted_score'] = 0
question_answer_dict['tallied'] = False
question_answer_dict['possible_points'] = possible_score
question_answer_dict['choices'] = choices_scores_only
# question_answer_dict['choices'] = choices
unit = self.activity_scores[student_id].get(unit_id, {})
lesson = unit.get(lesson_id, {})
lesson[sequence] = question_answer_dict
else:
if GLOBAL_DEBUG:
logging.debug('***RAM*** Updating dict for ' +
str(question_answer['unit_id']) + ' ' + str(question_answer['lesson_id']) + ' ' + str(question_answer['sequence'])
+ ' score=' + str(question_answer['score']))
question_answer_dict = {}
question_answer_dict['unit_id'] = question_answer['unit_id']
question_answer_dict['lesson_id'] = question_answer['lesson_id']
question_answer_dict['sequence'] = question_answer['sequence']
question_answer_dict['question_id'] = question_answer['question_id']
question_answer_dict['description'] = question_desc
question_answer_dict['question_type'] = question_answer['question_type']
question_answer_dict['timestamp'] = question_answer['timestamp']
question_answer_dict['answers'] = question_answer['answers']
question_answer_dict['score'] = question_answer['score']
question_answer_dict['weighted_score'] = question_answer['weighted_score']
question_answer_dict['tallied'] = question_answer['tallied']
question_answer_dict['possible_points'] = possible_score
question_answer_dict['choices'] = choices_scores_only
# question_answer_dict['choices'] = choices
self.activity_scores[student_id][unit_id][lesson_id][sequence] = question_answer_dict
#validate total points for lessons, need both question collections for score and weight
def build_missing_scores(self):
''' This is called from get_activity_scores right after launching the query mapper.
For each student in activity_scores, it sets up a data dict with partial
score data that is filled in when the scores are retrieved.
'''
questions = self.params['questions_by_usage_id']
questions_info = self.params['questions_by_question_id']
for student_id in self.activity_scores:
for question in questions.values():
unit_id = question['unit']
lesson_id = question['lesson']
question_info = questions_info['single'].get(question['id'], None) #next((x for x in questions_info if x
# and
# x.id == question['id']), None)
if not question_info:
question_info_group = questions_info['grouped'][question['id']]
sequence = question['sequence']
for question_info in question_info_group.values():
self.build_missing_score(question, question_info, student_id, unit_id, lesson_id, sequence)
sequence += 1
else:
self.build_missing_score(question, question_info, student_id, unit_id, lesson_id)
    @classmethod
    def get_student_completion_data(cls, course):
        """Retrieves student completion data for the course.

        NOTE(review): this currently builds the id->questions dict and then
        discards it (implicitly returning None); it looks like debugging
        scaffolding -- confirm before relying on it.
        """
        if GLOBAL_DEBUG:
            logging.debug('***RAM*** get_student_completion_data ' + str(course))
        completion_tracker = UnitLessonCompletionTracker(course)
        questions_dict = completion_tracker.get_id_to_questions_dict()
        # for q in questions_dict:
        # logging.debug('***RAM*** key: ' + q)
        # logging.debug('***RAM*** dict ' + str(questions_dict[q]))
@classmethod
def get_activity_scores(cls, student_user_ids, course, force_refresh = True):
"""Retrieve activity data for student using EventEntity.
For each student, launch a Query of EventEntities to retrieve student
scores. The Query is launched as a map-reduce background process that
will return up to 500 results, reporting back every second. It reports
back by calling the map_fn callback, which in turn calls parse_activity
scores.
As soon as the Query is launched (in the background) the foreground
process calls build_missing_scores() to construct a student_answer.dict
that will be updated as score data for that student is received.
Events properties include a userid (a number) and a source (e.g.,
tag-assessement), a recorded-on date (timestamp) and data (a dictionary).
Here's a typeical data dict:
{"loc": {"city": "mililani", "language": "en-US,en;q=0.8", "locale": "en_US",
"country": "US", "region": "hi", "long": -158.01528099999999, "lat": 21.451331,
"page_locale": "en_US"}, "instanceid": "yOkVTqWogdaF", "quid": "5733935958982656",
"score": 1, "location": "https://mobilecsp-201608.appspot.com/mobilecsp/unit?unit=1&lesson=45",
"answer": [0, 1, 2, 4], "type": "McQuestion", "user_agent":
"Mozilla/5.0 ..."}
Note that it includes the unit_id and lesson_id as part of the Url
"""
# Instantiate parser object
cached_date = datetime.datetime.now()
activityParser = ActivityScoreParser()
if force_refresh:
activityParser.params = activityParser.build_additional_mapper_params(course.app_context)
# Launch a background Query for each student's activity data. This is expensive.
for user_id in student_user_ids:
# if GLOBAL_DEBUG:
# logging.debug('***RAM*** launching a query for student ' + str(user_id))
mapper = models_utils.QueryMapper(
EventEntity.all().filter('user_id in', [user_id]) \
.filter('recorded_on >= ', cls.CUTOFF_DATE), \
batch_size=1000, report_every=1000)
# Callback function -- e.g., 45-50 callbacks per query
def map_fn(activity_attempt):
# if GLOBAL_DEBUG:
# logging.debug('***RAM*** map_fn ' + str(activity_attempt))
activityParser.parse_activity_scores(activity_attempt)
mapper.run(map_fn)
# In the foreground create the student_answer_dict, which is stored at:
# activity_scores[student][unit][lesson][sequence] where sequence is
# the question's sequential position within the lesson.
# So each question in the lesson will have a question_answer_dict.
activityParser.build_missing_scores()
#Lets cache results for each student
for user_id in student_user_ids:
cached_student_data = {}
cached_student_data['date'] = cached_date
student = Student.get_by_user_id(user_id)
cached_student_data['scores'] = activityParser.activity_scores.get(student.email, {})
cached_student_data['attempts'] = activityParser.num_attempts_dict.get(student.email, {})
MemcacheManager.set(cls._memcache_key_for_student(student.email),cached_student_data)
else:
uncached_students = []
for student_id in student_user_ids:
if student_id != '':
student = Student.get_by_user_id(student_id)
temp_email = student.email
temp_mem = cls._memcache_key_for_student(temp_email)
scores_for_student = MemcacheManager.get(temp_mem)
if scores_for_student:
cached_date = scores_for_student['date']
activityParser.activity_scores[student_id] = scores_for_student['scores']
activityParser.num_attempts_dict[student_id] = scores_for_student['scores']
else:
uncached_students.append(student_id)
if len(uncached_students) > 0:
if cached_date == None or datetime.datetime.now() < cached_date:
cached_date = datetime.datetime.now()
activityParser.params = activityParser.build_additional_mapper_params(course.app_context)
for user_id in uncached_students:
mapper = models_utils.QueryMapper(
EventEntity.all().filter('user_id in', [user_id]) \
.filter('recorded_on >= ', cls.CUTOFF_DATE), \
batch_size=1000, report_every=1000)
def map_fn(activity_attempt):
activityParser.parse_activity_scores(activity_attempt)
mapper.run(map_fn)
activityParser.build_missing_scores()
#Lets cache results for each student
for user_id in uncached_students:
cached_student_data = {}
cached_student_data['date'] = cached_date
student = Student.get_by_user_id(user_id)
cached_student_data['scores'] = activityParser.activity_scores.get(student.email, {})
MemcacheManager.set(cls._memcache_key_for_student(student.email),cached_student_data)
score_data = {}
score_data['date'] = cached_date
score_data['scores'] = activityParser.activity_scores
score_data['attempts'] = activityParser.num_attempts_dict
if GLOBAL_DEBUG:
logging.debug('***RAM*** get_activity_scores returning scores: ' + str(score_data['scores']))
return score_data
@classmethod
def _memcache_key_for_student(cls, user_id):
return ('activityscores:%s' % user_id)
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 20:17:23 2016
@author: jasonniu
#CLRS Exercises and PS
"""
# Ex2.1-3
def linear_search(data, num):
# Ex2.1-3
for i in range(len(data)):
if num == data[i]:
print 'Found it! ', i, data[i]
return i
print 'Not found'
return None
# Ex2.1-4
def add_binary_list(a, b):
    # a, b are binary int in arrays
    # list: pass by reference
    # Adds two binary numbers stored as digit lists (most significant digit
    # first); returns the sum as a new list one digit longer than the longer
    # operand. The bare print statements are Python-2 debug output.
    if len(a) > len(b):
        big = a[:]
        small = b[:]
    else:
        big = b[:]
        small = a[:]
    #c = big[:]
    # Result buffer: one extra leading digit to hold a final carry.
    c = [0 for x in range(len(big))]
    c.insert(0, 0)
    # Walk both numbers from the least significant digit (index -1) leftwards.
    for i in range(-1, -len(big) - 1, -1):
        if(i < - len(small)):
            temp_s = 0  # small operand exhausted; treat missing digit as 0
        else:
            temp_s = int(small[i])
        temp_b = int(big[i])
        print i, temp_s, temp_b
        print big, small, c
        # Digit sum includes any carry already deposited in c[i].
        temp_sum = temp_s + temp_b + c[i]
        if temp_sum >= 2:
            # should be between 0 and 3
            c[i] = (temp_sum) % 2
            #big[i - 1] = int(big[i - 1]) + (temp_s + temp_b)/2
            c[i - 1] = int(c[i - 1]) + (temp_sum)/2
        else:
            c[i] = temp_sum
        print big, small, c
    print a, b, c
    return c
def verify_add_binary_list(a, b, c):
A = ''
B = ''
C = ''
for i in a:
A += str(i)
for i in b:
B += str(i)
for i in c:
C += str(i)
A = int(A, 2)
B = int(B, 2)
C = int(C, 2)
print A, B, C
assert A + B == C, 'Not Equal'
def Test_Ex214():
    # Smoke test for Ex 2.1-4: add two binary digit lists and verify the sum.
    first = [1, 0, 0, 0]
    second = [1, 1, 0, 1, 1, 1, 0]
    total = add_binary_list(first, second)
    verify_add_binary_list(first, second, total)
#Test_Ex214()
# Ex2.3-4
# T(n) = T(n-1) + n
# => O(n^2)

# Ex2.3-5
def binary_search(data, lo, hi, a):
    """Recursively search sorted data[lo..hi] (inclusive) for a.

    Returns the index of a, or the string 'Not found' (kept for backward
    compatibility with the original API).
    """
    if lo > hi:
        return 'Not found'
    elif lo == hi and data[lo] != a:
        return 'Not found'
    # Overflow-safe midpoint; // keeps mid an int on both Python 2 and 3
    # (the original / yields a float index under Python 3).
    mid = lo + (hi - lo) // 2
    if data[mid] > a:
        return binary_search(data, lo, mid - 1, a)
    elif data[mid] == a:
        return mid
    else:
        return binary_search(data, mid + 1, hi, a)
#print binary_search([1,2,3,6,7], 0, 4, 1)
# PS 2-3
# a. O(n)
# b. O(n^2)
'''PS 2-4
a. (2, 1), (3, 1), (8, 1), (6, 1), (8, 6)
b. sorted desc, (n - 1) + (n - 2) +... 1 -> n*(n-1)/2
'''
import sys
# Ex4.1-2
def max_subarray_brute_force(A, lo=None, hi=None):
    """Find a maximum-sum contiguous subarray of A[lo..hi] by brute force.

    Returns [start, end, sum] with inclusive absolute indices. lo and hi
    default to the whole array.

    Fixes over the original: float('-inf') replaces -sys.maxint (which is
    not a true lower bound and does not exist on Python 3), and identity
    comparisons with None use 'is'.
    """
    if lo is None:
        lo = 0
    if hi is None:
        hi = len(A) - 1
    best_per_start = []  # best [i, j, sum] found for each start index i
    global_max = float('-inf')  # identity element for max
    global_max_idx = 0
    for i in range(lo, hi + 1):
        running = 0
        best_sum = float('-inf')
        best = []
        for j in range(i, hi + 1):
            running += A[j]
            if running > best_sum:
                best_sum = running
                best = [i, j, best_sum]
        best_per_start.append(best)
        if best[2] > global_max:
            global_max_idx = i - lo
            global_max = best[2]
    return best_per_start[global_max_idx]
def find_max_crossing_subarray(A, low, mid, high):
    """CLRS 4.1: best subarray of A[low..high] that crosses the midpoint.

    mid is treated as part of the left half; high is the inclusive index
    of the last element.  Returns [left_index, right_index, total_sum].

    Fix: -sys.maxint sentinels (removed in Python 3) replaced with
    float('-inf'); behavior is otherwise unchanged for the non-degenerate
    calls made by the divide-and-conquer drivers (mid < high).
    """
    max_sum_l = float('-inf')
    max_sum_r = float('-inf')
    max_l_idx = 0
    max_r_idx = 0
    # Best right part: grow rightward from mid+1, remembering the running
    # prefix sum that was largest.
    running = 0
    for i in range(mid + 1, high + 1):
        if running + A[i] > max_sum_r:
            max_r_idx = i
            max_sum_r = running + A[i]
        running += A[i]
    # Best left part: grow leftward from mid down to low.
    running = 0
    for i in range(mid, low - 1, -1):
        if running + A[i] > max_sum_l:
            max_l_idx = i
            max_sum_l = running + A[i]
        running += A[i]
    return [max_l_idx, max_r_idx, max_sum_l + max_sum_r]
def max_sub_array(A, p, r):
    """CLRS divide-and-conquer maximum subarray over A[p..r] (r inclusive).

    Returns [start, end, sum] with inclusive indices.  O(n log n).

    Fix: pivot computed with // so it stays an int under Python 3
    (plain / produced a float and broke slicing/recursion).
    """
    if p == r:
        return [p, r, A[p]]
    if p < r:
        q = (p + r) // 2
        left = max_sub_array(A, p, q)
        right = max_sub_array(A, q + 1, r)
        cross = find_max_crossing_subarray(A, p, q, r)
        # Preference order on ties matches the original: cross, right, left.
        if cross[2] >= left[2] and cross[2] >= right[2]:
            return cross
        elif right[2] >= cross[2] and right[2] >= left[2]:
            return right
        else:
            return left
    return 'Not found'  # only reachable for a degenerate call with p > r
def max_sub_array_mixed(A, p, r):
    """Divide-and-conquer maximum subarray with a brute-force base case.

    Spans of fewer than 20 elements fall back to the O(n^2) scan, which
    is faster in practice than recursing further (see Ex413 findings).
    Returns [start, end, sum] with inclusive indices.

    Fix: pivot computed with // so it stays an int under Python 3.
    """
    if p == r:
        return [p, r, A[p]]
    if r - p < 20:
        return max_subarray_brute_force(A, p, r)
    # r - p >= 20 here, so p < r is guaranteed.
    q = (p + r) // 2
    left = max_sub_array_mixed(A, p, q)
    right = max_sub_array_mixed(A, q + 1, r)
    cross = find_max_crossing_subarray(A, p, q, r)
    # Preference order on ties matches the original: cross, right, left.
    if cross[2] >= left[2] and cross[2] >= right[2]:
        return cross
    elif right[2] >= cross[2] and right[2] >= left[2]:
        return right
    else:
        return left
def benchmark(func, n, rep = 1, asc = True):
    """Time `func` over `rep` runs on fresh seeded random data of size n.

    func is called as func(data, 0, len(data) - 1); the `asc` parameter is
    only used by the commented-out sort-style call convention.
    NOTE(review): Python 2 code (print statements); `time` and the sibling
    stress_test_prep are assumed to be in scope at module level.
    """
    # benchmark and validate
    #n = 1E4
    lo = 1
    hi = 10000
    total_t = 0
    i = 0
    while i < rep:
        # Fresh (but seeded, hence repeatable) input for every repetition;
        # generation time is excluded from the measurement.
        data = stress_test_prep(int(n), lo, hi)
        #print data
        start = time.time()
        #nums = func(data, asc)
        # For merge sort:
        nums = func(data, 0, len(data) - 1)
        end = time.time()
        total_t += end - start
        i += 1
    # Report the result of the last run and the mean wall-clock time.
    print 'result: ', nums
    print 'Func execution time: ', total_t/float(rep)
    #print nums
    #validate_sort(nums, asc)
def stress_test_prep(n, lo, hi):
    """Build a repeatable list of n random ints drawn from [lo, hi]."""
    random.seed(1)  # fixed seed -> identical data on every call
    return [random.randint(lo, hi) for _ in range(int(n))]
# CLRS Figure 4.1 sample array for the maximum-subarray exercises,
# extended here with extra trailing values beyond the book's 16 elements.
A = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7, 3, 7, 5, 3, 9, 11]
#A = [0]
#A = [13, 1, -2]
#print max_subarray_brute_force(A, 0, len(A) - 1)
#print max_sub_array(A, 0, len(A) - 1)
#print max_sub_array_mixed(A, 0, len(A) - 1)
#print max_subarray_brute_force(A, 11,21)
def Ex413():
    """Compare running times of the max-subarray implementations (Ex 4.1-3).

    Only the linear version is currently enabled; earlier measurements are
    recorded in `sol` below.  NOTE(review): Python 2 print statement.
    """
    n = 1e3
    rep = 1
    #benchmark(max_subarray_brute_force, n, rep)
    #benchmark(max_sub_array, n, rep)
    #benchmark(max_sub_array_mixed, n, rep)
    benchmark(max_subarray_linear, n, rep)
    # Findings from earlier runs (divide-and-conquer vs mixed).
    sol = '''
n = 15, 4.2e-5s VS 4.5e-5s
n = 16, 4.6e-5s VS 5e-5s
Crossover point is about 16 to 20
When n is large, > 1000 or so, mixed version is about 10% faster
'''
    print sol
#Ex413()
def max_subarray_linear(A, i, j):
    """Maximum subarray of A[i..j] (inclusive) in O(n) time (Kadane's scan).

    Returns [start, end, max_sum] with inclusive indices, matching the
    convention of the divide-and-conquer versions above.

    Fix: the previous prefix-sum formulation was incorrect -- it ignored
    the i/j arguments, skipped the final element of A, and returned a
    wrong sum for inputs such as [2, -1, 2].  This is the standard
    linear-time scan with index tracking.
    """
    best_start = best_end = cur_start = i
    best_sum = cur_sum = A[i]
    for k in range(i + 1, j + 1):
        if cur_sum < 0:
            # A negative running sum can never help; restart at k.
            cur_sum = A[k]
            cur_start = k
        else:
            cur_sum += A[k]
        if cur_sum > best_sum:
            best_sum = cur_sum
            best_start = cur_start
            best_end = k
    return [best_start, best_end, best_sum]
def Ex415():
    """Run the linear-time max-subarray on the CLRS sample array (Ex 4.1-5).

    Timing note from an earlier run: 1e7 elements took about 3.4 s.
    """
    A = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7, 3, 7, 5, 3, 9, 11]
    max_subarray_linear(A, 0, len(A) - 1)
    #benchmark(max_subarray_linear, n, rep)
# Module-level invocation: runs the exercise on import/execution.
Ex415()
| |
from __future__ import print_function
# Copyright (c) 2017, Simon Brodeur
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the NECOTIS research group nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import logging
import scipy.signal
import scipy.io.wavfile
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as plt3d
from scipy import signal
from evert import Room, Polygon, Vector3, Source, Listener, PathSolution
logger = logging.getLogger(__name__)
CDIR = os.path.dirname(os.path.realpath(__file__))
class MaterialAbsorptionTable(object):
    """Random-incidence sound-absorption coefficients per wall material.

    Data from: Auralization : fundamentals of acoustics, modelling,
    simulation, algorithms and acoustic virtual reality.
    materials[c][m] names the material whose per-band absorption
    coefficients are table[c][m], sampled at `frequencies` (Hz).

    Fixes vs. original: getAbsorptionCoefficients now reads this class's
    own `frequencies` (it accidentally read AirAttenuationTable's -- the
    values are identical, but the cross-class dependency was unintended),
    and `np.finfo(np.float)` is replaced by the builtin float since the
    np.float alias was removed from NumPy.
    """
    categories = ['hard surfaces', 'linings', 'glazing', 'wood',
                  'floor coverings', 'curtains']
    frequencies = [125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0]
    materials = [[ # Massive constructions and hard surfaces
        "average", # Walls, hard surfaces average (brick walls, plaster, hard floors, etc.)
        "walls rendered brickwork", # Walls, rendered brickwork
        "rough concrete", # Rough concrete
        "smooth unpainted concrete", # Smooth unpainted concrete
        "rough lime wash", # Rough lime wash
        "smooth brickwork with flush pointing, painted", # Smooth brickwork with flush pointing, painted
        "smooth brickwork, 10 mm deep pointing, pit sand mortar", # Smooth brickwork, 10 mm deep pointing, pit sand mortar
        "brick wall, stuccoed with a rough finish", # Brick wall, stuccoed with a rough finish
        "ceramic tiles with a smooth surface", # Ceramic tiles with a smooth surface
        "limestone walls", # Limestone walls
        "reverberation chamber walls", # Reverberation chamber walls
        "concrete", # Concrete floor
        "marble floor", # Marble floor
    ],
    [ # Lightweight constructions and linings
        "plasterboard on steel frame", # 2 * 13 mm plasterboard on steel frame, 50 mm mineral wool in cavity, surface painted
        "wooden lining", # Wooden lining, 12 mm fixed on frame
    ],
    [ # Glazing
        "single pane of glass", # Single pane of glass, 3 mm
        "glass window", # Glass window, 0.68 kg/m^2
        "lead glazing", # Lead glazing
        "double glazing, 30 mm gap", # Double glazing, 2-3 mm glass, > 30 mm gap
        "double glazing, 10 mm gap ", # Double glazing, 2-3 mm glass, 10 mm gap
        "double glazing, lead on the inside", # Double glazing, lead on the inside
    ],
    [ # Wood
        "wood, 1.6 cm thick", # Wood, 1.6 cm thick, on 4 cm wooden planks
        "thin plywood panelling", # Thin plywood panelling
        "16 mm wood on 40 mm studs", # 16 mm wood on 40 mm studs
        "audience floor", # Audience floor, 2 layers, 33 mm on sleepers over concrete
        "stage floor", # Wood, stage floor, 2 layers, 27 mm over airspace
        "solid wooden door", # Solid wooden door
    ],
    [ # Floor coverings
        "linoleum, asphalt, rubber, or cork tile on concrete", # Linoleum, asphalt, rubber, or cork tile on concrete
        "cotton carpet", # Cotton carpet
        "loop pile tufted carpet", # Loop pile tufted carpet, 1.4 kg/m^2, 9.5 mm pile height: On hair pad, 3.0kg/m^2
        "thin carpet", # Thin carpet, cemented to concrete
        "pile carpet bonded to closed-cell foam underlay", # 6 mm pile carpet bonded to closed-cell foam underlay
        "pile carpet bonded to open-cell foam underlay", # 6 mm pile carpet bonded to open-cell foam underlay
        "tufted pile carpet", # 9 mm tufted pile carpet on felt underlay
        "needle felt", # Needle felt 5 mm stuck to concrete
        "soft carpet", # 10 mm soft carpet on concrete
        "hairy carpet", # Hairy carpet on 3 mm felt
        "rubber carpet", # 5 mm rubber carpet on concrete
        "carpet on hair felt or foam rubber", # Carpet 1.35 kg/m^2, on hair felt or foam rubber
        "cocos fibre roll felt", # Cocos fibre roll felt, 29 mm thick (unstressed), reverse side clad with paper, 2.2kg/m^2, 2 Rayl
    ],
    [ # Curtains
        "cotton curtains", # Cotton curtains (0.5 kg/m^2) draped to 3/4 area approx. 130 mm from wall
        "curtains", # Curtains (0.2 kg/m^2) hung 90 mm from wall
        "cotton cloth", # Cotton cloth (0.33 kg/m^2) folded to 7/8 area
        "densely woven window curtains", # Densely woven window curtains 90 mm from wall
        "vertical blinds, half opened", # Vertical blinds, 15 cm from wall, half opened (45 deg)
        "vertical blinds, open", # Vertical blinds, 15 cm from wall, open (90 deg)
        "tight velvet curtains", # Tight velvet curtains
        "curtain fabric", # Curtain fabric, 15 cm from wall
        "curtain fabric, folded", # Curtain fabric, folded, 15 cm from wall
        "curtain of close-woven glass mat", # Curtains of close-woven glass mat hung 50 mm from wall
        "studio curtain", # Studio curtains, 22 cm from wall
    ],
    ]
    # Tables of random-incidence absorption coefficients, one row per
    # material (same order as `materials`), one column per band.
    table = [ [ # Massive constructions and hard surfaces
        [0.02, 0.02, 0.03, 0.03, 0.04, 0.05, 0.05], # Walls, hard surfaces average (brick walls, plaster, hard floors, etc.)
        [0.01, 0.02, 0.02, 0.03, 0.03, 0.04, 0.04], # Walls, rendered brickwork
        [0.02, 0.03, 0.03, 0.03, 0.04, 0.07, 0.07], # Rough concrete
        [0.01, 0.01, 0.02, 0.02, 0.02, 0.05, 0.05], # Smooth unpainted concrete
        [0.02, 0.03, 0.04, 0.05, 0.04, 0.03, 0.02], # Rough lime wash
        [0.01, 0.01, 0.02, 0.02, 0.02, 0.02, 0.02], # Smooth brickwork with flush pointing, painted
        [0.08, 0.09, 0.12, 0.16, 0.22, 0.24, 0.24], # Smooth brickwork, 10 mm deep pointing, pit sand mortar
        [0.03, 0.03, 0.03, 0.04, 0.05, 0.07, 0.07], # Brick wall, stuccoed with a rough finish
        [0.01, 0.01, 0.01, 0.02, 0.02, 0.02, 0.02], # Ceramic tiles with a smooth surface
        [0.02, 0.02, 0.03, 0.04, 0.05, 0.05, 0.05], # Limestone walls
        [0.01, 0.01, 0.01, 0.02, 0.02, 0.04, 0.04], # Reverberation chamber walls
        [0.01, 0.03, 0.05, 0.02, 0.02, 0.02, 0.02], # Concrete floor
        [0.01, 0.01, 0.01, 0.02, 0.02, 0.02, 0.02], # Marble floor
    ],
    [ # Lightweight constructions and linings
        [0.15, 0.10, 0.06, 0.04, 0.04, 0.05, 0.05], # 2 * 13 mm plasterboard on steel frame, 50 mm mineral wool in cavity, surface painted
        [0.27, 0.23, 0.22, 0.15, 0.10, 0.07, 0.06], # Wooden lining, 12 mm fixed on frame
    ],
    [ # Glazing
        [0.08, 0.04, 0.03, 0.03, 0.02, 0.02, 0.02], # Single pane of glass, 3 mm
        [0.10, 0.05, 0.04, 0.03, 0.03, 0.03, 0.03], # Glass window, 0.68 kg/m^2
        [0.30, 0.20, 0.14, 0.10, 0.05, 0.05, 0.05], # Lead glazing
        [0.15, 0.05, 0.03, 0.03, 0.02, 0.02, 0.02], # Double glazing, 2-3 mm glass, > 30 mm gap
        [0.10, 0.07, 0.05, 0.03, 0.02, 0.02, 0.02], # Double glazing, 2-3 mm glass, 10 mm gap
        [0.15, 0.30, 0.18, 0.10, 0.05, 0.05, 0.05], # Double glazing, lead on the inside
    ],
    [ # Wood
        [0.18, 0.12, 0.10, 0.09, 0.08, 0.07, 0.07], # Wood, 1.6 cm thick, on 4 cm wooden planks
        [0.42, 0.21, 0.10, 0.08, 0.06, 0.06, 0.06], # Thin plywood panelling
        [0.18, 0.12, 0.10, 0.09, 0.08, 0.07, 0.07], # 16 mm wood on 40 mm studs
        [0.09, 0.06, 0.05, 0.05, 0.05, 0.04, 0.04], # Audience floor, 2 layers, 33 mm on sleepers over concrete
        [0.10, 0.07, 0.06, 0.06, 0.06, 0.06, 0.06], # Wood, stage floor, 2 layers, 27 mm over airspace
        [0.14, 0.10, 0.06, 0.08, 0.10, 0.10, 0.10], # Solid wooden door
    ],
    [ # Floor coverings
        [0.02, 0.03, 0.03, 0.03, 0.03, 0.02, 0.02], # Linoleum, asphalt, rubber, or cork tile on concrete
        [0.07, 0.31, 0.49, 0.81, 0.66, 0.54, 0.48], # Cotton carpet
        [0.10, 0.40, 0.62, 0.70, 0.63, 0.88, 0.88], # Loop pile tufted carpet, 1.4 kg/m^2, 9.5 mm pile height: On hair pad, 3.0kg/m^2
        [0.02, 0.04, 0.08, 0.20, 0.35, 0.40, 0.40], # Thin carpet, cemented to concrete
        [0.03, 0.09, 0.25, 0.31, 0.33, 0.44, 0.44], # 6 mm pile carpet bonded to closed-cell foam underlay
        [0.03, 0.09, 0.20, 0.54, 0.70, 0.72, 0.72], # 6 mm pile carpet bonded to open-cell foam underlay
        [0.08, 0.08, 0.30, 0.60, 0.75, 0.80, 0.80], # 9 mm tufted pile carpet on felt underlay
        [0.02, 0.02, 0.05, 0.15, 0.30, 0.40, 0.40], # Needle felt 5 mm stuck to concrete
        [0.09, 0.08, 0.21, 0.26, 0.27, 0.37, 0.37], # 10 mm soft carpet on concrete
        [0.11, 0.14, 0.37, 0.43, 0.27, 0.25, 0.25], # Hairy carpet on 3 mm felt
        [0.04, 0.04, 0.08, 0.12, 0.10, 0.10, 0.10], # 5 mm rubber carpet on concrete
        [0.08, 0.24, 0.57, 0.69, 0.71, 0.73, 0.73], # Carpet 1.35 kg/m^2, on hair felt or foam rubber
        [0.10, 0.13, 0.22, 0.35, 0.47, 0.57, 0.57], # Cocos fibre roll felt, 29 mm thick (unstressed), reverse side clad with paper, 2.2kg/m^2, 2 Rayl
    ],
    [ # Curtains
        [0.30, 0.45, 0.65, 0.56, 0.59, 0.71, 0.71], # Cotton curtains (0.5 kg/m^2) draped to 3/4 area approx. 130 mm from wall
        [0.05, 0.06, 0.39, 0.63, 0.70, 0.73, 0.73], # Curtains (0.2 kg/m^2) hung 90 mm from wall
        [0.03, 0.12, 0.15, 0.27, 0.37, 0.42, 0.42], # Cotton cloth (0.33 kg/m^2) folded to 7/8 area
        [0.06, 0.10, 0.38, 0.63, 0.70, 0.73, 0.73], # Densely woven window curtains 90 mm from wall
        [0.03, 0.09, 0.24, 0.46, 0.79, 0.76, 0.76], # Vertical blinds, 15 cm from wall, half opened (45 deg)
        [0.03, 0.06, 0.13, 0.28, 0.49, 0.56, 0.56], # Vertical blinds, 15 cm from wall, open (90 deg)
        [0.05, 0.12, 0.35, 0.45, 0.38, 0.36, 0.36], # Tight velvet curtains
        [0.10, 0.38, 0.63, 0.52, 0.55, 0.65, 0.65], # Curtain fabric, 15 cm from wall
        [0.12, 0.60, 0.98, 1.00, 1.00, 1.00, 1.00], # Curtain fabric, folded, 15 cm from wall
        [0.03, 0.03, 0.15, 0.40, 0.50, 0.50, 0.50], # Curtains of close-woven glass mat hung 50 mm from wall
        [0.36, 0.26, 0.51, 0.45, 0.62, 0.76, 0.76], # Studio curtains, 22 cm from wall
    ],
    ]

    @staticmethod
    def getAbsorptionCoefficients(category, material):
        """Return (coefficientsDb, frequencies) for the given material.

        category/material are matched case-insensitively after stripping
        whitespace; an Exception is raised for unknown names.
        coefficientsDb is the per-band transmission in dB,
        20*log10(1 - alpha), i.e. a negative value per band.
        """
        category = category.lower().strip()
        if category not in MaterialAbsorptionTable.categories:
            raise Exception('Unknown category for material absorption table: %s' % (category))
        categoryIdx = MaterialAbsorptionTable.categories.index(category)
        material = material.lower().strip()
        if material not in MaterialAbsorptionTable.materials[categoryIdx]:
            raise Exception('Unknown material for category %s in material absorption table: %s' % (category, material))
        materialIdx = MaterialAbsorptionTable.materials[categoryIdx].index(material)
        coefficients = np.array(MaterialAbsorptionTable.table[categoryIdx][materialIdx])
        frequencies = np.array(MaterialAbsorptionTable.frequencies)
        # eps avoids log10(0) for a fully absorbing band (alpha == 1.0).
        eps = np.finfo(float).eps
        coefficientsDb = 20.0 * np.log10(1.0 - coefficients + eps)
        return coefficientsDb, frequencies
class AirAttenuationTable(object):
    """Frequency-dependent air absorption for sound travelling a distance.

    Data from: Auralization : fundamentals of acoustics, modelling,
    simulation, algorithms and acoustic virtual reality.
    Fix: `np.finfo(np.float)` replaced with the builtin float, since the
    np.float alias was removed from NumPy.
    """
    temperatures = [10.0, 20.0]
    relativeHumidities = [40.0, 60.0, 80.0]
    frequencies = [125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0]
    # Air attenuation coefficient, in 10^-3 / m
    table = [ [ # 10 deg C
        [0.1, 0.2, 0.5, 1.1, 2.7, 9.4, 29.0], # 30-50% hum
        [0.1, 0.2, 0.5, 0.8, 1.8, 5.9, 21.1], # 50-70% hum
        [0.1, 0.2, 0.5, 0.7, 1.4, 4.4, 15.8], # 70-90% hum
    ],
    [ # 20 deg C
        [0.1, 0.3, 0.6, 1.0, 1.9, 5.8, 20.3], # 30-50% hum
        [0.1, 0.3, 0.6, 1.0, 1.7, 4.1, 13.5], # 50-70% hum
        [0.1, 0.3, 0.6, 1.1, 1.7, 3.5, 10.6], # 70-90% hum
    ]
    ]

    @staticmethod
    def getAttenuations(distance, temperature, relativeHumidity):
        """Return (attenuationsDb, frequencies) for a path of `distance` m.

        The nearest tabulated temperature (deg C) and relative humidity (%)
        are used; attenuation is clipped so the dB conversion stays finite.
        """
        closestTemperatureIdx = np.argmin(np.sqrt((np.array(AirAttenuationTable.temperatures) - temperature)**2))
        closestHumidityIdx = np.argmin(np.sqrt((np.array(AirAttenuationTable.relativeHumidities) - relativeHumidity)**2))
        attenuations = np.array(AirAttenuationTable.table[closestTemperatureIdx][closestHumidityIdx])
        frequencies = np.array(AirAttenuationTable.frequencies)
        eps = np.finfo(float).eps
        # Table units are 10^-3 per metre; clip below 1.0 to keep log10 finite.
        attenuations = np.clip(distance * 1e-3 * attenuations, 0.0, 1.0 - eps)
        attenuationsDb = 20.0 * np.log10(1.0 - attenuations)
        return attenuationsDb, frequencies
class FilterBank(object):
    """Bank of linear-phase FIR filters splitting the spectrum at the given
    center frequencies: one low-pass, band-passes, and one high-pass.

    Fixes vs. original: removed the redundant pre-assignment of self.n;
    deprecated logger.warn replaced with logger.warning; np.float (removed
    from NumPy) replaced with the builtin float; getScaledImpulseResponse
    and display no longer crash when `scales` is a list/tuple (the old
    code indexed a plain list with [:, np.newaxis]).
    """

    def __init__(self, n, centerFrequencies, samplingRate):
        """n: FIR length in taps (forced to the next odd number);
        centerFrequencies: band centers in Hz; samplingRate in Hz."""
        if n % 2 == 0:
            # An odd tap count keeps the FIR symmetric (linear phase).
            self.n = n + 1
            logger.warning('Length of the FIR filter adjusted to the next odd number to ensure symmetry: %d' % (self.n))
        else:
            self.n = n
        self.centerFrequencies = centerFrequencies
        self.samplingRate = samplingRate
        centerFrequencies = np.array(centerFrequencies, dtype=float)
        centerNormFreqs = centerFrequencies/(self.samplingRate/2.0)
        # Crossover frequencies halfway between adjacent band centers.
        cutoffs = centerNormFreqs[:-1] + np.diff(centerNormFreqs)/2
        filters = []
        for i in range(len(centerFrequencies)):
            if i == 0:
                # Low-pass filter for the bottom band
                b = scipy.signal.firwin(self.n, cutoff=cutoffs[0], window='hamming')
            elif i == len(centerFrequencies) - 1:
                # High-pass filter for the top band
                b = scipy.signal.firwin(self.n, cutoff=cutoffs[-1], window='hamming', pass_zero=False)
            else:
                # Band-pass filter between adjacent crossovers
                b = scipy.signal.firwin(self.n, [cutoffs[i-1], cutoffs[i]], pass_zero=False)
            filters.append(b)
        self.filters = np.array(filters)

    def _broadcastScales(self, scales, count):
        # Accept a scalar (applied to every band) or one gain per filter.
        scales = np.asarray(scales, dtype=float)
        if scales.ndim == 0:
            scales = scales * np.ones(count)
        return scales

    def getScaledImpulseResponse(self, scales=1):
        """Sum of the bank's filters, each weighted by the matching scale.

        scales: scalar applied to all bands, or a sequence with one gain
        per filter.  Returns a length-n impulse response."""
        scales = self._broadcastScales(scales, len(self.filters))
        return np.sum(self.filters * scales[:, np.newaxis], axis=0)

    def display(self, scales=1, merged=False):
        """Plot magnitude and phase responses of the (scaled) filters.

        merged: plot the summed bank as a single response when True.
        Returns the matplotlib figure."""
        # Adapted from: http://mpastell.com/2010/01/18/fir-with-scipy/
        if merged:
            filters = [self.getScaledImpulseResponse(scales)]
        else:
            filters = np.copy(self.filters)
            scales = self._broadcastScales(scales, len(filters))
            filters *= scales[:, np.newaxis]
        fig = plt.figure(figsize=(8, 6), facecolor='white', frameon=True)
        for b in filters:
            w, h = signal.freqz(b, 1)
            h_dB = 20 * np.log10(abs(h))
            plt.subplot(211)
            plt.plot(w/max(w), h_dB)
            plt.ylim(-150, 5)
            plt.ylabel('Magnitude (db)')
            plt.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
            plt.title(r'Frequency response')
            plt.subplot(212)
            h_Phase = np.unwrap(np.arctan2(np.imag(h), np.real(h)))
            plt.plot(w/max(w), h_Phase)
            plt.ylabel('Phase (radians)')
            plt.xlabel(r'Normalized Frequency (x$\pi$rad/sample)')
            plt.title(r'Phase response')
        plt.subplots_adjust(hspace=0.5)
        return fig
class EvertAcousticRoom(Room):
    """Room model for the EVERT beam-tracing engine with acoustics added.

    Converts the reflection paths computed by EVERT into an audio impulse
    response, combining air absorption, spherical spreading and wall
    material absorption per frequency band.

    NOTE(review): written for Python 2 -- a few expressions rely on
    integer division with `/` (e.g. `self.filterbank.n/2`) and on the
    float-initialised `realImpulseLength` being used as a slice bound;
    under Python 3 these would need `//` / int().
    """
    # Material palette: polygon material ids index into this list, whose
    # entries are (category, material) keys for MaterialAbsorptionTable.
    # NOTE(review): 'linoleum' is not an exact key in
    # MaterialAbsorptionTable (the full name there is 'linoleum, asphalt,
    # rubber, or cork tile on concrete'), so looking up material index 4
    # would raise -- verify before using it.
    materials = [ # category, # material name # index
        ['hard surfaces', 'average' ], # 0
        ['hard surfaces', 'concrete' ], # 1
        ['glazing', 'glass window' ], # 2
        ['wood', 'wood, 1.6 cm thick'], # 3
        ['floor coverings', 'linoleum' ], # 4
        ['floor coverings', 'soft carpet' ], # 5
        ['curtains', 'cotton curtains' ], # 6
    ]

    def __init__(self, samplingRate=16000, maximumOrder=3,
                 materialAbsorption=True, frequencyDependent=True):
        """samplingRate: output impulse-response rate in Hz.
        maximumOrder: highest reflection order traced by EVERT.
        materialAbsorption: include wall-material losses when True.
        frequencyDependent: synthesise per-band FIR impulses when True,
        otherwise place a single broadband tap per path."""
        super(EvertAcousticRoom,self).__init__()
        self.samplingRate = samplingRate
        self.maximumOrder = maximumOrder
        self.materialAbsorption = materialAbsorption
        self.frequencyDependent = frequencyDependent
        # 513-tap (odd-length, linear-phase) filterbank over the same
        # frequency bands as the absorption tables.
        self.filterbank = FilterBank(n=513,
                                     centerFrequencies=MaterialAbsorptionTable.frequencies,
                                     samplingRate=samplingRate)
        self.setAirConditions()

    def getMaterialIdByName(self, name):
        """Return the index in `materials` whose material name equals
        `name`; raises Exception for an unknown name."""
        idx = None
        for i, (_, materialName) in enumerate(EvertAcousticRoom.materials):
            if materialName == name:
                idx = i
                break
        if idx is None:
            raise Exception('Unknown material %s' % (name))
        return idx

    def setAirConditions(self, temperature=20.0, relativeHumidity=65.0):
        """Store the air temperature (deg C) and relative humidity (%)."""
        self.temperature = temperature
        self.relativeHumidity = relativeHumidity

    def _calculateSoundSpeed(self):
        # Approximate speed of sound in dry (0% humidity) air, in meters per second, at temperatures near 0 deg C
        return 331.3*np.sqrt(1+self.temperature/273.15)

    def _calculateDelayAndAttenuation(self, path):
        """Return (delay_seconds, attenuation_dB_per_band, frequencies)
        for a single reflection path."""
        # Calculate path length and corresponding delay
        pathLength = 0.0
        lastPt = path.m_points[0]
        for pt in path.m_points[1:]:
            pathLength += np.sqrt((lastPt.x - pt.x)**2 +
                                  (lastPt.y - pt.y)**2 +
                                  (lastPt.z - pt.z)**2)
            lastPt = pt
        pathLength = pathLength / 1000.0 # mm to m
        delay = pathLength/self._calculateSoundSpeed()
        # Calculate air attenuation coefficient (dB)
        airAttenuations, frequencies = AirAttenuationTable.getAttenuations(pathLength, self.temperature, self.relativeHumidity)
        # Calculate spherical geometric spreading attenuation (dB)
        distanceAttenuations = 20.0 * np.log10(1.0/pathLength)
        # Calculate material attenuation (dB), summed over every surface hit
        materialAttenuations = np.zeros((len(MaterialAbsorptionTable.frequencies),))
        if self.materialAbsorption:
            for polygon in path.m_polygons:
                materialId = polygon.getMaterialId()
                category, material = EvertAcousticRoom.materials[materialId]
                materialAbsoption, _ = MaterialAbsorptionTable.getAbsorptionCoefficients(category, material)
                materialAttenuations += materialAbsoption
        # Total attenuation (dB)
        attenuation = airAttenuations + distanceAttenuations + materialAttenuations
        return delay, attenuation, frequencies

    def calculateImpulseResponse(self, solution, maxImpulseLength=1.0, threshold=120.0, pathFilter=None):
        """Accumulate every path of `solution` into one impulse response.

        maxImpulseLength: output buffer length in seconds.
        threshold: a path is kept only if at least one band is attenuated
        by less than this many dB.
        pathFilter: optional predicate(path) -> bool; False skips the path.
        Returns the impulse response trimmed to the last written sample.
        """
        impulse = np.zeros((int(maxImpulseLength * self.samplingRate),))
        realImpulseLength = 0.0  # NOTE(review): float 0.0, later holds int sample counts
        for i in range(solution.numPaths()):
            path = solution.getPath(i)
            delay, attenuationsDb, _ = self._calculateDelayAndAttenuation(path)
            if pathFilter is not None and not pathFilter(path): continue
            # Add path impulse to global impulse
            delaySamples = int(delay * self.samplingRate)
            # Skip paths that are below attenuation threshold (dB)
            if np.any(abs(attenuationsDb) < threshold):
                if self.frequencyDependent:
                    # Skip paths that would have their impulse responses truncated at the end
                    if delaySamples + self.filterbank.n < len(impulse):
                        # dB -> linear amplitude gains, one per band.
                        linearGains = 1.0/np.exp(-attenuationsDb/20.0 * np.log(10.0))
                        pathImpulse = self.filterbank.getScaledImpulseResponse(linearGains)
                        # Random phase inversion
                        if np.random.random() > 0.5:
                            pathImpulse *= -1
                        # Centre the FIR around the arrival sample
                        # (integer division intended -- Python 2 `/`).
                        startIdx = delaySamples - self.filterbank.n/2
                        endIdx = startIdx + self.filterbank.n - 1
                        if startIdx < 0:
                            # Trim the leading part that would fall before t=0.
                            trimStartIdx = -startIdx
                            startIdx = 0
                        else:
                            trimStartIdx = 0
                        impulse[startIdx:endIdx+1] += pathImpulse[trimStartIdx:]
                        if endIdx+1 > realImpulseLength:
                            realImpulseLength = endIdx+1
                else:
                    # Use attenuation at 1000 Hz
                    linearGain = 1.0/np.exp(-attenuationsDb[3]/20.0 * np.log(10.0))
                    # Random phase inversion
                    sign = 1.0
                    if np.random.random() > 0.5:
                        sign *= -1
                    impulse[delaySamples] += linearGain * sign
                    if delaySamples+1 > realImpulseLength:
                        realImpulseLength = delaySamples+1
        return impulse[:realImpulseLength]

    def getSolutions(self):
        """Trace reverberation paths for every (source, listener) pair.

        Returns a list of updated PathSolution objects in source-major,
        listener-minor order."""
        self.constructBSP()
        solutions = []
        for s in range(self.numSources()):
            for l in range(self.numListeners()):
                src = self.getSource(s)
                lst = self.getListener(l)
                solution = PathSolution(self, src, lst, self.maximumOrder)
                solution.update()
                solutions.append(solution)
        return solutions
if __name__ == '__main__':
    # Demo: build a rectangular room containing a pyramidal obstacle,
    # trace reverberation paths to a binaural listener pair, and auralize
    # a source sound through the two estimated impulse responses.
    # NOTE(review): np.float was removed from NumPy >= 1.24; the dtype
    # arguments below would need to become plain `float` on modern NumPy.

    # Load audio file
    fs, signal = scipy.io.wavfile.read(os.path.join(CDIR, 'pop.wav'))
    fs = int(fs)
    signal = np.array(signal, dtype=np.float)
    signal /= np.max(np.abs(signal))  # normalize to [-1, 1]
    t = np.arange(len(signal), dtype=np.float)/fs
    plt.figure()
    plt.title('Source signal')
    plt.plot(t, signal)
    plt.xlabel('Time [sec]')
    plt.ylabel('Amplitude')
    # Create acoustic environment
    room = EvertAcousticRoom(samplingRate=fs, maximumOrder=10, materialAbsorption=True, frequencyDependent=True)
    # Define a simple rectangular (length x width x height) room geometry
    # (wall material is assigned below).
    length = 20000 # mm
    width = 10000 # mm
    height = 2500 # mm
    # Six faces: floor, four walls, ceiling.
    face1poly = Polygon([Vector3(0,0,0), Vector3(0,width,0), Vector3(length,width,0), Vector3(length,0,0)])
    face2poly = Polygon([Vector3(0,0,0), Vector3(0,width,0), Vector3(0,width,height), Vector3(0,0,height)])
    face3poly = Polygon([Vector3(0,0,0), Vector3(length,0,0), Vector3(length,0,height), Vector3(0,0,height)])
    face4poly = Polygon([Vector3(0,0,height), Vector3(0,width,height), Vector3(length,width,height), Vector3(length,0,height)])
    face5poly = Polygon([Vector3(0,width,height), Vector3(0,width,0), Vector3(length,width,0), Vector3(length,width,height)])
    face6poly = Polygon([Vector3(length,0,height), Vector3(length,width,height), Vector3(length,width,0), Vector3(length,0,0)])
    roomPolygons = [face1poly, face2poly, face3poly, face4poly, face5poly, face6poly]
    for polygon in roomPolygons:
        polygon.setMaterialId(room.getMaterialIdByName('cotton curtains'))
        room.addPolygon(polygon, color=Vector3(0.5,0.5,0.5))
    # Print some room information
    center = room.getCenter()
    print('Room maximum length: ', room.getMaxLength())
    print('Room center: x=%f, y=%f, z=%f' % (center.x, center.y, center.z))
    print('Number of elements: ', room.numElements())
    print('Number of convex elements: ', room.numConvexElements())
    # Display room layout
    ax = plt3d.Axes3D(plt.figure())
    for polygon in roomPolygons:
        vtx = []
        for i in range(polygon.numPoints()):
            vtx.append([polygon[i].x, polygon[i].y, polygon[i].z])
        vtx = np.array(vtx)
        tri = plt3d.art3d.Poly3DCollection([vtx])
        tri.set_color([0.5, 0.5, 0.5, 0.25])
        tri.set_edgecolor('k')
        ax.add_collection3d(tri)
    ax.set_xlim(0, length)
    ax.set_ylim(0, width)
    ax.set_zlim(0, height)
    ax.set_xlabel("x axis [mm]")
    ax.set_ylabel("y axis [mm]")
    ax.set_zlabel("z axis [mm]")
    ax.invert_xaxis()
    ax.invert_yaxis()
    # Define a simple pyramidal object in the middle of the room, with concrete walls
    width = 5000 # mm
    height = 1500 # mm
    pos = Vector3(7500, 5000, 1000)  # center of the pyramid's bounding box
    # NOTE(review): Python 2 integer division intended in width/2, height/2.
    p = Vector3(pos.x-width/2, pos.y-width/2, pos.z-height/2)
    # Square base plus four triangular faces meeting at the apex.
    face1poly = Polygon([Vector3(p.x,p.y,p.z), Vector3(p.x,p.y+width,p.z), Vector3(p.x+width,p.y+width,p.z), Vector3(p.x+width,p.y,p.z)])
    face2poly = Polygon([Vector3(p.x,p.y,p.z), Vector3(p.x,p.y+width,p.z), Vector3(p.x+width/2,p.y+width/2,p.z+height)])
    face3poly = Polygon([Vector3(p.x,p.y,p.z), Vector3(p.x+width,p.y,p.z), Vector3(p.x+width/2,p.y+width/2,p.z+height)])
    face4poly = Polygon([Vector3(p.x,p.y+width,p.z), Vector3(p.x+width,p.y+width,p.z), Vector3(p.x+width/2,p.y+width/2,p.z+height)])
    face5poly = Polygon([Vector3(p.x+width,p.y,p.z), Vector3(p.x+width,p.y+width,p.z), Vector3(p.x+width/2,p.y+width/2,p.z+height)])
    objectPolygons = [face1poly, face2poly, face3poly, face4poly, face5poly]
    for polygon in objectPolygons:
        polygon.setMaterialId(room.getMaterialIdByName('concrete'))
        room.addPolygon(polygon, color=Vector3(0.5,0.5,0.5))
    # Display object layout
    for polygon in objectPolygons:
        vtx = []
        for i in range(polygon.numPoints()):
            vtx.append([polygon[i].x, polygon[i].y, polygon[i].z])
        vtx = np.array(vtx)
        tri = plt3d.art3d.Poly3DCollection([vtx])
        tri.set_color([1.0, 0.0, 0.0, 0.25])
        tri.set_edgecolor('k')
        ax.add_collection3d(tri)
    # Create binaural listener localized in room: two listeners spaced by
    # the interaural distance stand in for left/right ears.
    interauralDistance = 300 # mm
    list1 = Listener()
    list1.setPosition(Vector3(1500, 5000 - interauralDistance/2, 1000))
    list1.setName('Lst-left')
    room.addListener(list1)
    list2 = Listener()
    list2.setPosition(Vector3(1500, 5000 + interauralDistance/2, 1000))
    list2.setName('Lst-right')
    room.addListener(list2)
    # Create source localized between the listeners
    src1 = Source()
    src1.setPosition(Vector3(1500,5000,1000))
    src1.setName('Src')
    room.addSource(src1)
    # Display listeners and source layout
    ax.plot([list1.getPosition().x], [list1.getPosition().y], [list1.getPosition().z], color='k', marker='o')
    ax.plot([list2.getPosition().x], [list2.getPosition().y], [list2.getPosition().z], color='k', marker='o')
    ax.plot([src1.getPosition().x], [src1.getPosition().y], [src1.getPosition().z], color='r', marker='o')
    # Only consider reverberation paths that actually hit the object
    def mustHitObjectFilter(path):
        """Path filter: keep only paths reflecting off a concrete surface."""
        hitObject = False
        for polygon in path.m_polygons:
            materialId = polygon.getMaterialId()
            if materialId == room.getMaterialIdByName('concrete'):
                hitObject = True
                break
        return hitObject
    # Only consider non-direct reverberation paths
    def noDirectFilter(path):
        """Path filter: drop the direct (zero-reflection) path."""
        return len(path.m_polygons) > 0
    # Compute the reverberation paths, one solution per listener.
    solutions = room.getSolutions()
    impulseLeft = room.calculateImpulseResponse(solutions[0], maxImpulseLength=1.0, threshold=120.0, pathFilter=noDirectFilter)
    impulseRight = room.calculateImpulseResponse(solutions[1], maxImpulseLength=1.0, threshold=120.0, pathFilter=noDirectFilter)
    # Zero-pad both channels to a common length.
    maxImpulseLength = max(len(impulseLeft), len(impulseRight))
    impulseLeft = np.pad(impulseLeft, (0, max(0, maxImpulseLength - len(impulseLeft))), mode='constant')
    impulseRight = np.pad(impulseRight, (0, max(0, maxImpulseLength - len(impulseRight))), mode='constant')
    # Display impulse response
    fig = plt.figure()
    plt.title('Estimated impulse responses')
    t = np.arange(len(impulseLeft), dtype=np.float)/room.samplingRate
    plt.plot(t, impulseLeft, color='b', label='Left channel')
    t = np.arange(len(impulseRight), dtype=np.float)/room.samplingRate
    plt.plot(t, impulseRight, color='g', label='Right channel')
    plt.legend()
    plt.xlabel('Time [sec]')
    plt.ylabel('Amplitude')
    # Apply impulse responses to click signal
    outLeft = np.convolve(signal, impulseLeft, mode='full')
    outRight = np.convolve(signal, impulseRight, mode='full')
    plt.figure()
    plt.title('Output signal')
    t = np.arange(len(outLeft), dtype=np.float)/fs
    plt.plot(t, outLeft, color='b', label='Left channel')
    t = np.arange(len(outRight), dtype=np.float)/fs
    plt.plot(t, outRight, color='g', label='Right channel')
    plt.xlabel('Time [sec]')
    plt.ylabel('Amplitude')
    plt.legend()
    # Save output sound to wav file (stereo, peak-normalized)
    outSignal = np.stack((outLeft, outRight), axis=1)
    outSignal /= np.max(np.abs(outSignal))
    scipy.io.wavfile.write(os.path.join(CDIR, 'output.wav'), fs, outSignal)
    # Wait until all figures are closed
    plt.show()
| |
# coding: utf-8
# ### Lock animated
# This is a simulation of a ship lock, similar to the ones we can see in the Panama Canal or in some North European ports.
# The lock has two doors (left and right) which can open when the level of water is the same at both sides of the door.
# The picture below shows a particular moment of the simulation where we can observe the different components of the system.
# Ships are queueing at both sides of the lock: those coming from the left side(lship.number) are presented in blue colour while those coming from the right side(rship.number) are presented in red colour. In the picture below, lship.6 and lship.7 are queueing on the left, while rship.12, rship.13 and rship.14 are waiting on the right side. Both doors of the lock are closed because the water level inside the lock is switching from low level to high level, carrying two ships.
# 
# In[1]:
import salabim as sim
left = -1 # Lock on the left side, actually meaning that the lock is open on the left (left door is open).
right = +1 # Lock on the right side, actually meaning that the lock is open on the right (right door is open).
# In[2]:
# We name the lock sides as 'l' for left and 'r' right
def sidename(side):
    """One-letter label for a lock side: 'l' (left) or 'r' (right)."""
    if side == left:
        return "l"
    return "r"
# Ships coming from the left are blue and those coming from the right are red
def shipcolor(side):
    """Fill colour for a ship: blue when coming from the left, red otherwise."""
    if side == left:
        return "blue"
    return "red"
# We create a polygon to represent a ship; this polygon has four corner coordinates (x, y).
# The polygon's length matches the ship length, and it is 3 units high.
# This will be used later by the Ship class to create an animation object.
def ship_polygon(ship):
    """Four-corner polygon (x, y pairs) outlining *ship*.

    The hull spans the ship's length with a pointed bow, 3 units high;
    ship.side (+1/-1) mirrors the shape for right/left-bound traffic.
    """
    direction = ship.side
    stern_x = direction * (ship.length - 2)
    return (stern_x, 0, direction * 3, 0, direction * 2, 3, stern_x, 3)
# In[3]:
# We define a rectangle representing the water inside the lock.
# If the lock is in mode 'Switch' the water level will be moving between the two lock levels (low and high).
def lock_water_rectangle(t):
    """Rectangle (x0, y0, x1, y1) spanning the water inside the lock at time t.

    While the lock is in mode 'Switch' the surface height is linearly
    interpolated between the two fixed levels (sim.interpolate maps t over
    [mode_time, scheduled_time]); otherwise it sits at the level of the
    currently open side.
    """
    if lock.mode() == "Switch":
        surface = sim.interpolate(
            t, lock.mode_time(), lock.scheduled_time(),
            ylevel[lock.side], ylevel[-lock.side])
    else:
        surface = ylevel[lock.side]
    return (xdoor[left], -waterdepth, xdoor[right], surface)
# We define a rectangle representing the left door
def lock_door_left_rectangle(t):
    """Rectangle for the left lock door.

    The door is closed (drawn tall) while the lock switches levels or while
    the right door is the open one; otherwise it is lowered out of sight.
    """
    closed = lock.mode() == "Switch" or lock.side == right
    if closed:
        top = ylevel[right] + 2  # 2 units above the high water level
    else:
        top = ylevel[left] - waterdepth  # sunk below the left-side surface
    return (xdoor[left] - 1, -waterdepth, xdoor[left] + 1, top)
# Returns the two coordinates defining the rectangle
# We define a rectangle representing the right door; the logic is similar to the one used for the left door.
def lock_door_right_rectangle(t):
    """Rectangle for the right lock door; mirror image of the left-door logic."""
    if lock.mode() == "Switch" or lock.side == left:
        top = ylevel[right] + 2  # closed: raised above the high water level
    else:
        top = ylevel[right] - waterdepth  # open: lowered below the surface
    return (xdoor[right] - 1, -waterdepth, xdoor[right] + 1, top)
# The following sketch can help in understanding the variables used in the animation:
# 
# In[4]:
def do_animation():
    """Set up the whole lock animation: water, doors, queues, sliders, monitors.

    Populates the module-level geometry globals (ylevel, xdoor, waterdepth)
    used by the dynamic rectangle/queue callbacks, then registers every
    salabim animation object and starts the animation.

    Fix: the title of the fourth AnimateMonitor (waiting time of the RIGHT
    queue) said "ships left" — a copy-paste of the third monitor's title.
    """
    global ylevel, xdoor, waterdepth
    lockheight = 5  # Maximum height of the water inside the lock
    waterdepth = 2  # Minimum level of water
    ylevel = {left: 0, right: lockheight}  # Fixed water level at both sides of the lock
    xdoor = {left: -0.5 * locklength, right: 0.5 * locklength}  # x-coordinate of the doors
    xbound = {left: -1.2 * locklength, right: 1.2 * locklength}  # x-coordinate of the limits of the screen
    # animation_parameters is a method of salabim.Environment to set animation parameters and to start the animation
    env.animation_parameters(animate=True, x0=xbound[left], y0=-waterdepth, x1=xbound[right], modelname="Lock", speed=8, background_color="20%gray")
    for side in [left, right]:
        # AnimateQueue is a class of Salabim to animate the components in a queue;
        # wait[left] and wait[right] are the ship queues at both sides of the lock.
        sim.AnimateQueue(queue=wait[side], x=xdoor[side], y=10 + ylevel[side], direction="n")
    # AnimateRectangle is a class of Salabim to display a rectangle, optionally with a text.
    # Water at the left side of the lock (fixed level):
    sim.AnimateRectangle(spec=(xbound[left], ylevel[left] - waterdepth, xdoor[left], ylevel[left]), fillcolor="aqua")
    # Water at the right side of the lock (fixed level):
    sim.AnimateRectangle(spec=(xdoor[right], ylevel[right] - waterdepth, xbound[right], ylevel[right]), fillcolor="aqua")
    # Water inside the lock; its level moves while the lock is switching:
    sim.AnimateRectangle(spec=lock_water_rectangle, fillcolor="aqua")
    # The left and right doors, which appear and disappear as the lock operates:
    sim.AnimateRectangle(spec=lock_door_left_rectangle)
    sim.AnimateRectangle(spec=lock_door_right_rectangle)
    # AnimateSlider lets the user adjust parameters on screen while the
    # simulation runs: interarrival time and mean ship length.
    sim.AnimateSlider(x=520, y=0, width=100, height=20, vmin=16, vmax=60, resolution=4, v=iat, label="iat", action=set_iat, xy_anchor="nw")
    sim.AnimateSlider(
        x=660, y=0, width=100, height=20, vmin=10, vmax=60, resolution=5, v=meanlength, label="mean length", action=set_meanlength, xy_anchor="nw"
    )
    # AnimateMonitor displays a monitor on the screen while running the simulation.
    sim.AnimateMonitor(
        wait[left].length,
        linecolor="orange",
        fillcolor="bg",
        x=-225,
        y=-200,
        xy_anchor="n",
        horizontal_scale=1,
        width=450,
        linewidth=2,
        title=lambda: "Number of waiting ships left. Mean={:10.2f}".format(wait[left].length.mean()),
    )
    sim.AnimateMonitor(
        wait[right].length,
        linecolor="orange",
        fillcolor="bg",
        x=-225,
        y=-300,
        xy_anchor="n",
        horizontal_scale=1,
        width=450,
        linewidth=2,
        title=lambda: "Number of waiting ships right. Mean={:10.2f}".format(wait[right].length.mean()),
    )
    sim.AnimateMonitor(
        wait[left].length_of_stay,
        linecolor="white",
        fillcolor="bg",
        x=-225,
        y=-400,
        xy_anchor="n",
        vertical_scale=0.5,
        horizontal_scale=5,
        width=450,
        height=75,
        linewidth=4,
        title=lambda: "Waiting time of ships left. Mean={:10.2f}".format(wait[left].length_of_stay.mean()),
    )
    sim.AnimateMonitor(
        wait[right].length_of_stay,
        linecolor="white",
        fillcolor="bg",
        x=-225,
        y=-500,
        xy_anchor="n",
        vertical_scale=0.5,
        horizontal_scale=5,
        width=450,
        height=75,
        linewidth=4,
        # Fixed: this monitor tracks the RIGHT queue's waiting time.
        title=lambda: "Waiting time of ships right. Mean={:10.2f}".format(wait[right].length_of_stay.mean()),
    )
    # The queue inside the lock while switching; its y-coordinate is dynamic
    # (lock_y) so the queue rides up and down with the water level.
    sim.AnimateQueue(queue=lockqueue, x=lambda: xdoor[-lock.sideq], y=lock_y, direction=lambda: "w" if lock.sideq == left else "e")
# In[5]:
# A function to set the global variable iat (interarrival time)
def set_iat(val):
    """Slider callback: update the global ship interarrival time."""
    global iat
    iat = float(val)
# A function to set the global variable meanlength (mean length of the ships)
def set_meanlength(val):
    """Slider callback: update the global mean ship length."""
    global meanlength
    meanlength = float(val)
# In[6]:
# Generator of ships for each side of the lock
class Shipgenerator(sim.Component):
def process(self):
while True: # Infinite loop to generate ships
yield self.hold(sim.Exponential(iat).sample())
ship = Ship(name=sidename(self.side) + "ship.") # The name os the ship is lship.# of rship.#
ship.side = self.side
ship.length = meanlength * sim.Uniform(2.0 / 3, 4.0 / 3).sample()
if lock.mode() == "Idle": # If lock is idle then activate it
lock.activate()
# The component Ship can have the following modes:
# - Wait
# - Sail in
# - In lock
# - Sail out
# In[7]:
class Ship(sim.Component):
    """A vessel that queues, sails into the lock, is carried across, and exits.

    Modes over its lifetime: Wait, 'Sail in', 'In lock', 'Sail out'.
    The attributes ``side`` and ``length`` are assigned by Shipgenerator.
    """
    # animation_objects is a method of Component that defines how to display a component in AnimateQueue
    def animation_objects(self, q):
        size_x = self.length  # space the ship occupies in the queue display, x-direction
        size_y = 5  # space the ship occupies in the queue display, y-direction
        # an0 is an instance of an Animate class - a polygon representing the ship,
        # coloured by side and labelled with the ship's name
        an0 = sim.AnimatePolygon(
            spec=ship_polygon(self),
            fillcolor=shipcolor(self.side),
            linewidth=0,
            text=" " + self.name(),
            textcolor="white",
            layer=1,
            fontsize=2.6,
            text_anchor=("e" if self.side == left else "w"),
        )
        return (size_x, size_y, an0)

    def process(self):
        self.enter(wait[self.side])  # Queue up on our side of the lock
        yield self.passivate(mode="Wait")  # Sleep until the lock admits us
        yield self.hold(intime, mode="Sail in")  # Sail towards the lock
        self.leave(wait[self.side])  # Out of the waiting queue...
        self.enter(lockqueue)  # ...and into the lock chamber
        lock.activate()  # Tell the lock we are inside
        yield self.passivate(mode="In lock")  # Sleep while the water level switches
        self.leave(lockqueue)  # Leave the chamber queue
        yield self.hold(outtime, mode="Sail out")  # Sail out of the lock
        lock.activate()  # Tell the lock we are clear; the component then ends
# In[8]:
# This function calculates, by interpolation, the level of the water inside the lock when switching
def lock_y(t):
    """Current height of the water (and of the queued ships) inside the lock.

    Interpolated while the lock is switching; otherwise the fixed level of
    the open side.
    """
    if lock.mode() != "Switch":
        return ylevel[lock.side]
    return sim.interpolate(t, lock.mode_time(), lock.scheduled_time(),
                           ylevel[lock.side], ylevel[-lock.side])
# The component Lock can have the following modes:
# - Idle
# - Wait for sail in
# - Switch
# - Wait for sail out
# In[9]:
class Lock(sim.Component):
    """The lock chamber: admits waiting ships, switches the water level and
    releases them on the opposite side.

    Modes cycle through: Idle, 'Wait for sail in', Switch, 'Wait for sail out'.
    ``side`` (open door) and ``sideq`` (queue being served) are assigned by
    the setup code.
    """
    def process(self):
        while True:
            if len(wait[left]) + len(wait[right]) == 0:
                yield self.passivate(mode="Idle")  # Nothing to do; Shipgenerator reactivates us
            usedlength = 0  # Length already occupied inside the lock
            # NOTE(review): admission scans wait[self.side] while the served
            # queue is tracked separately in self.sideq -- both are flipped
            # once per cycle below; confirm they cannot drift apart.
            for ship in wait[self.side]:  # Admit further ships while they still fit
                if usedlength + ship.length <= locklength:  # There is still room for this ship
                    usedlength += ship.length
                    ship.activate()
            yield self.passivate("Wait for sail in")  # Last entering ship reactivates us
            yield self.hold(switchtime, mode="Switch")  # Water level moving between the two levels
            self.side = -self.side  # After switching, the open door changes
            for ship in lockqueue:  # Ships inside the lock
                ship.activate()  # Wake them so they sail out and leave the system
            yield self.passivate("Wait for sail out")  # Last exiting ship reactivates us
            self.sideq = -self.sideq  # Serve the opposite queue next
# In[10]:
# In[10]:
env = sim.Environment()
# In[11]:
# Model parameters (time and length units are arbitrary simulation units).
locklength = 60  # Length of the lock (see sketch above)
switchtime = 10  # Time required to switch the water level from high to low or vice versa
intime = 2  # Time required to let ships sail in
outtime = 2  # Time required to let ships sail out
meanlength = 30  # Ships mean length (used to generate ships)
iat = 20  # Interarrival time of ships (adjustable at runtime via slider)
# In[12]:
lockqueue = sim.Queue("lockqueue")  # This is the queue inside the lock
# In[13]:
wait = {}  # A dictionary containing the two waiting queues: left and right
# In[14]:
# One waiting queue plus one ship generator per side of the lock.
for side in (left, right):
    wait[side] = sim.Queue(name=sidename(side) + "Wait")  # Queues at both sides are generated
    shipgenerator = Shipgenerator(name=sidename(side) + "Shipgenerator")  # lShipgenerator or rShipgenerator
    shipgenerator.side = side
# In[15]:
lock = Lock(name="lock")  # Lock is instantiated
lock.side = left  # When starting the simulation the open door is the left one
lock.sideq = left  # The queue we are going to serve first
# In[16]:
do_animation()
# In[17]:
env.run()
| |
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import _user_has_perm, _user_get_all_permissions, _user_has_module_perms
from django.db import models
from django.contrib.contenttypes.models import ContentTypeManager
from django.contrib import auth
from bson.objectid import ObjectId
from mongoengine import ImproperlyConfigured
from django_mongoengine import document
from django_mongoengine import fields
from .managers import MongoUserManager
try:
    from django.contrib.auth.hashers import check_password, make_password
except ImportError:
    """Handle older versions of Django"""
    # Fallback implementations for Django versions that predate
    # django.contrib.auth.hashers (pre-1.4).
    from django.utils.hashcompat import md5_constructor, sha_constructor

    def get_hexdigest(algorithm, salt, raw_password):
        # Hash salt+password with the named algorithm.
        # NOTE(review): md5/sha1 are weak by modern standards; kept only for
        # compatibility with legacy password records.
        raw_password, salt = smart_str(raw_password), smart_str(salt)
        if algorithm == 'md5':
            return md5_constructor(salt + raw_password).hexdigest()
        elif algorithm == 'sha1':
            return sha_constructor(salt + raw_password).hexdigest()
        raise ValueError('Got unknown password algorithm type in password')

    def check_password(raw_password, password):
        # Stored format is 'algo$salt$hash'.
        algo, salt, hash = password.split('$')
        return hash == get_hexdigest(algo, salt, raw_password)

    def make_password(raw_password):
        # Build an 'algo$salt$hash' record with a 5-char salt.
        # NOTE(review): random.random() is not a CSPRNG -- acceptable only in
        # this legacy-compat path; confirm before reuse.
        from random import random
        algo = 'sha1'
        salt = get_hexdigest(algo, str(random()), str(random()))[:5]
        hash = get_hexdigest(algo, salt, raw_password)
        return '%s$%s$%s' % (algo, salt, hash)
class ContentType(document.Document):
    """MongoDB-backed counterpart of django.contrib.contenttypes' ContentType.

    Identifies a model by (app_label, model); used by Permission references.
    """
    name = fields.StringField(max_length=100)
    app_label = fields.StringField(max_length=100)
    # (app_label, model) pairs are unique, mirroring Django's unique_together.
    model = fields.StringField(max_length=100, verbose_name=_('python model class name'),
        unique_with='app_label')
    objects = ContentTypeManager()

    class Meta:
        verbose_name = _('content type')
        verbose_name_plural = _('content types')
        # db_table = 'django_content_type'
        # ordering = ('name',)
        # unique_together = (('app_label', 'model'),)

    def __unicode__(self):
        return self.name

    def model_class(self):
        "Returns the Python model class for this type of content."
        # NOTE(review): models.get_model was removed in Django 1.9; this code
        # targets older Django releases.
        from django.db import models
        return models.get_model(self.app_label, self.model)

    def get_object_for_this_type(self, **kwargs):
        """
        Returns an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get_object() model
        method. The ObjectNotExist exception, if thrown, will not be caught,
        so code that calls this method should catch it.
        """
        return self.model_class()._default_manager.using(self._state.db).get(**kwargs)

    def natural_key(self):
        # Natural key mirrors Django's contenttypes serialization key.
        return (self.app_label, self.model)
class SiteProfileNotAvailable(Exception):
    """Raised when AUTH_PROFILE_MODULE is unset, malformed, or unloadable."""
class PermissionManager(models.Manager):
    """Manager adding natural-key lookup for Permission documents."""

    def get_by_natural_key(self, codename, app_label, model):
        """Fetch the permission identified by (codename, app_label, model)."""
        content_type = ContentType.objects.get_by_natural_key(app_label, model)
        return self.get(codename=codename, content_type=content_type)
class Permission(document.Document):
    """The permissions system provides a way to assign permissions to specific
    users and groups of users.

    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:

        - The "add" permission limits the user's ability to view the "add"
          form and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have
    a certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    # Fix: verbose_name was _('username'), copy-pasted from the User model;
    # this field is the permission's display name (cf. django.contrib.auth's
    # Permission.name).
    name = fields.StringField(max_length=50, verbose_name=_('name'))
    content_type = fields.ReferenceField(ContentType)
    codename = fields.StringField(max_length=100, verbose_name=_('codename'))
    # FIXME: don't access field of the other class
    # unique_with=['content_type__app_label', 'content_type__model'])

    objects = PermissionManager()

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # unique_together = (('content_type', 'codename'),)
        # ordering = ('content_type__app_label', 'content_type__model', 'codename')

    def __unicode__(self):
        # Python 2 display form: "<app_label> | <content type> | <name>".
        return u"%s | %s | %s" % (
            unicode(self.content_type.app_label),
            unicode(self.content_type),
            unicode(self.name))

    def natural_key(self):
        """Natural key: (codename, app_label, model), matching Django's."""
        return (self.codename,) + self.content_type.natural_key()
    natural_key.dependencies = ['contenttypes.contenttype']
class Group(document.Document):
    """Groups are a generic way of categorizing users to apply permissions,
    or some other label, to those users. A user can belong to any number of
    groups.

    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only
    e-mail messages.
    """
    # Group names are globally unique.
    name = fields.StringField(max_length=80, unique=True, verbose_name=_('name'))
    # Permissions granted to every member of the group.
    permissions = fields.ListField(fields.ReferenceField(Permission, verbose_name=_('permissions'), required=False))

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')

    def __unicode__(self):
        return self.name
class User(document.Document):
    """A User document that aims to mirror most of the API specified by Django
    at http://docs.djangoproject.com/en/dev/topics/auth/#users
    """
    username = fields.StringField(
        max_length=254, verbose_name=_('username'),
        help_text=_("Required. 254 characters or fewer. Letters, numbers and @/./+/-/_ characters"),
    )
    first_name = fields.StringField(
        max_length=30, blank=True, verbose_name=_('first name'),
    )
    last_name = fields.StringField(
        max_length=30, blank=True, verbose_name=_('last name'))
    email = fields.EmailField(verbose_name=_('e-mail address'), blank=True)
    # Stored hashed; always set through set_password(), never directly.
    password = fields.StringField(
        max_length=128,
        verbose_name=_('password'),
        help_text=_("Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    is_staff = fields.BooleanField(
        default=False,
        verbose_name=_('staff status'),
        help_text=_("Designates whether the user can log into this admin site."))
    is_active = fields.BooleanField(
        default=True,
        verbose_name=_('active'),
        help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = fields.BooleanField(
        default=False,
        verbose_name=_('superuser status'),
        help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    last_login = fields.DateTimeField(
        default=timezone.now,
        verbose_name=_('last login'))
    date_joined = fields.DateTimeField(
        default=timezone.now,
        verbose_name=_('date joined'))
    user_permissions = fields.ListField(
        fields.ReferenceField(Permission), verbose_name=_('user permissions'),
        blank=True, help_text=_('Permissions for the user.'))

    # Django custom-user protocol attributes.
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    # username is unique but sparse, so documents lacking a username do not
    # collide on the index.
    meta = {
        'allow_inheritance': True,
        'indexes': [
            {'fields': ['username'], 'unique': True, 'sparse': True}
        ]
    }

    def __unicode__(self):
        return self.username

    def get_full_name(self):
        """Returns the users first and last names, separated by a space.
        """
        full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
        return full_name.strip()

    def is_anonymous(self):
        # Real users are never anonymous (mirrors Django's contract).
        return False

    def is_authenticated(self):
        # Any instantiated User counts as authenticated.
        return True

    def set_password(self, raw_password):
        """Sets the user's password - always use this rather than directly
        assigning to :attr:`~mongoengine.django.auth.User.password` as the
        password is hashed before storage.

        Note: saves the document immediately and returns self.
        """
        self.password = make_password(raw_password)
        self.save()
        return self

    def check_password(self, raw_password):
        """Checks the user's password against a provided password - always use
        this rather than directly comparing to
        :attr:`~mongoengine.django.auth.User.password` as the password is
        hashed before storage.
        """
        return check_password(raw_password, self.password)

    @classmethod
    def create_user(cls, username, password, email=None):
        """Create (and save) a new user with the given username, password and
        email address.
        """
        now = timezone.now()
        # Normalize the address by lowercasing the domain part of the email
        # address.
        if email is not None:
            try:
                email_name, domain_part = email.strip().split('@', 1)
            except ValueError:
                # No '@' present: keep the address as given.
                pass
            else:
                email = '@'.join([email_name, domain_part.lower()])
        user = cls(username=username, email=email, date_joined=now)
        user.set_password(password)
        user.save()
        return user

    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through his/her
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions

    def get_all_permissions(self, obj=None):
        # Delegates to Django's backend-aggregating helper.
        return _user_get_all_permissions(self, obj)

    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)

    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        return _user_has_module_perms(self, app_label)

    def email_user(self, subject, message, from_email=None):
        "Sends an e-mail to this User."
        from django.core.mail import send_mail
        send_mail(subject, message, from_email, [self.email])

    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.
        """
        # The resolved profile is memoized per instance in _profile_cache.
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
                raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
                                              'DULE in your project settings')
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
            except ValueError:
                raise SiteProfileNotAvailable('app_label and model_name should'
                        ' be separated by a dot in the AUTH_PROFILE_MODULE set'
                        'ting')
            try:
                model = models.get_model(app_label, model_name)
                if model is None:
                    raise SiteProfileNotAvailable('Unable to load the profile '
                        'model, check AUTH_PROFILE_MODULE in your project sett'
                        'ings')
                self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
                self._profile_cache.user = self
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache
class MongoUser(models.Model):
    """Dummy user model for Django.

    MongoUser is used to replace Django's UserManager with MongoUserManager.
    The actual user document class is django_mongoengine.auth.models.User or any
    other document class specified in MONGOENGINE_USER_DOCUMENT.

    To get the user document class, use `get_user_document()`.
    """
    objects = MongoUserManager()

    class Meta:
        app_label = 'mongo_auth'

    def set_password(self, password):
        """Doesn't do anything, but works around the issue with Django 1.6."""
        make_password(password)

# Patch the dummy model's primary-key converter so Django turns raw pk
# values into Mongo ObjectIds.
MongoUser._meta.pk.to_python = ObjectId
| |
import os, sys, socket, random
from urllib import quote_plus
from pandac.PandaModules import HTTPClient
from pandac.PandaModules import HTTPCookie
from pandac.PandaModules import URLSpec
from pandac.PandaModules import Ramfile
from pandac.PandaModules import Ostream
from pandac.PandaModules import HTTPDate
from pandac.PandaModules import DocumentSpec
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
notify = directNotify.newCategory('UserFunnel')
class UserFunnel:
    """Reports player-progress 'funnel' milestones to web-analytics collectors
    (HitBox or an internal Disney endpoint) over HTTP, managing the cookies
    the HitBox collectors expect.

    NOTE(review): relies on several module-level helpers defined elsewhere in
    this package (firstRun, patcherVer, vconGroup, checkParamFile, getMAC,
    loggingSubID, loggingAvID, taskMgr) -- not visible here; confirm against
    the callers.
    """

    def __init__(self):
        self.hitboxAcct = 'DM53030620EW'
        self.language = 'en-us'
        self.cgRoot = 'ToonTown_Online'
        self.cgBeta = 'Beta'
        self.cgRelease = 'Release'
        self.cgLocation = 'US'
        self.campaignID = ''
        self.cfCookieFile = 'cf.txt'
        self.dynamicVRFunnel = 'http://download.toontown.com/'
        # Candidate collector hosts, selected later via setHost().
        self.hostDict = {0: 'Internal Disney PHP Collector Site',
         1: 'ehg-dig.hitbox.com/HG?',
         2: 'ehg-dig.hitbox.com/HG?',
         3: 'build64.online.disney.com:5020/index.php?'}
        self.CurrentHost = ''
        self.URLtoSend = ''
        self.gameName = 'ToonTown'
        self.browserName = 'Panda3D%20(' + self.gameName + ';%20' + sys.platform + ')'
        self.HTTPUserHeader = [('User-agent', 'Panda3D')]
        # OS fields filled in by whatOSver().
        self.osMajorver = ''
        self.osMinorver = ''
        self.osRevver = ''
        self.osBuild = ''
        self.osType = ''
        self.osComments = ''
        self.msWinTypeDict = {0: 'Win32s on Windows 3.1',
         1: 'Windows 95/98/ME',
         2: 'Windows NT/2000/XP',
         3: 'Windows CE'}
        # Funnel milestone IDs -> human-readable step names.
        self.milestoneDict = {0: 'New User',
         1: 'Create Account',
         2: 'View EULA',
         3: 'Accept EULA',
         4: 'Download Start',
         5: 'Download End',
         6: 'Installer Run',
         7: 'Launcher Start',
         8: 'Launcher Login',
         9: 'Client Opens',
         10: 'Create Pirate Loads',
         11: 'Create Pirate Exit',
         12: 'Cutscene One Start',
         13: 'Cutscene One Ends',
         14: 'Cutscene Two Start',
         15: 'Cutscene Thee Start',
         16: 'Cutscene Three Ends',
         17: 'Access Cannon',
         18: 'Cutscene Four Starts',
         19: 'Cutscene Four Ends',
         20: 'Dock - Start Game'}
        self.macTypeDict = {2: 'Jaguar',
         1: 'Puma',
         3: 'Panther',
         4: 'Tiger',
         5: 'Lepard'}
        self.milestone = ''
        # Cached [domain, path, name, value] tuples for the three HitBox cookies.
        self.pandaHTTPClientVarWSS = []
        self.pandaHTTPClientVarCTG = []
        self.pandaHTTPClientVarDM = []
        self.checkForCFfile()
        self.httpSession = HTTPClient()
        self.whatOSver()

    def checkForCFfile(self):
        # First-run detection: if the cookie file is missing, record this as
        # a fresh install via firstRun (helper defined elsewhere).
        if firstRun() == True:
            pass
        elif os.path.isfile(self.cfCookieFile) == False:
            firstRun('write', True)

    def whatOSver(self):
        """Populate the os* fields from the running platform (win32 or darwin)."""
        if sys.platform == 'win32':
            self.osMajorver = str(sys.getwindowsversion()[0])
            self.osMinorver = str(sys.getwindowsversion()[1])
            self.osBuild = str(sys.getwindowsversion()[2])
            self.osType = str(sys.getwindowsversion()[3])
            self.osComments = str(sys.getwindowsversion()[4])
            return
        if sys.platform == 'darwin':
            self.osMajorver = '10'
            # Primary source: parse "System Version" from system_profiler.
            osxcmd = '/usr/sbin/system_profiler SPSoftwareDataType |/usr/bin/grep "System Version"'
            infopipe = os.popen(osxcmd, 'r')
            parseLine = infopipe.read()
            infopipe.close()
            del infopipe
            notify.info('parseLine = %s' % str(parseLine))
            versionStringStart = parseLine.find('10.')
            notify.info('versionStringStart = %s' % str(versionStringStart))
            testPlist = False
            try:
                self.osMinorver = parseLine[versionStringStart + 3]
                self.osRevver = parseLine[versionStringStart + 5:versionStringStart + 7].strip(' ')
                self.osBuild = parseLine[int(parseLine.find('(')) + 1:parseLine.find(')')]
            except:
                notify.info("couldn't parse the system_profiler output, using zeros")
                self.osMinorver = '0'
                self.osRevver = '0'
                self.osBuild = '0000'
                testPlist = True
            del versionStringStart
            del parseLine
            del osxcmd
            if testPlist:
                # Fallback source: the SystemVersion property list.
                try:
                    import plistlib
                    pl = plistlib.readPlist('/System/Library/CoreServices/SystemVersion.plist')
                    notify.info('pl=%s' % str(pl))
                    parseLine = pl['ProductVersion']
                    numbers = parseLine.split('.')
                    notify.info('parseline =%s numbers =%s' % (parseLine, numbers))
                    self.osMinorver = numbers[1]
                    self.osRevver = numbers[2]
                    self.osBuild = pl['ProductBuildVersion']
                except:
                    notify.info('tried plist but still got exception')
                    self.osMinorver = '0'
                    self.osRevver = '0'
                    self.osBuild = '0000'
        return

    def setmilestone(self, ms):
        # First-run sessions get an "_INITIAL" suffix on the milestone tag.
        if firstRun() == False:
            self.milestone = ms
        else:
            self.milestone = '%s_INITIAL' % ms

    def setgamename(self, gamename):
        self.gameName = gamename

    def printosComments(self):
        return self.osComments

    def setHost(self, hostID):
        # hostID indexes into self.hostDict (0 = internal PHP collector).
        self.CurrentHost = hostID

    def getFunnelURL(self):
        """Resolve the collector URL from patcher.ver, caching via patcherVer()."""
        if patcherVer() == ['OFFLINE']:
            return
        if patcherVer() == []:
            # Not yet resolved: download the patcher config and scan it for
            # an uncommented FUNNEL_LOG= line.
            patcherHTTP = HTTPClient()
            if checkParamFile() == None:
                patcherDoc = patcherHTTP.getDocument(URLSpec('http://download.toontown.com/english/currentVersion/content/patcher.ver'))
                vconGroup('w', self.cgRelease)
            else:
                patcherDoc = patcherHTTP.getDocument(URLSpec(checkParamFile()))
                vconGroup('w', self.cgBeta)
            rf = Ramfile()
            patcherDoc.downloadToRam(rf)
            self.patcherURL = rf.getData()
            if self.patcherURL.find('FUNNEL_LOG') == -1:
                # No funnel entry at all: remember that logging is offline.
                patcherVer('w', 'OFFLINE')
                return
            self.patcherURL = self.patcherURL.split('\n')
            del rf
            del patcherDoc
            del patcherHTTP
            while self.patcherURL:
                self.confLine = self.patcherURL.pop()
                if self.confLine.find('FUNNEL_LOG=') != -1 and self.confLine.find('#FUNNEL_LOG=') == -1:
                    self.dynamicVRFunnel = self.confLine[11:].strip('\n')
                    patcherVer('w', self.confLine[11:].strip('\n'))
        else:
            # Already resolved on a previous call: reuse the cached value.
            self.dynamicVRFunnel = patcherVer()[0]
        return

    def isVarSet(self, varInQuestion):
        # Returns 1 if the name is bound, 0 otherwise.
        try:
            tempvar = type(varInQuestion)
            return 1
        except NameError:
            return 0

    def buildURL(self):
        """Assemble self.URLtoSend for the currently selected host."""
        # HitBox encodes the OS family in the query-parameter name: c3=Windows, c4=other.
        if sys.platform == 'win32':
            hitboxOSType = 'c3'
        else:
            hitboxOSType = 'c4'
        if self.CurrentHost == 1:
            self.URLtoSend = 'http://' + self.hostDict[self.CurrentHost] + 'hb=' + str(self.hitboxAcct) + '&n=' + str(self.milestone) + '&ln=' + self.language + '&gp=STARTGAME&fnl=TOONTOWN_FUNNEL&vcon=/' + self.cgRoot + '/' + self.cgLocation + '/' + str(vconGroup()) + '&c1=' + str(sys.platform) + '&' + str(hitboxOSType) + '=' + str(self.osMajorver) + '_' + str(self.osMinorver) + '_' + str(self.osRevver) + '_' + str(self.osBuild)
        if self.CurrentHost == 2:
            self.URLtoSend = 'http://' + self.hostDict[self.CurrentHost] + 'hb=' + str(self.hitboxAcct) + '&n=' + str(self.milestone) + '&ln=' + self.language + '&vcon=/' + self.cgRoot + '/' + self.cgLocation + '/' + str(vconGroup()) + '&c1=' + str(sys.platform) + '&' + str(hitboxOSType) + '=' + str(self.osMajorver) + '_' + str(self.osMinorver) + '_' + str(self.osRevver) + '_' + str(self.osBuild)
        if self.CurrentHost == 0:
            localMAC = str(getMAC())
            self.URLtoSend = str(self.dynamicVRFunnel) + '?funnel=' + str(self.milestone) + '&platform=' + str(sys.platform) + '&sysver=' + str(self.osMajorver) + '_' + str(self.osMinorver) + '_' + str(self.osRevver) + '_' + str(self.osBuild) + '&mac=' + localMAC + '&username=' + str(loggingSubID()) + '&id=' + str(loggingAvID())

    def readInPandaCookie(self):
        """Load cookies from the tab-separated cookie file into the HTTP session."""
        thefile = open(self.cfCookieFile, 'r')
        thedata = thefile.read().split('\n')
        thefile.close()
        del thefile
        # A Netscape-format file is not ours; ignore it.
        if thedata[0].find('Netscape HTTP Cookie File') != -1:
            return
        thedata.pop()
        try:
            while thedata:
                temp = thedata.pop()
                temp = temp.split('\t')
                domain = temp[0]
                loc = temp[1]
                variable = temp[2]
                value = temp[3]
                if variable == 'CTG':
                    self.pandaHTTPClientVarCTG = [domain,
                     loc,
                     variable,
                     value]
                    self.setTheHTTPCookie(self.pandaHTTPClientVarCTG)
                if variable == self.hitboxAcct + 'V6':
                    self.pandaHTTPClientVarDM = [domain,
                     loc,
                     variable,
                     value]
                    self.setTheHTTPCookie(self.pandaHTTPClientVarDM)
                if variable == 'WSS_GW':
                    self.pandaHTTPClientVarWSS = [domain,
                     loc,
                     variable,
                     value]
                    self.setTheHTTPCookie(self.pandaHTTPClientVarWSS)
        except IndexError:
            print 'UserFunnel(Warning): Cookie Data file bad'
        del thedata

    def updateInstanceCookieValues(self):
        """Copy the session's current cookie values back into the instance lists."""
        a = self.httpSession.getCookie(HTTPCookie('WSS_GW', '/', '.hitbox.com'))
        if a.getName():
            self.pandaHTTPClientVarWSS = ['.hitbox.com',
             '/',
             'WSS_GW',
             a.getValue()]
        b = self.httpSession.getCookie(HTTPCookie('CTG', '/', '.hitbox.com'))
        if b.getName():
            self.pandaHTTPClientVarCTG = ['.hitbox.com',
             '/',
             'CTG',
             b.getValue()]
        c = self.httpSession.getCookie(HTTPCookie(self.hitboxAcct + 'V6', '/', 'ehg-dig.hitbox.com'))
        if c.getName():
            self.pandaHTTPClientVarDM = ['ehg-dig.hitbox.com',
             '/',
             self.hitboxAcct + 'V6',
             c.getValue()]
        del a
        del b
        del c

    def setTheHTTPCookie(self, cookieParams):
        # cookieParams is [domain, path, name, value].
        c = HTTPCookie(cookieParams[2], cookieParams[1], cookieParams[0])
        c.setValue(cookieParams[3])
        self.httpSession.setCookie(c)

    def writeOutPandaCookie(self):
        """Persist the cached cookies to the tab-separated cookie file."""
        try:
            thefile = open(self.cfCookieFile, 'w')
            if len(self.pandaHTTPClientVarWSS) == 4:
                thefile.write(self.pandaHTTPClientVarWSS[0] + '\t' + self.pandaHTTPClientVarWSS[1] + '\t' + self.pandaHTTPClientVarWSS[2] + '\t' + self.pandaHTTPClientVarWSS[3] + '\n')
            if len(self.pandaHTTPClientVarCTG) == 4:
                thefile.write(self.pandaHTTPClientVarCTG[0] + '\t' + self.pandaHTTPClientVarCTG[1] + '\t' + self.pandaHTTPClientVarCTG[2] + '\t' + self.pandaHTTPClientVarCTG[3] + '\n')
            if len(self.pandaHTTPClientVarDM) == 4:
                thefile.write(self.pandaHTTPClientVarDM[0] + '\t' + self.pandaHTTPClientVarDM[1] + '\t' + self.pandaHTTPClientVarDM[2] + '\t' + self.pandaHTTPClientVarDM[3] + '\n')
            thefile.close()
        except IOError:
            # Best-effort persistence: silently skip if the file is unwritable.
            return

    def prerun(self):
        """Resolve the URL and load cookies; call before run()."""
        self.getFunnelURL()
        self.buildURL()
        if os.path.isfile(self.cfCookieFile) == True:
            if self.CurrentHost == 1 or self.CurrentHost == 2:
                self.readInPandaCookie()

    def run(self):
        """Kick off the (non-blocking) HTTP request and poll it via the task manager."""
        if self.CurrentHost == 0 and patcherVer() == ['OFFLINE']:
            return
        self.nonBlock = self.httpSession.makeChannel(False)
        self.nonBlock.beginGetDocument(DocumentSpec(self.URLtoSend))
        # Random suffix keeps concurrent requests' task names unique.
        instanceMarker = str(random.randint(1, 1000))
        instanceMarker = 'FunnelLoggingRequest-%s' % instanceMarker
        self.startCheckingAsyncRequest(instanceMarker)

    def startCheckingAsyncRequest(self, name):
        taskMgr.remove(name)
        taskMgr.doMethodLater(0.5, self.pollFunnelTask, name)

    def stopCheckingFunnelTask(self, name):
        # NOTE(review): removes the literal name 'pollFunnelTask', not the
        # 'FunnelLoggingRequest-*' task that scheduled us -- confirm intended.
        taskMgr.remove('pollFunnelTask')

    def pollFunnelTask(self, task):
        # Poll the non-blocking channel; 0 means the transfer finished.
        result = self.nonBlock.run()
        if result == 0:
            self.stopCheckingFunnelTask(task)
            if self.CurrentHost == 1 or self.CurrentHost == 2:
                # Persist any cookies the HitBox collector handed back.
                self.updateInstanceCookieValues()
                self.writeOutPandaCookie()
        else:
            return Task.again
def logSubmit(setHostID, setMileStone):
    """Create a one-shot UserFunnel and submit a milestone event to it.

    No-op in development builds (__dev__) so internal testing does not
    pollute the analytics.
    """
    if __dev__:
        return
    funnel = UserFunnel()
    funnel.setmilestone(quote_plus(setMileStone))
    funnel.setHost(setHostID)
    funnel.prerun()
    funnel.run()
def getVRSFunnelURL():
    """Trigger a funnel-URL lookup on a throwaway UserFunnel.

    NOTE(review): the result is discarded here, so getFunnelURL presumably
    caches it somewhere shared — confirm against its definition.
    """
    funnel = UserFunnel()
    funnel.getFunnelURL()
class HitBoxCookie:
    """Converts hitbox.com web-analytics cookies between MS Internet
    Explorer's per-file cookie format and the single-file Netscape/python
    format ('cf.txt'), in both directions.

    Internally each loaded cookie is a 3-list of [domain, name, value]
    stored in self.ctg, self.wss_gw and self.dmAcct. Heavily Windows/IE
    specific; the IE-side operations are no-ops on other platforms.
    """

    def __init__(self):
        # os.getenv may return None off-Windows; guard so construction does
        # not raise there (the IE paths are unused on non-win32 anyway).
        self.ieCookieDir = (os.getenv('USERPROFILE') or '') + '\\Cookies'
        self.pythonCookieFile = 'cf.txt'
        self.hitboxCookieFile = None    # discovered '...@hitbox[...' file name
        self.ehgdigCookieFile = None    # discovered '...@ehg-dig.hitbox[...' file name
        self.hitboxAcct = 'DM53030620EW'  # account tag searched for inside cookies
        self.ctg = None       # [domain, name, value] once loaded
        self.wss_gw = None    # [domain, name, value] once loaded
        self.dmAcct = None    # [domain, name, value] once loaded
        self.pythonCookieHeader = ' # Netscape HTTP Cookie File\n # http://www.netscape.com/newsref/std/cookie_spec.html\n # This is a generated file! Do not edit.\n\n'
        return

    def returnIECookieDir(self):
        """Return the IE per-user cookie directory path."""
        return self.ieCookieDir

    def findIECookieFiles(self):
        """Scan the IE cookie directory for the two hitbox cookie files.

        Returns 1 when both files are found, 0 when neither is, -1 when only
        one of the two is present, and None when the directory is missing.
        """
        try:
            sdir = os.listdir(self.ieCookieDir)
        except WindowsError:
            print('Dir does not exist, do nothing')
            return
        while sdir:
            temp = sdir.pop()
            if temp.find('@hitbox[') != -1:
                self.hitboxCookieFile = temp
            if temp.find('@ehg-dig.hitbox[') != -1:
                self.ehgdigCookieFile = temp
            # Stop scanning as soon as both cookie files are located.
            if self.hitboxCookieFile != None and self.ehgdigCookieFile != None:
                return 1
        if self.hitboxCookieFile == None and self.ehgdigCookieFile == None:
            return 0
        else:
            return -1

    def openHitboxFile(self, filename, type = 'python'):
        """Read and return the contents of a cookie file.

        type='ie' resolves the name inside the IE cookie directory; any
        other value treats ``filename`` as a path as-is.
        """
        if type == 'ie':
            fullfile = self.ieCookieDir + '\\' + filename
        else:
            fullfile = filename
        cf = open(fullfile, 'r')
        data = cf.read()
        cf.close()
        return data

    def splitIECookie(self, filestream):
        """Split an IE cookie file into its '*'-delimited records."""
        return filestream.split('*\n')

    def sortIECookie(self, filestreamListElement):
        """Reorder one IE record (name\\nvalue\\ndomain...) into
        [domain, name, value]."""
        return [filestreamListElement.split('\n')[2], filestreamListElement.split('\n')[0], filestreamListElement.split('\n')[1]]

    def sortPythonCookie(self, filestreamListElement):
        """Pick [domain, name, value] out of one tab-separated
        Netscape-format cookie line."""
        return [filestreamListElement.split('\t')[0], filestreamListElement.split('\t')[5], filestreamListElement.split('\t')[6]]

    def writeIEHitBoxCookies(self):
        """Write the loaded ctg/wss_gw/dmAcct cookies out in IE's per-file
        format. No-op unless all three cookies are loaded and we are on
        win32."""
        if self.ctg == None or self.wss_gw == None or self.dmAcct == None:
            return
        if sys.platform != 'win32':
            return
        self.findIECookieFiles()
        iecData = self.openHitboxFile(self.ehgdigCookieFile, 'ie')
        iecData = iecData.split('*\n')
        # Drop any existing record for our account before re-writing it.
        x = 0
        while x < len(iecData):
            if iecData[x].find(self.hitboxAcct) != -1:
                iecData.pop(x)
                print('Removed it from the list')
                break
            x += 1
        iecWrite = open(self.ieCookieDir + '\\' + self.ehgdigCookieFile, 'w')
        while iecData:
            iecBuffer = iecData.pop() + '*\n'
            iecBuffer = iecBuffer.strip('/')
            if iecBuffer[0] == '.':
                # IE records store the bare domain, no leading dot.
                iecBuffer = iecBuffer[1:]
            iecWrite.write(iecBuffer)
        tempDMBUFFER = self.dmAcct[0]
        if tempDMBUFFER[0].find('.') == 0:
            tempDMBUFFER = tempDMBUFFER[1:]
        iecWrite.write(self.dmAcct[1] + '\n' + self.dmAcct[2] + '\n' + tempDMBUFFER + '/\n*\n')
        iecWrite.close()
        del iecData
        del iecWrite
        del iecBuffer
        iecWrite = open(self.ieCookieDir + '\\' + self.hitboxCookieFile, 'w')
        iecBuffer = self.ctg[0]
        if iecBuffer[0] == '.':
            iecBuffer = iecBuffer[1:]
        if iecBuffer.find('/') == -1:
            iecBuffer = iecBuffer + '/'
        iecWrite.write(self.ctg[1] + '\n' + self.ctg[2] + '\n' + iecBuffer + '\n*\n')
        iecWrite.write(self.wss_gw[1] + '\n' + self.wss_gw[2] + '\n' + iecBuffer + '\n*\n')
        iecWrite.close()
        return

    def OLDwritePythonHitBoxCookies(self, filename = 'cf.txt'):
        """Legacy writer: emit the three cookies in full Netscape cookie-file
        format (with header and all seven columns). Kept for reference."""
        if self.ctg == None or self.wss_gw == None or self.dmAcct == None:
            return
        outputfile = open(filename, 'w')
        outputfile.write(self.pythonCookieHeader)
        outputfile.write('.' + self.dmAcct[0].strip('/') + '\tTRUE\t/\tFALSE\t9999999999\t' + self.dmAcct[1] + '\t' + self.dmAcct[2] + '\n')
        outputfile.write('.' + self.ctg[0].strip('/') + '\tTRUE\t/\tFALSE\t9999999999\t' + self.ctg[1] + '\t' + self.ctg[2] + '\n')
        outputfile.write('.' + self.wss_gw[0].strip('/') + '\tTRUE\t/\tFALSE\t9999999999\t' + self.wss_gw[1] + '\t' + self.wss_gw[2] + '\n')
        outputfile.close()
        return

    def writePythonHitBoxCookies(self, filename = 'cf.txt'):
        """Emit the three cookies in the compact tab-separated format that
        the game client's own cookie reader consumes. No-op unless all three
        cookies are loaded."""
        if self.ctg == None or self.wss_gw == None or self.dmAcct == None:
            return
        outputfile = open(filename, 'w')
        outputfile.write('.' + self.dmAcct[0].strip('/') + '\t/\t' + self.dmAcct[1] + '\t' + self.dmAcct[2] + '\n')
        outputfile.write('.' + self.ctg[0].strip('/') + '\t/\t' + self.ctg[1] + '\t' + self.ctg[2] + '\n')
        outputfile.write('.' + self.wss_gw[0].strip('/') + '\t/\t' + self.wss_gw[1] + '\t' + self.wss_gw[2] + '\n')
        outputfile.close()
        return

    def loadPythonHitBoxCookies(self):
        """Populate dmAcct/ctg/wss_gw from the Netscape-format cookie file,
        if it exists."""
        if os.path.isfile(self.pythonCookieFile) != 1:
            return
        pythonStandard = self.openHitboxFile(self.pythonCookieFile, 'python')
        # Skip the header block (everything before the first blank line).
        pythonStandard = pythonStandard.split('\n\n').pop(1)
        pythonStandard = pythonStandard.split('\n')
        for x in pythonStandard:
            if x.find('\t' + self.hitboxAcct) != -1:
                self.dmAcct = self.sortPythonCookie(x)
            if x.find('\tCTG\t') != -1:
                self.ctg = self.sortPythonCookie(x)
            if x.find('\tWSS_GW\t') != -1:
                self.wss_gw = self.sortPythonCookie(x)

    def loadIEHitBoxCookies(self):
        """Populate dmAcct/ctg/wss_gw from the IE per-file cookies. No-op
        unless both IE cookie files exist, we are on win32, and every
        expected record is present."""
        if self.findIECookieFiles() != 1:
            return
        if sys.platform != 'win32':
            return
        hitboxStandard = self.openHitboxFile(self.hitboxCookieFile, 'ie')
        hitboxDIG = self.openHitboxFile(self.ehgdigCookieFile, 'ie')
        hitboxStandard = self.splitIECookie(hitboxStandard)
        hitboxDIG = self.splitIECookie(hitboxDIG)
        ctg = None
        wss = None
        for x in hitboxStandard:
            if x.find('CTG\n') != -1:
                ctg = x
            if x.find('WSS_GW\n') != -1:
                wss = x
        if ctg == None or wss == None:
            return
        DM = None
        for x in hitboxDIG:
            if x.find(self.hitboxAcct) != -1:
                DM = x
        if DM == None:
            return
        self.ctg = self.sortIECookie(ctg)
        self.wss_gw = self.sortIECookie(wss)
        # Bug fix: this originally assigned to 'self.dm560804E8WD', an
        # attribute nothing else reads, which left self.dmAcct None and
        # silently disabled both writePythonHitBoxCookies and
        # writeIEHitBoxCookies after a load.
        self.dmAcct = self.sortIECookie(DM)
        return
def convertHitBoxIEtoPython():
    """Translate the IE-format hitbox cookies into the python 'cf.txt'
    format. Windows-only; skipped in development builds."""
    if sys.platform != 'win32':
        print('Cookie Converter: Warning: System is not MS-Windows. I have not been setup to work with other systems yet. Sorry ' + sys.platform + ' user. The game client will create a cookie.')
        return
    if __dev__:
        return
    converter = HitBoxCookie()
    converter.loadIEHitBoxCookies()
    converter.writePythonHitBoxCookies()
    del converter
def convertHitBoxPythontoIE():
    """Convert the python-format 'cf.txt' hitbox cookie back into IE's
    per-file cookie format (win32 only).

    Bug fix: the original guard returned when 'cf.txt' DID exist, which made
    the function a guaranteed no-op — with no input file, both
    loadPythonHitBoxCookies and writeIEHitBoxCookies bail out immediately.
    The guard now skips only when the input file is missing.
    """
    if sys.platform != 'win32':
        print('System is not MS-Windows. I have not been setup to work with other systems yet. Sorry ' + sys.platform + ' user.')
        return
    if os.path.isfile('cf.txt') != True:
        # Nothing to convert without the python-format cookie file.
        return
    a = HitBoxCookie()
    a.loadPythonHitBoxCookies()
    a.writeIEHitBoxCookies()
    del a
def getreg(regVar):
if sys.platform != 'win32':
print "System is not MS-Windows. I haven't been setup yet to work with systems other than MS-Win using MS-Internet Explorer Cookies"
return ''
siteName = 'toontown.online.disney'
cookiedir = os.getenv('USERPROFILE') + '\\Cookies'
sdir = os.listdir(cookiedir)
wholeCookie = None
while sdir:
temp = sdir.pop()
if temp.find(siteName) != -1:
wholeCookie = temp
break
if wholeCookie == None:
print 'Cookie not found for site name: ' + siteName
return ''
CompleteCookiePath = cookiedir + '\\' + wholeCookie
cf = open(CompleteCookiePath, 'r')
data = cf.read()
cf.close()
del cf
data = data.replace('%3D', '=')
data = data.replace('%26', '&')
regNameStart = data.find(regVar)
if regNameStart == -1:
return ''
regVarStart = data.find('=', regNameStart + 1)
regVarEnd = data.find('&', regNameStart + 1)
return data[regVarStart + 1:regVarEnd]
def getMAC(staticMAC = [None]):
    """Return this machine's MAC address as a string, caching the result.

    The mutable default argument is used deliberately as a function-static
    cache: the address is parsed once and reused on later calls.

    Returns 'NO_MAC' if ipconfig cannot be run (win32), '-1' if no address
    was parsed (darwin), '-2' on unsupported platforms.
    """
    if staticMAC[0] == None:
        if sys.platform == 'win32':
            correctSection = 0
            try:
                # NOTE(review): hard-coded path assumes the Windows system
                # root lives on the current drive — confirm.
                ipconfdata = os.popen('/WINDOWS/SYSTEM32/ipconfig /all').readlines()
            except:
                staticMAC[0] = 'NO_MAC'
                return staticMAC[0]
            for line in ipconfdata:
                # Only accept the physical address found inside the
                # 'Local Area Connection' section of the output.
                if line.find('Local Area Connection') >= 0:
                    correctSection = 1
                if line.find('Physical Address') >= 0 and correctSection == 1:
                    pa = line.split(':')[-1].strip()
                    correctSection = 0
                    staticMAC[0] = pa
                    return pa
        if sys.platform == 'darwin':
            macconfdata = os.popen('/usr/sbin/system_profiler SPNetworkDataType |/usr/bin/grep MAC').readlines()
            result = '-1'
            if macconfdata:
                if macconfdata[0].find('MAC Address') != -1:
                    # Slice out 'xx:xx:...' after the label and normalize
                    # the separators to '-' to match the win32 format.
                    pa = macconfdata[0][macconfdata[0].find(':') + 2:macconfdata[0].find(':') + 22].strip('\n')
                    staticMAC[0] = pa.replace(':', '-')
                    result = staticMAC[0]
            return result
        if sys.platform != 'darwin' and sys.platform != 'win32':
            print 'System is not running OSX or MS-Windows.'
            return '-2'
    else:
        return staticMAC[0]
    return
def firstRun(operation = 'read', newPlayer = None, newPlayerBool = [False]):
    """Single-slot static store recording whether this is a first-run player.

    The mutable default argument is the store. Any operation other than
    'read' replaces the stored value with ``newPlayer``; every call returns
    the currently stored value.
    """
    if operation != 'read':
        if newPlayerBool:
            newPlayerBool.pop()
        newPlayerBool.append(newPlayer)
    return newPlayerBool[0]
def patcherVer(operation = 'read', url = None, patchfile = []):
    """Single-slot static store for the patcher.ver URL.

    Any operation other than 'read' replaces the stored value with ``url``;
    every call returns the store itself (a list that is empty until the
    first write).
    """
    if operation != 'read':
        if patchfile:
            patchfile.pop()
        patchfile.append(url)
    return patchfile
def loggingAvID(operation = 'read', newId = None, localAvId = [None]):
    """Static store for the avatar ID used in logging.

    operation='write' stores ``newId`` (and returns None); any other
    operation returns the stored value.
    """
    if operation != 'write':
        return localAvId[0]
    localAvId[0] = newId
def loggingSubID(operation = 'read', newId = None, localSubId = [None]):
    """Static store for the subscription ID used in logging.

    operation='write' stores ``newId`` (and returns None); any other
    operation returns the stored value.
    """
    if operation != 'write':
        return localSubId[0]
    localSubId[0] = newId
def vconGroup(operation = 'read', group = None, staticStore = []):
    """Single-slot static store for the vcon group.

    Any operation other than 'read' replaces the stored value with
    ``group``. Returns the stored value, or None while nothing has been
    written yet.
    """
    if operation != 'read':
        if staticStore:
            staticStore.pop()
        staticStore.append(group)
    try:
        return staticStore[0]
    except IndexError:
        return None
def printUnreachableLen():
    """Force a collection with DEBUG_SAVEALL and return the length of the
    string repr of everything held in gc.garbage (a rough leak metric)."""
    import gc
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    return len(str(list(gc.garbage)))
def printUnreachableNum():
    """Force a collection with DEBUG_SAVEALL and return how many objects
    ended up in gc.garbage."""
    import gc
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    return len(gc.garbage)
def reportMemoryLeaks():
    """Best-effort: collect every uncollectable object, bzip2-compress their
    reprs, and POST the blob to the logging server derived from the patcher
    base URL. Silently returns when there is nothing to report or no base
    URL is available.
    """
    if printUnreachableNum() == 0:
        return
    import bz2, gc
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    uncompressedReport = ''
    for s in gc.garbage:
        try:
            uncompressedReport += str(s) + '&'
        except TypeError:
            # Some garbage objects cannot be str()'d; skip them.
            pass
    reportdata = bz2.compress(uncompressedReport, 9)
    headers = {'Content-type': 'application/x-bzip2',
     'Accept': 'text/plain'}
    try:
        # patcherVer() holds e.g. 'http://host[:port]/lo...'; keep everything
        # before '/lo' as the base.
        baseURL = patcherVer()[0].split('/lo')[0]
    except IndexError:
        print 'Base URL not available for leak submit'
        return
    basePort = 80
    if baseURL.count(':') == 2:
        # A second ':' means an explicit port: split it off the URL.
        # NOTE(review): assumes the port is always 4 digits — confirm.
        basePort = baseURL[-4:]
        baseURL = baseURL[:-5]
    baseURL = baseURL[7:]
    if basePort != 80:
        finalURL = 'http://' + baseURL + ':' + str(basePort) + '/logging/memory_leak.php?leakcount=' + str(printUnreachableNum())
    else:
        finalURL = 'http://' + baseURL + '/logging/memory_leak.php?leakcount=' + str(printUnreachableNum())
    reporthttp = HTTPClient()
    reporthttp.postForm(URLSpec(finalURL), reportdata)
def checkParamFile():
    """Read 'parameters.txt' from the working directory and, if it defines a
    non-empty PATCHER_BASE_URL, return the full patcher.ver URL. Returns
    None when the file or setting is absent."""
    if not os.path.exists('parameters.txt'):
        return
    paramfile = open('parameters.txt', 'r')
    contents = paramfile.read()
    paramfile.close()
    del paramfile
    # Scan from the bottom of the file, mirroring the original pop() order.
    for checkLine in reversed(contents.split('\n')):
        if checkLine.find('PATCHER_BASE_URL=') != -1 and checkLine[0] == 'P':
            newURL = checkLine.split('=')[1].replace(' ', '')
            if newURL == '':
                return
            return newURL + 'patcher.ver'
    return
| |
from copy import deepcopy
import numpy as nump
ndsa = 58  # number of Donation Service Areas (DSAs) iterated by Allocate()
#####################################################Organ Allocation Procedures, Offer Routines, and Matching Functions#######################################################################
def Allocate(organ, OPTN, Sim, Regions, SharingPartners, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel):
    """
    This function compiles the offer lists and calls functions for subsequent matching and offering. It returns a tuple describing whether the organ is transplanted
    or discarded, the transplanting DSA (if transplanted), and the patient id (if transplanted).

    Input:
    @organ: organ that needs a match for transplant
    @OPTN: complete patient data
    @Sim: class object containing relevant variables for simulation
    @Regions: neighborhood map for regions, districts, or neighborhoods
    @SharingPartners: neighborhood map adding sharing partners to existing geographic relationships among OPOs
    @Patients_Accept: coefficients regarding patient's characteristics for acceptance model
    @Donor_Accept: coefficients regarding donor's characteristics for acceptance model
    @DSA_Avg_Times: data on average transport times between DSAs
    @AcceptanceModelS1: coefficients regarding patient's characteristics for status 1 acceptance model
    @AcceptanceModel: coefficients regarding patient's characteristics for non-status 1 acceptance model

    Output:
    @(organ transplanted/discarded, transplanting DSA [if transplanted], patient id [if transplanted]): tuple with information on the organ and
    corresponding DSA and patient (if transplanted)
    """
    #Compile Offer List
    LocalList = deepcopy(OPTN[organ.DSA]) #preinitialize list of potential match patients within the DSA; list of match patients already made
    RegionalList = [] #preinitialize list of potential match patients within a region
    NationalList =[] #preinitialize list of potential match patients outside a region within the nation
    #Give boost to local candidates if applicable
    if Sim.localboost > 0:
        #iterate through patients of local list
        for patient in LocalList:
            #if patient MELD is below 40, give a local boost
            if patient.MELD <=40:
                #Boosting non-Status1 candidates
                patient.MELD = patient.MELD + Sim.localboost
                #if MELD score is over 40, set it down to 40 as the max (MELD is capped at 40)
                if patient.MELD > 40:
                    patient.MELD =40
    #iterate through list of DSAs
    for i in range(0,ndsa):
        #if sharing partners are implemented and if a DSA is a sharing partner of the current DSA
        #add to the regional list
        #NOTE(review): a DSA that is both a sharing partner AND a geographic
        #neighbor is appended to RegionalList twice (duplicate candidates) —
        #confirm this is intended.
        if Sim.spartners ==1 and SharingPartners[organ.DSA,i]==1:
            RegionalList = RegionalList + deepcopy(OPTN[i])
        #if a DSA is a neighbor of a current DSA, add it to the regional list
        if Regions[organ.DSA,i] ==1 and i !=organ.DSA:
            RegionalList = RegionalList + deepcopy(OPTN[i])
        #if not, add it to the national list
        elif Regions[organ.DSA,i] !=1 and i !=organ.DSA:
            NationalList = NationalList + deepcopy(OPTN[i])
    #Give boost to regional candidates if applicable
    if Sim.regionalboost > 0:
        for patient in RegionalList:
            if patient.MELD <=40:
                #Boosting non-Status1 candidates
                patient.MELD = patient.MELD + Sim.regionalboost
                if patient.MELD > 40:
                    patient.MELD =40
    #If there is regional sharing, merge the local list and the regional list together
    if Sim.regional_sharing == 1:
        #combine the local list and regional list
        OfferList = LocalList + RegionalList
        #sort the National List
        NationalList = sorted(NationalList,key=lambda patient: patient.MELD, reverse=True)
        #sort the merged list
        OfferList = sorted(OfferList,key=lambda patient: patient.MELD, reverse=True)
        #combine the offer list and national list together (national candidates always rank after local/regional)
        OfferList = OfferList + NationalList
    else:
        #Implement Share 35 and Share 15
        OfferList = [] #preintialize Offer List
        Share35 = [] #preinitialize Share 35 list (MELD >= Sim.ShareU, local + regional)
        StandardList = [] #preintialize Standard List (local, ShareL <= MELD < ShareU)
        Share15 = [] #preinitialize Share 15 list (local, MELD < Sim.ShareL)
        Share15_2 = [] #preinitialize another Share15 List (regional)
        Share15_3 = [] #preinitialize another Share15 List (national)
        StandardList2 = [] #preinitialize another standard list (regional)
        StandardList3 = [] #preinitialize another standard list (national)
        #iterate through patients of the Local List
        for patient in LocalList:
            #if patient's MELD is at least Share35 value, add him to the Share35 List
            if patient.MELD >= Sim.ShareU:
                Share35.append(patient)
            #if patient's MELD is below Share15 value, add him to the Share 15 List
            elif patient.MELD <Sim.ShareL:
                Share15.append(patient)
            #else add him to the Standard List
            else:
                StandardList.append(patient)
        #iterate through patients of the Regional List
        for patient in RegionalList:
            #if patient's MELD is at least Share35 value, add him to the Share35 List
            if patient.MELD >= Sim.ShareU:
                Share35.append(patient)
            #if patient's MELD is at below Share15 value, add him to the Share15 List
            elif patient.MELD <Sim.ShareL:
                Share15_2.append(patient)
            #else add him to the Standard List
            else:
                StandardList2.append(patient)
        #iterate patients of the National List
        for patient in NationalList:
            #if patient's MELD is below Share15 value, add him to the Share15 List
            if patient.MELD <Sim.ShareL:
                Share15_3.append(patient)
            #else add him to the second standard list
            else:
                StandardList3.append(patient)
        #sort the lists by MELD Score in decreasing order
        Share35 = sorted(Share35,key=lambda patient: patient.MELD, reverse=True)
        StandardList = sorted(StandardList,key=lambda patient: patient.MELD, reverse=True)
        StandardList2 = sorted(StandardList2,key=lambda patient: patient.MELD, reverse=True)
        StandardList3 = sorted(StandardList3, key=lambda patient:patient.MELD, reverse = True)
        Share15 = sorted(Share15,key=lambda patient: patient.MELD, reverse=True)
        Share15_2 = sorted(Share15_2, key = lambda patient: patient.MELD, reverse = True)
        Share15_3 = sorted(Share15_3, key = lambda patient: patient.MELD, reverse = True)
        #combine the lists: Share35 first, then standard tiers (local, regional, national), then Share15 tiers
        OfferList = Share35 + StandardList + StandardList2 + StandardList3 + Share15 + Share15_2 + Share15_3
    #Execute Match-Run
    if OfferList ==[]:
        return [0,1,[],[]]
    else:
        return MatchRun(organ,OfferList, Sim, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel)
    #delete offer list to clear memory
    #NOTE(review): unreachable — both branches above return first.
    del OfferList
def MatchRun(offered_organ, offered_list, Sim, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel):
    """Perform a match run for the offered organ over the ranked offer list.

    Walks the list in order, offering the organ to each candidate who passes
    the MatchCheck screen, until either one accepts or the rejection cap
    (Sim.maxrejects) is reached.

    Inputs:
    @offered_organ: organ being offered
    @offered_list: ranked list of eligible patients for the offered organ
    @Sim: class object containing relevant variables for simulation
    @Patients_Accept: coefficients regarding patient's characteristics for acceptance model
    @Donor_Accept: coefficients regarding donor's characteristics for acceptance model
    @DSA_Avg_Times: data on average transport time between DSAs
    @AcceptanceModelS1: coefficients for the status-1 acceptance model
    @AcceptanceModel: coefficients for the non-status-1 acceptance model

    Output:
    @[match_found, recipient_DSA, recipient_id]: the last two entries are
    empty lists when no candidate accepts.
    """
    offers_made = 0                 # count of (rejected) offers so far
    outcome = [0, 1, [], []]        # [accepted, rejected, DSA, patient id]
    for candidate in offered_list:
        # Stop once the maximum number of offers has been used up.
        if offers_made >= Sim.maxrejects:
            break
        # Skip candidates that fail the biologic/availability screen.
        if MatchCheck(offered_organ, candidate, Sim) != 1:
            continue
        outcome = Offer(offered_organ, candidate, offers_made, Sim,
                        Patients_Accept, Donor_Accept, DSA_Avg_Times,
                        AcceptanceModelS1, AcceptanceModel)
        offers_made = offers_made + outcome[1]
        if outcome[0] == 1:
            # Offer accepted for transplant.
            break
    return [outcome[0], outcome[2], outcome[3]]
def MatchCheck(offered_organ, potential_recipient, Sim):
    """
    Initial screen: decide whether the potential recipient can accept the
    offered organ at all, based on blood-type compatibility, waitlist
    activity, and relist timing.

    Input:
    @offered_organ: organ being offered
    @potential_recipient: recipient being checked for eligibility
    @Sim: class object containing relevant variables for simulation

    Output:
    @compatible: 1 if the patient is eligible for the organ, else 0
    """
    # Patient must be active on the waitlist.
    active = 1 - potential_recipient.Inactive
    # A relisted patient whose (re)transplant time precedes the simulation
    # clock is not yet ready for another offer.
    ready = 1
    if potential_recipient.Relist == 1 and potential_recipient.RelistTxTime < Sim.clock:
        ready = 0
    # Blood-type compatibility (donor ABO code -> allowed recipient codes).
    donor_abo = offered_organ.ABO
    recipient_abo = potential_recipient.ABO
    if donor_abo == 0:
        bcompatible = 1 if recipient_abo in (0, 1) else 0
    elif donor_abo == 1:
        bcompatible = 1 if recipient_abo == 1 else 0
    elif donor_abo == 2:
        bcompatible = 1 if recipient_abo in (1, 2) else 0
    else:
        bcompatible = 1
    return bcompatible * active * ready
def Offer(offered_organ, matching_recipient, noffers, Sim, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel):
    """This function offers the offered organ to the matching recipient and sees if the recipient accepts or not.

    The accept/reject decision is drawn from a logistic (logit) acceptance
    model: a feature vector is assembled for the recipient/organ pair and
    dotted with the appropriate coefficient vector (status-1 vs
    non-status-1), and the resulting probability is compared to a uniform
    draw.

    NOTE(review): the integer indices into Patients_Accept[...] and
    Donor_Accept[...] are presumably column positions of the source
    datasets — confirm against the data dictionary before changing any.

    Input:
    @offered_organ: organ being offered
    @matching_recipient: recipient being offered the organ
    @noffers: number of offers made already before the matching recipient
    @Sim: a class object that contains variables relevant to the simulation
    @Patients_Accept: coefficients regarding patient's characteristics for acceptance model
    @Donor_Accept: coefficients regarding donor's characteristics for acceptance model
    @DSA_Avg_Times: data on average transport time between DSAs
    @AcceptanceModelS1: coefficients regarding patient's characteristics for status-1 acceptance model
    @AcceptanceModel: coefficients regarding patient's characteristics for non-status-1 acceptance model

    Output:
    @[accept, reject, matching_recipient.DSA, matching_recipient.id]: accept is an indicator of whether the patient
    accepts the organ or not, reject is an indicator of whether the patient rejects the organ or not, DSA is the
    patient's DSA, and id is the patient's id
    """
    accept =1 #default acceptance
    #Generate acceptance decision
    # NOTE: uniform(0,1,1) returns a 1-element ndarray; the comparison below
    # therefore yields a 1-element boolean array that int() collapses to 0/1.
    r1 = nump.random.uniform(0,1,1)
    #Implement Acceptance Model
    if matching_recipient.Status1 ==1: #Status-1 patient
        # Feature vector for the status-1 logit model (order must match
        # AcceptanceModelS1 coefficients exactly).
        patientx = [1,
                    float(Patients_Accept[matching_recipient.id][217]),
                    float(Patients_Accept[matching_recipient.id][218]),
                    float(Donor_Accept[offered_organ.organid][94]),
                    DSA_Avg_Times[offered_organ.DSA,matching_recipient.DSA],
                    float(Donor_Accept[offered_organ.organid][47]),
                    float(Donor_Accept[offered_organ.organid][15]),
                    float(Patients_Accept[matching_recipient.id][13]=="True"),
                    1,
                    0,
                    float(float(Patients_Accept[matching_recipient.id][223])<float(Donor_Accept[offered_organ.organid][11])),
                    0,
                    float(float(Donor_Accept[offered_organ.organid][11])>67.49),
                    float(float(Patients_Accept[matching_recipient.id][122])>2),
                    float(float(Patients_Accept[matching_recipient.id][122])>2.5)
                    ]
        # Logistic transform: P(accept) = e^xB / (1 + e^xB)
        accept_prob = nump.exp(nump.dot(patientx,AcceptanceModelS1)) / (1+nump.exp(nump.dot(patientx,AcceptanceModelS1)))
    else: #non-status 1 patient
        # Feature vector for the non-status-1 logit model (order must match
        # AcceptanceModel coefficients exactly).
        patientx = [1,
                    noffers,
                    float(Patients_Accept[matching_recipient.id][122]),
                    float(Patients_Accept[matching_recipient.id][228]),
                    float(Patients_Accept[matching_recipient.id][218]),
                    float(Patients_Accept[matching_recipient.id][219]),
                    float(Donor_Accept[offered_organ.organid][38]),
                    float(Donor_Accept[offered_organ.organid][20]),
                    float(matching_recipient.DSA==offered_organ.DSA),
                    DSA_Avg_Times[offered_organ.DSA,matching_recipient.DSA],
                    (Sim.clock - matching_recipient.create_time)*365,
                    float(offered_organ.ABO==2),
                    float(offered_organ.ABO==1),
                    float(Donor_Accept[offered_organ.organid][98]=="Y"),
                    float(Donor_Accept[offered_organ.organid][93]=="P: Positive"),
                    float(Donor_Accept[offered_organ.organid][90]=="Y"),
                    float(Donor_Accept[offered_organ.organid][96]=="Y"),
                    float(Donor_Accept[offered_organ.organid][30]),
                    float(Donor_Accept[offered_organ.organid][70]),
                    float(Donor_Accept[offered_organ.organid][15]),
                    float(Donor_Accept[offered_organ.organid][100]=="128: Native Hawaiian or Other Pacific Islander"),
                    float(Donor_Accept[offered_organ.organid][55]),
                    float(Donor_Accept[offered_organ.organid][92]=="7: GUNSHOT WOUND"),
                    float(Donor_Accept[offered_organ.organid][91]=="6: DEATH FROM NATURAL CAUSES"),
                    float(Donor_Accept[offered_organ.organid][95]=="1: NO"),
                    float(matching_recipient.ABO==3),
                    float(Patients_Accept[matching_recipient.id][13]=="True"),
                    float(Patients_Accept[matching_recipient.id][214]=="Y"),
                    float(Patients_Accept[matching_recipient.id][212]=="True"),
                    float(Patients_Accept[matching_recipient.id][213]=="True"),
                    float(Patients_Accept[matching_recipient.id][215]=="Y"),
                    float(Patients_Accept[matching_recipient.id][227]=="Y"),
                    float(Patients_Accept[matching_recipient.id][64]==Donor_Accept[offered_organ.organid][10]),
                    1,
                    matching_recipient.lMELD,
                    0,
                    float(float(Patients_Accept[matching_recipient.id][223])<float(Donor_Accept[offered_organ.organid][11])),
                    float(matching_recipient.lMELD>12),
                    float(matching_recipient.lMELD>13),
                    float(matching_recipient.lMELD>15),
                    1,
                    float(matching_recipient.MELD>25),
                    float(float(matching_recipient.lMELD)+10<float(matching_recipient.MELD)),
                    float(float(Donor_Accept[offered_organ.organid][89])>326),
                    float(float(Donor_Accept[offered_organ.organid][89])>623),
                    float(Donor_Accept[offered_organ.organid][28]),
                    float(float(Patients_Accept[matching_recipient.id][216])>3000),
                    float(float(Patients_Accept[matching_recipient.id][217])>0),
                    float(float(Patients_Accept[matching_recipient.id][222])>154.94),
                    float(float(Patients_Accept[matching_recipient.id][122])>.7),
                    float(matching_recipient.Na > 131)
                    ]
        # Logistic transform: P(accept) = e^xB / (1 + e^xB)
        accept_prob = nump.exp(nump.dot(patientx,AcceptanceModel)) / (1+nump.exp(nump.dot(patientx,AcceptanceModel)))
    #based on acceptance probability, determine to accept or not based on simulation
    accept = int(r1 <= accept_prob)
    #Return information based on decision
    if accept ==1:
        return [1,0,matching_recipient.DSA, matching_recipient.id]
    else:
        return [0,1,[],[]]
##########################################################################################################################################################################
| |
from __future__ import print_function, division
from sympy.core import S, C
from sympy.core.compatibility import u
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.core.logic import fuzzy_not
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core import Add, Mul
from sympy.core.relational import Eq
from sympy.functions.elementary.trigonometric import atan, atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
    """Returns real part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly
    more complicated expressions. If completely simplified result
    is needed then use Basic.as_real_imag() or perform complex
    expansion on instance of this function.

    Examples
    ========

    >>> from sympy import re, im, I, E
    >>> from sympy.abc import x, y
    >>> re(2*E)
    2*E
    >>> re(2*I + 17)
    17
    >>> re(2*I)
    0
    >>> re(im(x) + x*I + 2)
    2

    See Also
    ========
    im
    """

    is_real = True
    unbranched = True  # implicitly works on the projection to C

    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        elif arg.is_real:
            return arg
        elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
            return S.Zero
        elif arg.is_Function and arg.func is conjugate:
            # re(conjugate(z)) == re(z)
            return re(arg.args[0])
        else:
            # Decompose a sum term-by-term into: purely real terms
            # (excluded), coefficients of I that are not real (reverted,
            # contributing -im), and terms we cannot classify (included).
            included, reverted, excluded = [], [], []
            args = Add.make_args(arg)
            for term in args:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_real:
                        reverted.append(coeff)
                    # a real coefficient of I contributes nothing to re()
                elif not term.has(S.ImaginaryUnit) and term.is_real:
                    excluded.append(term)
                else:
                    # Try to do some advanced expansion. If
                    # impossible, don't try to do re(arg) again
                    # (because this is what we are trying to do now).
                    real_imag = term.as_real_imag(ignore=arg)
                    if real_imag:
                        excluded.append(real_imag[0])
                    else:
                        included.append(term)
            # Only rebuild if something was actually simplified.
            if len(args) != len(included):
                a, b, c = map(lambda xs: Add(*xs),
                    [included, reverted, excluded])
                return cls(a) - im(b) + c

    def as_real_imag(self, deep=True, **hints):
        """
        Returns the real number with a zero complex part.
        """
        return (self, S.Zero)

    def _eval_derivative(self, x):
        if x.is_real or self.args[0].is_real:
            return re(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary or self.args[0].is_imaginary:
            return -S.ImaginaryUnit \
                * im(Derivative(self.args[0], x, evaluate=True))

    def _sage_(self):
        # Conversion hook used by Sage's sympy interface.
        import sage.all as sage
        return sage.real_part(self.args[0]._sage_())
class im(Function):
    """
    Returns imaginary part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly more
    complicated expressions. If completely simplified result is needed then
    use Basic.as_real_imag() or perform complex expansion on instance of
    this function.

    Examples
    ========

    >>> from sympy import re, im, E, I
    >>> from sympy.abc import x, y
    >>> im(2*E)
    0
    >>> re(2*I + 17)
    17
    >>> im(x*I)
    re(x)
    >>> im(re(x) + y)
    im(y)

    See Also
    ========
    re
    """

    is_real = True
    unbranched = True  # implicitly works on the projection to C

    @classmethod
    def eval(cls, arg):
        if arg is S.NaN:
            return S.NaN
        elif arg.is_real:
            return S.Zero
        elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
            # For purely imaginary arg, im(arg) == -I*arg (a real value).
            return -S.ImaginaryUnit * arg
        elif arg.is_Function and arg.func is conjugate:
            # im(conjugate(z)) == -im(z)
            return -im(arg.args[0])
        else:
            # Decompose a sum term-by-term into: real coefficients of I
            # (excluded, they ARE the imaginary part), non-real coefficients
            # of I (reverted, contributing +re), and unclassified (included).
            included, reverted, excluded = [], [], []
            args = Add.make_args(arg)
            for term in args:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_real:
                        reverted.append(coeff)
                    else:
                        excluded.append(coeff)
                elif term.has(S.ImaginaryUnit) or not term.is_real:
                    # Try to do some advanced expansion. If
                    # impossible, don't try to do im(arg) again
                    # (because this is what we are trying to do now).
                    real_imag = term.as_real_imag(ignore=arg)
                    if real_imag:
                        excluded.append(real_imag[1])
                    else:
                        included.append(term)
            # Only rebuild if something was actually simplified.
            if len(args) != len(included):
                a, b, c = map(lambda xs: Add(*xs),
                    [included, reverted, excluded])
                return cls(a) + re(b) + c

    def as_real_imag(self, deep=True, **hints):
        """
        Return the imaginary part with a zero real part.

        Examples
        ========

        >>> from sympy.functions import im
        >>> from sympy import I
        >>> im(2 + 3*I).as_real_imag()
        (3, 0)
        """
        return (self, S.Zero)

    def _eval_derivative(self, x):
        if x.is_real or self.args[0].is_real:
            return im(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary or self.args[0].is_imaginary:
            return -S.ImaginaryUnit \
                * re(Derivative(self.args[0], x, evaluate=True))

    def _sage_(self):
        # Conversion hook used by Sage's sympy interface.
        import sage.all as sage
        return sage.imag_part(self.args[0]._sage_())
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
    """
    Returns the complex sign of an expression:

    If the expression is real the sign will be:

        * 1 if expression is positive
        * 0 if expression is equal to zero
        * -1 if expression is negative

    If the expression is imaginary the sign will be:

        * I if im(expression) is positive
        * -I if im(expression) is negative

    Otherwise an unevaluated expression will be returned. When evaluated, the
    result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.

    Examples
    ========

    >>> from sympy.functions import sign
    >>> from sympy.core.numbers import I
    >>> sign(-1)
    -1
    >>> sign(0)
    0
    >>> sign(-3*I)
    -I
    >>> sign(1 + I)
    sign(1 + I)
    >>> _.evalf()
    0.707106781186548 + 0.707106781186548*I

    See Also
    ========
    Abs, conjugate
    """

    is_finite = True
    is_complex = True

    def doit(self):
        # For a known-nonzero argument, sign(z) == z/|z|.
        if self.args[0].is_nonzero:
            return self.args[0] / Abs(self.args[0])
        return self

    @classmethod
    def eval(cls, arg):
        # handle what we can
        if arg.is_Mul:
            # Pull the sign out factor-by-factor; unclassifiable factors
            # stay inside a residual sign(...) call.
            c, args = arg.as_coeff_mul()
            unk = []
            s = sign(c)
            for a in args:
                if a.is_negative:
                    s = -s
                elif a.is_positive:
                    pass
                else:
                    ai = im(a)
                    if a.is_imaginary and ai.is_comparable:  # i.e. a = I*real
                        s *= S.ImaginaryUnit
                        if ai.is_negative:
                            # can't use sign(ai) here since ai might not be
                            # a Number
                            s = -s
                    else:
                        unk.append(a)
            if c is S.One and len(unk) == len(args):
                # Nothing was extracted; leave the expression unevaluated.
                return None
            return s * cls(arg._new_rawargs(*unk))
        if arg is S.NaN:
            return S.NaN
        if arg.is_zero:  # it may be an Expr that is zero
            return S.Zero
        if arg.is_positive:
            return S.One
        if arg.is_negative:
            return S.NegativeOne
        if arg.is_Function:
            if arg.func is sign:
                # sign is idempotent: sign(sign(z)) == sign(z)
                return arg
        if arg.is_imaginary:
            if arg.is_Pow and arg.exp is S.Half:
                # we catch this because non-trivial sqrt args are not expanded
                # e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
                return S.ImaginaryUnit
            arg2 = -S.ImaginaryUnit * arg
            if arg2.is_positive:
                return S.ImaginaryUnit
            if arg2.is_negative:
                return -S.ImaginaryUnit

    def _eval_Abs(self):
        # |sign(z)| == 1 whenever z is nonzero.
        if self.args[0].is_nonzero:
            return S.One

    def _eval_conjugate(self):
        return sign(conjugate(self.args[0]))

    def _eval_derivative(self, x):
        # d/dx sign(x) is a DiracDelta spike at the zero crossing.
        if self.args[0].is_real:
            from sympy.functions.special.delta_functions import DiracDelta
            return 2 * Derivative(self.args[0], x, evaluate=True) \
                * DiracDelta(self.args[0])
        elif self.args[0].is_imaginary:
            from sympy.functions.special.delta_functions import DiracDelta
            return 2 * Derivative(self.args[0], x, evaluate=True) \
                * DiracDelta(-S.ImaginaryUnit * self.args[0])

    def _eval_is_nonnegative(self):
        if self.args[0].is_nonnegative:
            return True

    def _eval_is_nonpositive(self):
        if self.args[0].is_nonpositive:
            return True

    def _eval_is_imaginary(self):
        return self.args[0].is_imaginary

    def _eval_is_integer(self):
        return self.args[0].is_real

    def _eval_is_zero(self):
        return self.args[0].is_zero

    def _eval_power(self, other):
        # sign(z)**(even integer) == 1 for real nonzero z.
        if (
            self.args[0].is_real and
            self.args[0].is_nonzero and
            other.is_integer and
            other.is_even
        ):
            return S.One

    def _sage_(self):
        # Conversion hook used by Sage's sympy interface.
        import sage.all as sage
        return sage.sgn(self.args[0]._sage_())

    def _eval_rewrite_as_Piecewise(self, arg):
        if arg.is_real:
            return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))

    def _eval_simplify(self, ratio, measure):
        return self.func(self.args[0].factor())
class Abs(Function):
    """
    Return the absolute value of the argument.
    This is an extension of the built-in function abs() to accept symbolic
    values. If you pass a SymPy expression to the built-in abs(), it will
    pass it automatically to Abs().
    Examples
    ========
    >>> from sympy import Abs, Symbol, S
    >>> Abs(-1)
    1
    >>> x = Symbol('x', real=True)
    >>> Abs(-x)
    Abs(x)
    >>> Abs(x**2)
    x**2
    >>> abs(-x) # The Python built-in
    Abs(x)
    Note that the Python built-in will return either an Expr or int depending on
    the argument::
    >>> type(abs(-1))
    <... 'int'>
    >>> type(abs(S.NegativeOne))
    <class 'sympy.core.numbers.One'>
    Abs will always return a sympy object.
    See Also
    ========
    sign, conjugate
    """

    # Abs(z) is always a real, non-negative quantity.
    is_real = True
    is_negative = False
    # Abs does not depend on the branch of a polar argument.
    unbranched = True

    def fdiff(self, argindex=1):
        """
        Get the first derivative of the argument to Abs().
        Examples
        ========
        >>> from sympy.abc import x
        >>> from sympy.functions import Abs
        >>> Abs(-x).fdiff()
        sign(x)
        """
        if argindex == 1:
            return sign(self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, arg):
        from sympy.simplify.simplify import signsimp
        # Let the argument handle Abs itself, if it knows how
        # (e.g. sign, conjugate, polar_lift define _eval_Abs).
        if hasattr(arg, '_eval_Abs'):
            obj = arg._eval_Abs()
            if obj is not None:
                return obj
        # handle what we can
        arg = signsimp(arg, evaluate=False)
        if arg.is_Mul:
            # Split factors into those with a known Abs and the rest.
            known = []
            unk = []
            for t in arg.args:
                tnew = cls(t)
                if tnew.func is cls:
                    unk.append(tnew.args[0])
                else:
                    known.append(tnew)
            known = Mul(*known)
            unk = cls(Mul(*unk), evaluate=False) if unk else S.One
            return known*unk
        if arg is S.NaN:
            return S.NaN
        if arg.is_zero:  # it may be an Expr that is zero
            return S.Zero
        if arg.is_nonnegative:
            return arg
        if arg.is_nonpositive:
            return -arg
        if arg.is_imaginary:
            # |I*r| == r for nonnegative real r
            arg2 = -S.ImaginaryUnit * arg
            if arg2.is_nonnegative:
                return arg2
        if arg.is_real is False and arg.is_imaginary is False:
            # General complex value: |z| = sqrt(z*conjugate(z))
            from sympy import expand_mul
            return sqrt( expand_mul(arg * arg.conjugate()) )
        if arg.is_real is None and arg.is_imaginary is None and arg.is_Add:
            if all(a.is_real or a.is_imaginary or (S.ImaginaryUnit*a).is_real for a in arg.args):
                from sympy import expand_mul
                return sqrt(expand_mul(arg * arg.conjugate()))
        if arg.is_Pow:
            base, exponent = arg.as_base_exp()
            if exponent.is_even and base.is_real:
                # An even power of a real base is already nonnegative.
                return arg
            if exponent.is_integer and base is S.NegativeOne:
                # |(-1)**n| == 1 for integer n
                return S.One

    def _eval_is_nonzero(self):
        return self._args[0].is_nonzero

    def _eval_is_positive(self):
        # Abs is nonnegative, so positive is equivalent to nonzero.
        return self.is_nonzero

    def _eval_power(self, other):
        if self.args[0].is_real and other.is_integer:
            if other.is_even:
                # |x|**(2k) == x**(2k) for real x
                return self.args[0]**other
            elif other is not S.NegativeOne and other.is_Integer:
                # Reduce odd integer powers: |x|**n == x**(n - sign(n)) * |x|
                e = other - sign(other)
                return self.args[0]**e*self
        return

    def _eval_nseries(self, x, n, logx):
        # Series of |f| follows the series of f up to the sign of the
        # leading direction; Piecewise covers the undecided direction == 0.
        direction = self.args[0].leadterm(x)[0]
        s = self.args[0]._eval_nseries(x, n=n, logx=logx)
        when = Eq(direction, 0)
        return Piecewise(
            ((s.subs(direction, 0)), when),
            (sign(direction)*s, True),
        )

    def _sage_(self):
        import sage.all as sage
        return sage.abs_symbolic(self.args[0]._sage_())

    def _eval_derivative(self, x):
        if self.args[0].is_real or self.args[0].is_imaginary:
            return Derivative(self.args[0], x, evaluate=True) \
                * sign(conjugate(self.args[0]))
        # General complex case: d|f| = (re f * d re f + im f * d im f) / |f|
        return (re(self.args[0]) * Derivative(re(self.args[0]), x,
            evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
            x, evaluate=True)) / Abs(self.args[0])

    def _eval_rewrite_as_Heaviside(self, arg):
        # Note this only holds for real arg (since Heaviside is not defined
        # for complex arguments).
        if arg.is_real:
            return arg*(C.Heaviside(arg) - C.Heaviside(-arg))

    def _eval_rewrite_as_Piecewise(self, arg):
        if arg.is_real:
            return Piecewise((arg, arg >= 0), (-arg, True))
class arg(Function):
    """Returns the argument (in radians) of a complex number"""

    # The principal argument is a real, finite angle.
    is_real = True
    is_finite = True

    @classmethod
    def eval(cls, arg):
        if not arg.is_Atom:
            # Strip a real coefficient and unit-sign factors so the
            # remaining expression is as simple as possible.
            c, arg_ = factor_terms(arg).as_coeff_Mul()
            if arg_.is_Mul:
                # Replace any factor with a known sign of +/-1 by that sign.
                arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
                    sign(a) for a in arg_.args])
            arg_ = sign(c)*arg_
        else:
            arg_ = arg
        x, y = re(arg_), im(arg_)
        rv = C.atan2(y, x)
        if rv.is_number and not rv.atoms(AppliedUndef):
            return rv
        # If simplification changed anything, return with the simpler argument.
        if arg_ != arg:
            return cls(arg_, evaluate=False)

    def _eval_derivative(self, t):
        # d/dt atan2(y, x) == (x*y' - y*x') / (x**2 + y**2)
        x, y = re(self.args[0]), im(self.args[0])
        return (x * Derivative(y, t, evaluate=True) - y *
            Derivative(x, t, evaluate=True)) / (x**2 + y**2)

    def _eval_rewrite_as_atan2(self, arg):
        # NOTE: the ``arg`` parameter is unused; the rewrite uses self.args[0].
        x, y = re(self.args[0]), im(self.args[0])
        return atan2(y, x)
class conjugate(Function):
    """
    Changes the sign of the imaginary part of a complex number.
    Examples
    ========
    >>> from sympy import conjugate, I
    >>> conjugate(1 + I)
    1 - I
    See Also
    ========
    sign, Abs
    """

    @classmethod
    def eval(cls, arg):
        # Delegate to the argument's own conjugation rule, if it has one.
        obj = arg._eval_conjugate()
        if obj is not None:
            return obj

    def _eval_Abs(self):
        # |conjugate(z)| == |z|
        return Abs(self.args[0], evaluate=True)

    def _eval_adjoint(self):
        # adjoint(conjugate(x)) == transpose(x)
        return transpose(self.args[0])

    def _eval_conjugate(self):
        # Conjugation is an involution: conjugate(conjugate(x)) == x.
        return self.args[0]

    def _eval_derivative(self, x):
        if x.is_real:
            return conjugate(Derivative(self.args[0], x, evaluate=True))
        elif x.is_imaginary:
            # Differentiating w.r.t. an imaginary variable flips the sign.
            return -conjugate(Derivative(self.args[0], x, evaluate=True))

    def _eval_transpose(self):
        # transpose(conjugate(x)) == adjoint(x)
        return adjoint(self.args[0])
class transpose(Function):
    """
    Linear map transposition.
    """

    @classmethod
    def eval(cls, arg):
        """Evaluate via the argument's own ``_eval_transpose`` hook
        (None leaves the expression unevaluated)."""
        return arg._eval_transpose()

    def _eval_adjoint(self):
        """adjoint(transpose(x)) == conjugate(x)."""
        return conjugate(self.args[0])

    def _eval_conjugate(self):
        """conjugate(transpose(x)) == adjoint(x)."""
        return adjoint(self.args[0])

    def _eval_transpose(self):
        """Transposition is an involution: transpose(transpose(x)) == x."""
        return self.args[0]
class adjoint(Function):
    """
    Conjugate transpose or Hermite conjugation.
    """

    @classmethod
    def eval(cls, arg):
        # Prefer the argument's direct adjoint rule; otherwise fall back
        # to conjugate(transpose(arg)).
        obj = arg._eval_adjoint()
        if obj is not None:
            return obj
        obj = arg._eval_transpose()
        if obj is not None:
            return conjugate(obj)

    def _eval_adjoint(self):
        # Adjoint is an involution.
        return self.args[0]

    def _eval_conjugate(self):
        # conjugate(adjoint(x)) == transpose(x)
        return transpose(self.args[0])

    def _eval_transpose(self):
        # transpose(adjoint(x)) == conjugate(x)
        return conjugate(self.args[0])

    def _latex(self, printer, exp=None, *args):
        # Typeset as a dagger superscript, wrapping when itself exponentiated.
        arg = printer._print(self.args[0])
        tex = r'%s^{\dag}' % arg
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
        return tex

    def _pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        pform = printer._print(self.args[0], *args)
        if printer._use_unicode:
            # U+2020 DAGGER
            pform = pform**prettyForm(u('\u2020'))
        else:
            pform = pform**prettyForm('+')
        return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
    """
    Lift argument to the Riemann surface of the logarithm, using the
    standard branch.
    >>> from sympy import Symbol, polar_lift, I
    >>> p = Symbol('p', polar=True)
    >>> x = Symbol('x')
    >>> polar_lift(4)
    4*exp_polar(0)
    >>> polar_lift(-4)
    4*exp_polar(I*pi)
    >>> polar_lift(-I)
    exp_polar(-I*pi/2)
    >>> polar_lift(I + 2)
    polar_lift(2 + I)
    >>> polar_lift(4*x)
    4*polar_lift(x)
    >>> polar_lift(4*p)
    4*p
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    periodic_argument
    """

    is_polar = True
    is_comparable = False  # Cannot be evalf'd.

    @classmethod
    def eval(cls, arg):
        from sympy import exp_polar, pi, I, arg as argument
        if arg.is_number:
            ar = argument(arg)
            #if not ar.has(argument) and not ar.has(atan):
            # Only lift numbers whose argument lies on one of the axes.
            if ar in (0, pi/2, -pi/2, pi):
                return exp_polar(I*ar)*abs(arg)

        if arg.is_Mul:
            args = arg.args
        else:
            args = [arg]
        # Partition factors: already-polar, known-positive, and the rest.
        included = []
        excluded = []
        positive = []
        for arg in args:
            if arg.is_polar:
                included += [arg]
            elif arg.is_positive:
                positive += [arg]
            else:
                excluded += [arg]
        if len(excluded) < len(args):
            if excluded:
                # Lift only the undecided part.
                return Mul(*(included + positive))*polar_lift(Mul(*excluded))
            elif included:
                return Mul(*(included + positive))
            else:
                # Everything positive: the lift is just exp_polar(0).
                return Mul(*positive)*exp_polar(0)

    def _eval_evalf(self, prec):
        """ Careful! any evalf of polar numbers is flaky """
        return self.args[0]._eval_evalf(prec)

    def _eval_Abs(self):
        return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
    """
    Represent the argument on a quotient of the Riemann surface of the
    logarithm. That is, given a period P, always return a value in
    (-P/2, P/2], by using exp(P*I) == 1.
    >>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
    >>> from sympy import I, pi
    >>> unbranched_argument(exp(5*I*pi))
    pi
    >>> unbranched_argument(exp_polar(5*I*pi))
    5*pi
    >>> periodic_argument(exp_polar(5*I*pi), 2*pi)
    pi
    >>> periodic_argument(exp_polar(5*I*pi), 3*pi)
    -pi
    >>> periodic_argument(exp_polar(5*I*pi), pi)
    0
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    principal_branch
    """

    @classmethod
    def _getunbranched(cls, ar):
        # Sum the unbranched (full Riemann surface) argument over the
        # factors of ``ar``; return None if any factor is undecidable.
        from sympy import exp_polar, log, polar_lift
        if ar.is_Mul:
            args = ar.args
        else:
            args = [ar]
        unbranched = 0
        for a in args:
            if not a.is_polar:
                unbranched += arg(a)
            elif a.func is exp_polar:
                # arg(exp_polar(x)) is the imaginary part of x, unreduced.
                unbranched += a.exp.as_real_imag()[1]
            elif a.is_Pow:
                re, im = a.exp.as_real_imag()
                unbranched += re*unbranched_argument(
                    a.base) + im*log(abs(a.base))
            elif a.func is polar_lift:
                unbranched += arg(a.args[0])
            else:
                return None
        return unbranched

    @classmethod
    def eval(cls, ar, period):
        # Our strategy is to evaluate the argument on the Riemann surface of the
        # logarithm, and then reduce.
        # NOTE evidently this means it is a rather bad idea to use this with
        # period != 2*pi and non-polar numbers.
        from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
        if not period.is_positive:
            return None
        if period == oo and isinstance(ar, principal_branch):
            return periodic_argument(*ar.args)
        if ar.func is polar_lift and period >= 2*pi:
            return periodic_argument(ar.args[0], period)
        if ar.is_Mul:
            # Positive factors contribute nothing to the argument.
            newargs = [x for x in ar.args if not x.is_positive]
            if len(newargs) != len(ar.args):
                return periodic_argument(Mul(*newargs), period)
        unbranched = cls._getunbranched(ar)
        if unbranched is None:
            return None
        if unbranched.has(periodic_argument, atan2, arg, atan):
            return None
        if period == oo:
            return unbranched
        if period != oo:
            # Reduce into the interval (-period/2, period/2].
            n = ceiling(unbranched/period - S(1)/2)*period
            if not n.has(ceiling):
                return unbranched - n

    def _eval_evalf(self, prec):
        from sympy import ceiling, oo
        z, period = self.args
        if period == oo:
            unbranched = periodic_argument._getunbranched(z)
            if unbranched is None:
                return self
            return unbranched._eval_evalf(prec)
        # Evaluate the unbranched argument numerically, then reduce.
        ub = periodic_argument(z, oo)._eval_evalf(prec)
        return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)
def unbranched_argument(arg):
    """Return the argument of *arg* on the full Riemann surface of the
    logarithm, i.e. ``periodic_argument`` with an infinite period."""
    from sympy import oo as infinite_period
    return periodic_argument(arg, infinite_period)
class principal_branch(Function):
    """
    Represent a polar number reduced to its principal branch on a quotient
    of the Riemann surface of the logarithm.
    This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number of infinity, `p`.
    The result is "z mod exp_polar(I*p)".
    >>> from sympy import exp_polar, principal_branch, oo, I, pi
    >>> from sympy.abc import z
    >>> principal_branch(z, oo)
    z
    >>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
    3*exp_polar(0)
    >>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
    3*principal_branch(z, 2*pi)
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    periodic_argument
    """

    is_polar = True
    is_comparable = False  # cannot always be evalf'd

    @classmethod
    def eval(self, x, period):
        # NOTE(review): the first parameter is the class (conventionally
        # spelled ``cls``) despite being named ``self`` here.
        from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
        if isinstance(x, polar_lift):
            return principal_branch(x.args[0], period)
        if period == oo:
            return x
        ub = periodic_argument(x, oo)
        barg = periodic_argument(x, period)
        if ub != barg and not ub.has(periodic_argument) \
                and not barg.has(periodic_argument):
            pl = polar_lift(x)

            def mr(expr):
                # Lift everything except bare symbols.
                if not isinstance(expr, Symbol):
                    return polar_lift(expr)
                return expr
            pl = pl.replace(polar_lift, mr)
            if not pl.has(polar_lift):
                # Shift by the difference of branched and unbranched args.
                res = exp_polar(I*(barg - ub))*pl
                if not res.is_polar and not res.has(exp_polar):
                    res *= exp_polar(0)
                return res

        if not x.free_symbols:
            c, m = x, ()
        else:
            c, m = x.as_coeff_mul(*x.free_symbols)
        # Fold known-positive symbolic factors into the coefficient.
        others = []
        for y in m:
            if y.is_positive:
                c *= y
            else:
                others += [y]
        m = tuple(others)
        arg = periodic_argument(c, period)
        if arg.has(periodic_argument):
            return None
        if arg.is_number and (unbranched_argument(c) != arg or
                              (arg == 0 and m != () and c != 1)):
            if arg == 0:
                return abs(c)*principal_branch(Mul(*m), period)
            return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
        if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
                and m == ():
            return exp_polar(arg*I)*abs(c)

    def _eval_evalf(self, prec):
        from sympy import exp, pi, I
        z, period = self.args
        p = periodic_argument(z, period)._eval_evalf(prec)
        if abs(p) > pi or p == -pi:
            return self  # Cannot evalf for this argument.
        return (abs(z)*exp(I*p))._eval_evalf(prec)
# /cyclic/
# Install Abs onto sympy.core.basic to break a circular import: the core
# cannot import this module at definition time.
from sympy.core import basic as _
_.abs_ = Abs
del _
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Wrapper for the common graph algorithms.
"""
import sys
import logging
import networkx as nx
from collections import deque
from string import maketrans
from jcvi.utils.iter import pairwise
from jcvi.formats.base import must_open
"""
Bidirectional graph.
"""
dirs = (">", "<")
trans = maketrans("+?-", ">><")
class BiNode (object):
    # A vertex of the bidirectional graph, holding its incoming and
    # outgoing BiEdges separately.

    def __init__(self, v):
        self.v = v
        # Incoming and outgoing edges relative to this vertex.
        self.ins = []
        self.outs = []

    def get_next(self, tag="<"):
        """
        This function is tricky and took me a while to figure out.
        The tag specifies the direction where the current edge came from.
        tag ntag
        ---> V >----> U
        cur next
        This means the next vertex should follow the outs since this tag is
        inward '<'. Check if there are multiple branches if len(L) == 1, and
        also check if the next it finds has multiple incoming edges though if
        len(B) == 1.
        """
        next, ntag = None, None

        L = self.outs if tag == "<" else self.ins
        if len(L) == 1:  # only continue when the outgoing choice is unique
            e, = L
            if e.v1.v == self.v:
                next, ntag = e.v2, e.o2
                ntag = "<" if ntag == ">" else ">"  # Flip tag if on other end
            else:
                next, ntag = e.v1, e.o1

            if next:  # Validate the next vertex
                B = next.ins if ntag == "<" else next.outs
                if len(B) > 1:
                    # The next vertex has multiple entries: path is ambiguous.
                    return None, None

        return next, ntag

    def __str__(self):
        return str(self.v)

    __repr__ = __str__
class BiEdge (object):
    """Edge between two vertices, each endpoint carrying an orientation
    (">" or "<"). Stored with the smaller vertex first."""

    def __init__(self, v1, v2, o1, o2, color="black", length=None):
        self.v1 = v1
        self.v2 = v2
        # Normalize strand symbols (+/?/-) into arrow orientations.
        self.o1 = o1.translate(trans)
        self.o2 = o2.translate(trans)
        assert self.o1 in dirs and self.o2 in dirs
        # Canonicalize: keep the smaller vertex as v1.
        if v1 > v2:
            self.flip()
        self.color = color
        self.length = length

    def __str__(self):
        parts = (self.v1, self.o1, "--", self.o2, self.v2)
        return "".join(str(p) for p in parts)

    def flip(self):
        """Reverse the edge: swap endpoints and invert both orientations."""
        self.v1, self.v2 = self.v2, self.v1
        prev_o1, prev_o2 = self.o1, self.o2
        self.o1 = ">" if prev_o2 == "<" else "<"
        self.o2 = ">" if prev_o1 == "<" else "<"
class BiGraph (object):
    # Bidirectional graph keyed by vertex id; edges keyed by the ordered
    # vertex pair (smaller id first). NOTE: Python 2 code (print chevron).

    def __init__(self):
        # vertex id -> BiNode, and (v1, v2) -> BiEdge with v1 < v2.
        self.nodes = {}
        self.edges = {}

    def __str__(self):
        return "BiGraph with {0} nodes and {1} edges".\
                format(len(self.nodes), len(self.edges))

    def add_node(self, v):
        # Register vertex v if not seen before.
        if v not in self.nodes:
            self.nodes[v] = BiNode(v)

    def add_edge(self, e):
        """Insert BiEdge e, wiring it into both endpoint nodes."""
        v1, v2 = e.v1, e.v2
        assert isinstance(e, BiEdge)
        for v in (v1, v2):
            self.add_node(v)
        n1 = self.nodes.get(v1)
        n2 = self.nodes.get(v2)

        # Attach the edge to the appropriate in/out list on each side.
        l = n1.outs if e.o1 == ">" else n1.ins
        r = n2.ins if e.o2 == ">" else n2.outs
        l.append(e)
        r.append(e)
        # Replace the vertex ids on the edge with the BiNode objects.
        e.v1, e.v2 = n1, n2
        self.edges[(v1, v2)] = e

    def get_node(self, v):
        return self.nodes[v]

    def get_edge(self, av, bv):
        # Edges are stored with the smaller vertex first; flip on lookup
        # so the returned edge is oriented av -> bv.
        flip = False
        if av > bv:
            av, bv = bv, av
            flip = True
        e = self.edges[(av, bv)]
        if flip:
            e.flip()
        return e

    def iter_paths(self):
        """Yield maximal unambiguous paths as deques of BiNodes."""
        discovered = set()
        for v, vv in self.nodes.items():
            if v in discovered:
                continue

            path = deque([vv])

            #print "cur", v
            discovered.add(v)
            # Extend the path to the left of the seed vertex...
            prev, ptag = vv.get_next(tag=">")
            while prev:
                #print prev, ptag
                if prev.v in discovered:
                    break
                path.appendleft(prev)
                discovered.add(prev.v)
                prev, ptag = prev.get_next(tag=ptag)

            # ...and to the right.
            next, ntag = vv.get_next(tag="<")
            while next:
                #print next, ntag
                if next.v in discovered:
                    break
                path.append(next)
                discovered.add(next.v)
                next, ntag = next.get_next(tag=ntag)

            #discovered |= set(x.v for x in path)
            yield path

    def path(self, path, flip=False):
        """Render a node path as an edge string; also return the list of
        (vertex, orientation-is-forward) pairs."""
        oo = []
        if len(path) == 1:
            m = "Singleton {0}".format(path[0])
            oo.append((path[0].v, True))
            return m, oo

        edges = []
        for a, b in pairwise(path):
            av, bv = a.v, b.v
            e = self.get_edge(av, bv)

            if not oo:  # First edge imports two nodes
                oo.append((e.v1.v, e.o1 == ">"))
            last = oo[-1]
            assert last == (e.v1.v, e.o1 == ">")
            oo.append((e.v2.v, e.o2 == ">"))

            if flip:
                se = str(e)
                e.flip()
            else:
                se = str(e)
            edges.append(se)

        return "|".join(edges), oo

    def read(self, filename, color="black"):
        """Load edges from a file of lines shaped like ``a>--<b``."""
        fp = open(filename)
        nedges = 0
        for row in fp:
            a, b = row.strip().split("--")
            oa = a[-1]
            ob = b[0]
            a, b = a.strip("<>"), b.strip("<>")
            self.add_edge(BiEdge(a, b, oa, ob, color=color))
            nedges += 1
        logging.debug("A total of {0} edges imported from `{1}` (color={2}).".
                format(nedges, filename, color))

    def write(self, filename="stdout"):
        fw = must_open(filename, "w")
        for e in self.edges.values():
            print >> fw, e
        logging.debug("Graph written to `{0}`.".format(filename))

    def draw(self, pngfile, dpi=96, verbose=False, namestart=0,
             nodehighlight=None, prog="circo"):
        """Render via pygraphviz; edge orientations map to arrow shapes."""
        import pygraphviz as pgv

        G = pgv.AGraph()
        for e in self.edges.values():
            arrowhead = (e.o1 == ">")
            arrowtail = (e.o2 == "<")
            if e.o1 != e.o2:  # Not sure why this is necessary
                arrowhead = not arrowhead
                arrowtail = not arrowtail
            arrowhead = "normal" if arrowhead else "inv"
            arrowtail = "normal" if arrowtail else "inv"
            v1, v2 = e.v1, e.v2
            v1, v2 = str(v1)[namestart:], str(v2)[namestart:]
            G.add_edge(v1, v2, color=e.color,
                    arrowhead=arrowhead, arrowtail=arrowtail)

        if nodehighlight:
            for n in nodehighlight:
                n = n[namestart:]
                n = G.get_node(n)
                n.attr["shape"] = "box"

        G.graph_attr.update(dpi=str(dpi))
        if verbose:
            G.write(sys.stderr)
        G.draw(pngfile, prog=prog)
        logging.debug("Graph written to `{0}`.".format(pngfile))

    def get_next(self, node, tag="<"):
        return self.get_node(node).get_next(tag)

    def get_path(self, n1, n2, tag="<"):
        # return all intermediate nodes on path n1 -> n2
        path = deque()
        next, ntag = self.get_next(n1, tag=tag)
        while next:
            if next.v == n2:
                return path
            path.append((next, ntag))
            next, ntag = next.get_next(tag=ntag)
        # Reaching here means n2 was never found; only meaningful for n2=None.
        return path if n2 is None else None
def graph_stats(G, diameter=False):
    """Log node/edge counts of G; optionally also its diameter
    (max over connected components — old networkx API)."""
    nnodes, nedges = len(G), G.size()
    logging.debug("Graph stats: |V|={0}, |E|={1}".format(nnodes, nedges))
    if not diameter:
        return
    diameters = (nx.diameter(H) for H in nx.connected_component_subgraphs(G))
    logging.debug("Graph diameter: {0}".format(max(diameters)))
def graph_local_neighborhood(G, query=-1, maxdegree=10, maxsize=10000):
    """BFS around `query` (random node if -1), returning the visited subgraph.

    Nodes with degree > maxdegree are dropped first; expansion stops once
    the visited set would exceed maxsize. (Python 2 / old networkx API:
    iteritems(), print chevron.)
    """
    from random import choice
    c = [k for k, d in G.degree().iteritems() if d > maxdegree]
    if c:
        logging.debug("Remove {0} nodes with deg > {1}".format(len(c), maxdegree))
    G.remove_nodes_from(c)

    if query == -1:
        query = choice(G.nodes())
    logging.debug("BFS search from node {0}".format(query))

    queue = set([query])
    # BFS search of max depth
    seen = set([query])
    depth = 0
    while True:
        # Frontier = neighbors of the current queue not yet seen.
        neighbors = set()
        for q in queue:
            neighbors |= set(G.neighbors(q))
        queue = neighbors - seen
        if not queue:
            break
        if len(seen | queue) > maxsize:
            break
        seen |= queue
        #print sorted(list(seen))
        print >> sys.stderr, "iter: {0}, graph size={1}".format(depth, len(seen))
        depth += 1

    return G.subgraph(seen)
def graph_simplify(G):
    """
    Simplify big graphs: remove spurs and contract unique paths.

    Python 2 / old networkx API (iteritems, list-returning
    connected_components).
    """
    spurs = []
    path_nodes = []
    # Classify nodes by degree: 1 = spur, 2 = interior of a simple path.
    for k, d in G.degree().iteritems():
        if d == 1:
            spurs.append(k)
        elif d == 2:
            path_nodes.append(k)

    logging.debug("Remove {0} spurs.".format(len(spurs)))
    G.remove_nodes_from(spurs)

    SG = G.subgraph(path_nodes)
    cc = nx.connected_components(SG)
    for c in cc:
        if len(c) == 1:
            continue
        c = set(c)
        # External neighbors of the chain become neighbors of the new node.
        neighbors = set()
        for x in c:
            neighbors |= set(G.neighbors(x))
        neighbors -= c
        # Collapse the whole degree-2 chain into one starred node.
        newtag = list(c)[0] + "*"
        for n in neighbors:
            G.add_edge(newtag, n)
        G.remove_nodes_from(c)
    logging.debug("Contract {0} path nodes into {1} nodes.".\
                format(len(path_nodes), len(cc)))
def bigraph_test():
    """Smoke test: build a small BiGraph and print its paths (Python 2)."""
    g = BiGraph()
    g.add_edge(BiEdge(1, 2, ">", "<"))
    g.add_edge(BiEdge(2, 3, "<", "<", color="red"))
    g.add_edge(BiEdge(2, 3, ">", ">", color="blue"))
    g.add_edge(BiEdge(5, 3, ">", ">"))
    g.add_edge(BiEdge(4, 3, "<", ">"))
    g.add_edge(BiEdge(4, 6, ">", ">"))
    g.add_edge(BiEdge(7, 1, ">", ">"))
    g.add_edge(BiEdge(7, 5, "<", ">"))
    g.add_edge(BiEdge(8, 6, ">", "<"))
    print g
    g.write()
    for path in g.iter_paths():
        p, oo = g.path(path)
        print p
        print oo
    #g.draw("demo.png", verbose=True)
def update_weight(G, a, b, w):
    """Add weight w to edge (a, b), accumulating onto an existing edge."""
    if not G.has_edge(a, b):
        G.add_edge(a, b, weight=w)
        return
    # Parallel edge found: fold the new weight into the existing one.
    G[a][b]['weight'] += w
def make_paths(paths, weights=None):
    """
    Zip together paths into a weighted DiGraph. Called by merge_paths().
    """
    npaths = len(paths)
    weights = weights or [1] * npaths
    assert len(paths) == len(weights)

    G = nx.DiGraph()
    for path, w in zip(paths, weights):
        # Each consecutive pair along a path contributes weight w.
        for u, v in pairwise(path):
            update_weight(G, u, v, w)
    return G
def reduce_paths(G):
    """
    Make graph into a directed acyclic graph (DAG).

    Repeatedly removes a minimum feedback arc set (via LP) until the graph
    is acyclic, then applies transitive reduction. Old networkx API
    (edges_iter).
    """
    from jcvi.algorithms.lpsolve import min_feedback_arc_set

    while not nx.is_directed_acyclic_graph(G):
        edges = []
        for a, b, w in G.edges_iter(data=True):
            w = w['weight']
            edges.append((a, b, w))
        mf, mf_score = min_feedback_arc_set(edges)
        for a, b, w in mf:
            G.remove_edge(a, b)

    assert nx.is_directed_acyclic_graph(G)
    G = transitive_reduction(G)
    return G
def draw_graph(G, pngfile, prog="dot"):
    """Render G to pngfile using the given graphviz layout program."""
    agraph = nx.to_agraph(G)
    agraph.draw(pngfile, prog=prog)
    logging.debug("Graph written to `{0}`.".format(pngfile))
def transitive_reduction(G):
    """
    Returns a transitive reduction of a graph. The original graph
    is not modified.
    A transitive reduction H of G has a path from x to y if and
    only if there was a path from x to y in G. Deleting any edge
    of H destroys this property. A transitive reduction is not
    unique in general. A transitive reduction has the same
    transitive closure as the original graph.
    A transitive reduction of a complete graph is a tree. A
    transitive reduction of a tree is itself.
    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
    >>> H = transitive_reduction(G)
    >>> H.edges()
    [(1, 2), (2, 3), (3, 4)]
    """
    H = G.copy()
    for a, b, w in G.edges_iter(data=True):  # old networkx API
        # Try deleting the edge, see if we still have a path
        # between the vertices
        H.remove_edge(a, b)
        if not nx.has_path(H, a, b):  # we shouldn't have deleted it
            H.add_edge(a, b, w)
    return H
def merge_paths(paths, weights=None):
    """
    Zip together sorted lists.
    >>> paths = [[1, 2, 3], [1, 3, 4], [2, 4, 5]]
    >>> G = merge_paths(paths)
    >>> nx.topological_sort(G)
    [1, 2, 3, 4, 5]
    >>> paths = [[1, 2, 3, 4], [1, 2, 3, 2, 4]]
    >>> G = merge_paths(paths, weights=(1, 2))
    >>> nx.topological_sort(G)
    [1, 2, 3, 4]
    """
    # Build the weighted union of all paths, then reduce it to a DAG.
    return reduce_paths(make_paths(paths, weights=weights))
def longest_path_weighted_nodes(G, source, target, weights=None):
    """
    The longest path problem is the problem of finding a simple path of maximum
    length in a given graph. While for general graph, this problem is NP-hard,
    but if G is a directed acyclic graph (DAG), longest paths in G can be found
    in linear time with dynamic programming.
    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, "M"), (3, "M")])
    >>> longest_path_weighted_nodes(G, 1, "M", weights={1: 1, 2: 1, 3: 2, "M": 1})
    ([1, 3, 'M'], 4)
    """
    assert nx.is_directed_acyclic_graph(G)
    tree = nx.topological_sort(G)  # old networkx API: returns a list
    node_to_index = dict((t, i) for i, t in enumerate(tree))

    nnodes = len(tree)
    # Node weights double as the DP base scores.
    weights = [weights.get(x, 1) for x in tree] if weights else [1] * nnodes
    score, fromc = weights[:], [-1] * nnodes
    si = node_to_index[source]
    ti = node_to_index[target]
    # Relax edges in topological order, starting at the source.
    for a in tree[si: ti]:
        ai = node_to_index[a]
        for b, w in G[a].items():
            bi = node_to_index[b]
            w = w.get('weight', 1)
            d = score[ai] + weights[bi] * w  # Favor heavier edges
            if d <= score[bi]:
                continue
            score[bi] = d  # Update longest distance so far
            fromc[bi] = ai

    # BUGFIX: capture the target's score before backtracking drives ti
    # to -1; the old code returned score[-1], the score of the LAST node
    # in topological order instead of the target's.
    best_score = score[ti]

    # Backtracking
    path = []
    while ti != -1:
        path.append(ti)
        ti = fromc[ti]

    path = [tree[x] for x in path[::-1]]
    return path, best_score
# Run the module doctests when executed as a script.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
    #bigraph_test()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
  """Aggregate gradients using nccl allreduce."""
  aggregated = []
  # Transpose so we iterate per-variable: one (grad, var) pair per replica.
  for grad_and_vars in zip(*replica_grads):
    grads = [g for g, _ in grad_and_vars]
    summed = nccl_ops.all_sum(grads)
    aggregated.append(
        [(g, v) for g, (_, v) in zip(summed, grad_and_vars)])
  # Transpose back to the per-replica layout.
  return list(zip(*aggregated))
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
  """Aggregate gradients using hierarchical copies.

  Args:
    avail_devices: available GPU devices.
    replica_grads: List of lists of (gradient, variable) tuples. The outer list
      is over replicas. The inner list is over individual gradients.

  Returns:
    The list of (aggregated_gradient, variable), where the gradient has been
      summed across all replicas and the variable is chosen from the first
      replica.
  """
  # This only works for DGX-1 type of machine topology
  # Device peer to peer matrix
  # DMA: 0 1 2 3 4 5 6 7
  # 0:   Y Y Y Y Y N N N
  # 1:   Y Y Y Y N Y N N
  # 2:   Y Y Y Y N N Y N
  # 3:   Y Y Y Y N N N Y
  # 4:   Y N N N Y Y Y Y
  # 5:   N Y N N Y Y Y Y
  # 6:   N N Y N Y Y Y Y
  # 7:   N N N Y Y Y Y Y
  agg_grads = []
  num_devices = len(avail_devices)
  # In the special case of DGX-1 machine topology, the two groups have equal
  # size.
  group_size = num_devices // 2
  for i, single_grads in enumerate(zip(*replica_grads)):
    # The root device rotates with the gradient index i.
    group_0_main_device = i % num_devices
    group_1_main_device = (group_0_main_device + group_size) % num_devices
    if group_0_main_device < group_size:
      group_0_begin = 0
      group_1_begin = group_size
    else:
      group_0_begin = group_size
      group_1_begin = 0

    # Aggregate the first group.
    group_0_device_grads = single_grads[group_0_begin:
                                        group_0_begin + group_size]
    with ops.device(avail_devices[group_0_main_device]):
      group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
          group_0_device_grads, False, False)

    # Aggregate the second group.
    group_1_device_grads = single_grads[group_1_begin:
                                        group_1_begin + group_size]
    with ops.device(avail_devices[group_1_main_device]):
      group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
          group_1_device_grads, False, False)

    # Aggregate between the groups.
    with ops.device(avail_devices[group_0_main_device]):
      (agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
          [group_0_agg_grads, group_1_agg_grads], False, False)

    # Broadcast the result back into the root of each group.
    with ops.device(avail_devices[group_0_main_device]):
      group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
    with ops.device(avail_devices[group_1_main_device]):
      group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)

    agg_grads_bcast = []
    for j in range(len(single_grads)):
      with ops.device(avail_devices[j]):
        # Broadcast the result back to each member in the group from the root.
        if (group_0_main_device < group_size) == (j < group_size):
          src_device_grad = group_0_agg_grads_bcast
        else:
          src_device_grad = group_1_agg_grads_bcast
        agg_grads_bcast.append(array_ops.identity(src_device_grad))

    agg_grads.append(
        [(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])

  # Transpose back to the per-replica layout.
  agg_grads = list(zip(*agg_grads))
  return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all replicas.

  Note that this function provides a synchronization point across all replicas.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single replica, and the number of pairs
      equals the number of replicas.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all replicas. The variable is chosen
      from the first replica. The has_nan_or_inf indicates the grads has nan or
      inf.
  """
  grads = [g for g, _ in grad_and_vars]
  grad = math_ops.add_n(grads)

  if use_mean and len(grads) > 1:
    # BUGFIX: multiply is a math_ops symbol, not array_ops.
    grad = math_ops.multiply(grad, 1.0 / len(grads))

  v = grad_and_vars[0][1]
  if check_inf_nan:
    # BUGFIX: logical_not/reduce_all/is_finite live in math_ops; the previous
    # array_ops.* lookups would raise AttributeError when this path ran.
    has_nan_or_inf = math_ops.logical_not(
        math_ops.reduce_all(math_ops.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None
def group_device_names(devices, group_size):
  """Group device names into groups of group_size.

  Args:
    devices: a list of canonical device strings.
    group_size: integer which is equal to or greater than 1.

  Returns:
    list of lists of devices, where each inner list is group_size long,
      and each device appears at least once in an inner list.  If
      len(devices) % group_size == 0 then each device will appear exactly once.

  Raises:
    ValueError: if group_size > len(devices)
  """
  num_devices = len(devices)
  if group_size > num_devices:
    raise ValueError(
        'only %d devices, but group_size=%d' % (num_devices, group_size))
  # Ceiling division: enough groups so every device appears at least once.
  num_groups = -(-num_devices // group_size)
  groups = [[] for _ in range(num_groups)]
  # Round-robin devices (wrapping) across the groups until all are full.
  for idx in range(num_groups * group_size):
    groups[idx % num_groups].append(devices[idx % num_devices])
  return groups
def split_grads_by_size(threshold_size, device_grads):
  """Break gradients into two sets according to tensor size.

  Args:
    threshold_size: int size cutoff for small vs large tensor.
    device_grads: List of lists of (gradient, variable) tuples.  The outer
      list is over devices. The inner list is over individual gradients.

  Returns:
    small_grads: Subset of device_grads where shape is <= threshold_size
       elements.
    large_grads: Subset of device_grads where shape is > threshold_size
       elements.
  """
  small_grads = []
  large_grads = []
  for device_list in device_grads:
    # Partition this device's gradients by element count.
    small_bucket = []
    large_bucket = []
    for grad, var in device_list:
      num_elements = grad.get_shape().num_elements()
      bucket = small_bucket if num_elements <= threshold_size else large_bucket
      bucket.append([grad, var])
    # Skip empty partitions so both results contain only non-empty lists.
    if small_bucket:
      small_grads.append(small_bucket)
    if large_bucket:
      large_grads.append(large_bucket)
  return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
# NOTE(review): _thread_local carries the per-thread instance-key counter (see
# CollectiveKeys._get_thread_local_object); _lock's users are outside this
# chunk — confirm before relying on its scope.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
  """Manages the keys used by collective ops.

  Three kinds of keys are involved:

  *Group key*: an integer identifying a set of cooperative devices.
  Collective ops working over the same set of devices must use the same
  group key.

  *Instance key*: an integer identifying the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.

  "Graph key": an integer that uniquely identifies a graph. This is used to
  support multiple graphs per client session. It must be non-zero and set in
  the `config` argument of each call to `session.run`.
  """

  def __init__(self,
               group_key_start=1,
               instance_key_start=100,
               instance_key_with_id_start=10000):
    """Initializes the object.

    Args:
      group_key_start: the starting integer of group key.
      instance_key_start: the starting integer of instance key.
      instance_key_with_id_start: the starting integer of instance key that is
        recorded with an id.
    """
    self._group_key = group_key_start
    self._group_key_table = {}
    # Instance keys recorded under a caller-provided id.
    self._instance_key_id_to_key_table = {}
    self._instance_key_with_id_counter = instance_key_with_id_start
    # Starting value for the per-thread, id-less instance-key counter.
    self._instance_key_start = instance_key_start

  def _get_thread_local_object(self):
    # Id-less instance keys are thread local so this class works with
    # MirroredStrategy and distribute coordinator.
    if not hasattr(_thread_local, 'instance_key'):
      _thread_local.instance_key = self._instance_key_start
    return _thread_local

  def get_group_key(self, devices):
    """Returns a group key for the set of devices.

    Args:
      devices: list of strings naming devices in a collective group.

    Returns:
      int key uniquely identifying the set of device names.
    """
    parsed_specs = [pydev.DeviceSpec.from_string(d) for d in devices]
    # In between-graph replicated training, different workers must arrive at
    # the same group key, so task_type and task_id are deliberately excluded.
    # TODO(yuefengz): in the in-graph replicated training, we need to include
    # task_type and task_id.
    sorted_names = sorted(
        '%s:%d' % (spec.device_type, spec.device_index)
        for spec in parsed_specs)
    key_id = ','.join(sorted_names)
    with _lock:
      if key_id not in self._group_key_table:
        self._group_key_table[key_id] = self._group_key
        self._group_key += 1
      return self._group_key_table[key_id]

  def get_instance_key(self, key_id=None):
    """Returns a new instance key for use in defining a collective op.

    Args:
      key_id: optional string. If set, key will be recorded and the same key
        will be returned when the same key_id is provided. If not, an
        increasing instance key will be returned.
    """
    if not key_id:
      local_state = self._get_thread_local_object()
      next_key = local_state.instance_key
      local_state.instance_key += 1
      return next_key
    with _lock:
      table = self._instance_key_id_to_key_table
      if key_id not in table:
        self._instance_key_with_id_counter += 1
        table[key_id] = self._instance_key_with_id_counter
      return table[key_id]
def build_collective_reduce(input_tensors,
                            num_workers,
                            collective_keys,
                            reduction_op='Add',
                            unary_op='Id'):
  """Build a subgraph performing one full all-reduce via the collective Op.

  Args:
    input_tensors: tensors within a single worker graph that are to be reduced
      together; must be one per device.
    num_workers: total number of workers with identical independent graphs that
      will be doing this same reduction. The reduction will actually include
      the corresponding tensors at all these workers.
    collective_keys: a CollectiveKeys object.
    reduction_op: string naming the reduction op.
    unary_op: string naming the unary final op.

  Returns:
    An array of final tensors, one per device, computed by the full reduction.

  Raises:
    ValueError: There must be at least two tensors over all the workers.
  """
  group_size = len(input_tensors) * num_workers
  # With a single member in the group there is nothing to reduce across.
  if group_size < 2:
    return input_tensors
  devices = [t.device for t in input_tensors]
  group_key = collective_keys.get_group_key(devices)
  instance_key = collective_keys.get_instance_key()
  subdiv_offsets = [0]  # TODO(tucker): maybe support non-default subdiv spec

  def collective_all_reduce():
    """Call collective allreduce."""
    assert not context.executing_eagerly()
    results = []
    for idx, dev in enumerate(devices):
      with ops.device(dev):
        results.append(
            collective_ops.all_reduce(input_tensors[idx], group_size,
                                      group_key, instance_key, reduction_op,
                                      unary_op, subdiv_offsets))
    return results

  if context.executing_eagerly():
    # Collective ops will block unless they are executed concurrently such as
    # in a graph or a defun.
    collective_all_reduce = def_function.function(collective_all_reduce)
  return collective_all_reduce()
def sum_grad_and_var_all_reduce(grad_and_vars,
                                num_workers,
                                alg,
                                gpu_indices,
                                aux_devices=None,
                                num_shards=1):
  """Apply all-reduce algorithm over specified gradient tensors.

  Args:
    grad_and_vars: one (grad, var) pair per device for a single variable,
      e.g. ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN)).
    num_workers: number of worker processes across the entire job.
    alg: name of the all-reduce algorithm to apply.
    gpu_indices: indices of local GPUs in order usable for ring-reduce.
    aux_devices: optional auxiliary device names used by the shuffle and
      hybrid algorithms.
    num_shards: alg-specific sharding factor.

  Returns:
    List of [summed_gradient, variable] pairs, one per device.

  Raises:
    ValueError: if `alg` is not a supported algorithm name.
  """
  with ops.name_scope('allreduce'):
    # Note that each grad_and_vars looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    scaled_grads = [g for g, _ in grad_and_vars]
    if alg == 'nccl':
      summed_grads = nccl_ops.all_sum(scaled_grads)
    elif alg == 'xring':
      summed_grads = all_reduce.build_ring_all_reduce(
          scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
    elif alg == 'nccl/xring':
      summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
                                                     math_ops.add)
    elif alg == 'nccl/rechd':
      summed_grads = all_reduce.build_nccl_then_recursive_hd(
          scaled_grads, math_ops.add)
    elif alg == 'nccl/pscpu':
      summed_grads = all_reduce.build_nccl_then_shuffle(
          scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
    elif alg == 'pscpu/pscpu':
      second_gather_devices = aux_devices[:num_shards]
      summed_grads = all_reduce.build_shuffle_then_shuffle(
          scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
    elif alg in ['pscpu', 'psgpu']:
      summed_grads = all_reduce.build_shuffle_all_reduce(
          scaled_grads, aux_devices, math_ops.add_n)
    else:
      # Format the algorithm name into the message; the previous code passed
      # two args to ValueError, which renders the exception as a tuple.
      raise ValueError('unsupported all_reduce alg: %s' % alg)

    result = []
    for (_, v), g in zip(grad_and_vars, summed_grads):
      result.append([g, v])
    return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
                             num_shards, gpu_indices):
  """Apply all-reduce algorithm over specified gradient tensors.

  Args:
    dev_prefixes: list of prefix strings to use to generate PS device names.
    replica_grads: the gradients to reduce.
    num_workers: number of worker processes across entire job.
    alg: the all-reduce algorithm to apply.
    num_shards: alg-specific sharding factor.
    gpu_indices: indices of local GPUs in order usable for ring-reduce.

  Returns:
    list of reduced tensors
  """
  uses_shuffle = any(token in alg for token in ['pscpu', 'psgpu'])
  hierarchical = '/' in alg
  # Auxiliary devices used by the shuffle-based algorithms.
  if 'pscpu' in alg:
    aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
  elif 'psgpu' in alg:
    aux_devices = [
        prefix + '/gpu:%d' % i
        for i in range(len(gpu_indices))
        for prefix in dev_prefixes
    ]
  else:
    aux_devices = ['/job:localhost/cpu:0']
  # Auxiliary devices for hierarchical all-reduces.
  aux_device_groups = group_device_names(
      aux_devices, num_shards if uses_shuffle else 1)
  group_index = 0
  reduced_gv_list = []
  for grad_and_vars in zip(*replica_grads):
    device_group = (
        aux_devices if hierarchical else aux_device_groups[group_index])
    reduced_gv_list.append(
        sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg,
                                    gpu_indices, device_group, num_shards))
    group_index = (group_index + 1) % len(aux_device_groups)
  return [list(x) for x in zip(*reduced_gv_list)]
def extract_ranges(index_list, range_size_limit=32):
  """Extract consecutive ranges and singles from index_list.

  Args:
    index_list: List of monotone increasing non-negative integers.
    range_size_limit: Largest size range to return. If a larger
      consecutive range exists, it will be returned as multiple
      ranges.

  Returns:
    (ranges, singles) where ranges is a list of [first, last] pairs of
    consecutive elements in index_list, and singles is all of the
    other elements, in original order.
  """
  if not index_list:
    return [], []
  ranges = []
  singles = []
  start = end = index_list[0]

  def flush():
    # Emit the accumulated run: as a [first, last] range when it spans at
    # least two elements, otherwise as a single.
    if end > start:
      ranges.append([start, end])
    else:
      singles.append(start)

  for idx in index_list[1:]:
    if idx == end + 1 and (end - start) <= range_size_limit:
      end = idx
    else:
      flush()
      start = end = idx
  flush()
  return ranges, singles
# Metadata needed by unpack_grad_tuple to restore a packed range: the
# `indices` of the original (grad, var) pairs, their `vars`, and the
# original gradient `shapes`.
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
  """Form the concatenation of a specified range of gradient tensors.

  Args:
    key: Value under which to store meta-data in packing that will be used
      later to restore the grad_var list structure.
    packing: Dict holding data describing packed ranges of small tensors.
    grad_vars: List of (grad, var) pairs for one replica.
    rng: A pair of integers giving the first, last indices of a consecutive
      range of tensors to be packed.

  Returns:
    A tensor that is the concatenation of all the specified small tensors.
  """
  first, last = rng[0], rng[1]
  flat_grads = []
  packed_vars = []
  orig_shapes = []
  with ops.name_scope('pack'):
    for grad, var in grad_vars[first:last + 1]:
      packed_vars.append(var)
      orig_shapes.append(grad.shape)
      with ops.device(grad.device):
        flat_grads.append(array_ops.reshape(grad, [-1]))
    # Record how to restore the original (grad, var) structure later.
    packing[key] = GradPackTuple(
        indices=range(first, last + 1),
        vars=packed_vars,
        shapes=orig_shapes)
    with ops.device(flat_grads[0].device):
      return array_ops.concat(flat_grads, 0)
def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.

  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.

  Returns:
    A list of (grad, var) pairs corresponding to the values that were
    originally packed into gv, maybe following subsequent operations like
    reduction.
  """
  widths = [shape.num_elements() for shape in gpt.shapes]
  with ops.device(gv[0][0].device):
    with ops.name_scope('unpack'):
      pieces = array_ops.split(gv[0], widths)
      # Reshape each flat piece back to its recorded shape and re-pair it
      # with its original variable.
      return [(array_ops.reshape(piece, gpt.shapes[i]), gpt.vars[i])
              for i, piece in enumerate(pieces)]
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
  """Concatenate small gradient tensors together for reduction.

  Args:
    replica_grads: List of lists of (gradient, variable) tuples.
    max_bytes: Int giving max number of bytes in a tensor that
      may be considered small.
    max_group: Int giving max number of small tensors that may be
      concatenated into one new tensor.

  Returns:
    new_replica_grads, packing where new_replica_grads is identical to
      replica_grads except that all feasible small_tensors have been removed
      from their places and concatenated into larger tensors that are
      now in the front of the list for each replica, and packing contains
      the data necessary to restore the replica_grads structure.

  Look through the first replica for gradients of the same type (float),
  and small size, that are all sequential.  For each such group,
  replace by a new tensor that is a flattened concatenation.  Note
  that the corresponding variable will be absent, which doesn't matter
  because it isn't used during all-reduce.

  Requires:
    Every gv_list in replicas must have isomorphic structure including
    identical tensor sizes and types.
  """
  small_indices = []
  large_indices = []
  for idx, (grad, _) in enumerate(replica_grads[0]):
    # float32 tensors occupy 4 bytes per element.
    is_small = (grad.dtype == dtypes.float32 and
                (4 * grad.shape.num_elements()) <= max_bytes)
    (small_indices if is_small else large_indices).append(idx)
  small_ranges, small_singles = extract_ranges(
      small_indices, range_size_limit=max_group)
  large_indices = sorted(large_indices + small_singles)
  num_gv = len(replica_grads[0])
  if not small_ranges:
    return replica_grads, None
  packing = {}
  new_replica_grads = []
  for dev_idx, gv_list in enumerate(replica_grads):
    assert len(gv_list) == num_gv
    new_gv_list = []
    # Packed ranges go to the front; surviving large tensors follow.
    for rng in small_ranges:
      key = '%d:%d' % (dev_idx, len(new_gv_list))
      new_gv_list.append((pack_range(key, packing, gv_list, rng),
                          'packing_var_placeholder'))
    new_gv_list.extend(gv_list[i] for i in large_indices)
    new_replica_grads.append(new_gv_list)
  return new_replica_grads, packing
def unpack_small_tensors(replica_grads, packing):
  """Undo the structure alterations to replica_grads done by pack_small_tensors.

  Args:
    replica_grads: List of List of (grad, var) tuples.
    packing: A dict generated by pack_small_tensors describing the changes
      it made to replica_grads.

  Returns:
    new_replica_grads: identical to replica_grads except that concatenations
      of small tensors have been split apart and returned to their original
      positions, paired with their original variables.
  """
  if not packing:
    return replica_grads
  new_replica_grads = []
  num_devices = len(replica_grads)
  # Every device contributed the same number of packed tensors.
  num_packed = len(packing.keys()) // num_devices
  for dev_idx, gv_list in enumerate(replica_grads):
    gv_list = list(gv_list)
    new_gv_list = gv_list[num_packed:]
    for i in range(num_packed):
      k = '%d:%d' % (dev_idx, i)
      gpt = packing[k]
      gv = unpack_grad_tuple(gv_list[i], gpt)
      # Restore each unpacked (grad, var) pair to its original position.
      # (Removed the previous tautological `assert idx == gpt.indices[gi]`,
      # which compared a value with itself.)
      for gi, idx in enumerate(gpt.indices):
        new_gv_list.insert(idx, gv[gi])
    new_replica_grads.append(new_gv_list)
  return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
  """Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
  # IndexedSlices cannot be summed with add_n; fall back to the gradient
  # aggregation helper if any value is an IndexedSlices.
  if any(isinstance(v, ops.IndexedSlices) for v in values):
    return gradients_util._AggregateIndexedSlicesGradients(values)  # pylint: disable=protected-access
  else:
    return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
  """Divide `value` by scalar `n`, handling `IndexedSlices` specially."""
  if isinstance(value, ops.IndexedSlices):
    # Flatten nested IndexedSlices first, then divide only the values;
    # indices and dense_shape are unchanged by a scalar division.
    value = gradients_util._HandleNestedIndexedSlices(value)  # pylint: disable=protected-access
    return ops.IndexedSlices(
        value.values / n, value.indices, value.dense_shape)
  else:
    return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
  """Copy `value` (a Tensor or `IndexedSlices`) onto `device`."""
  with ops.device(device):
    if isinstance(value, ops.IndexedSlices):
      # IndexedSlices has no single-op copy; copy each component via identity.
      copied_values = array_ops.identity(value.values)
      copied_indices = array_ops.identity(value.indices)
      copied_shape = array_ops.identity(value.dense_shape)
      result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
    else:
      result = array_ops.identity(value)
  return result
def contains_indexed_slices(value):
  """Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
  if isinstance(value, ops.IndexedSlices):
    return True
  # Recurse into non-empty containers and distributed values.
  if isinstance(value, (list, tuple)) and value:
    return any(contains_indexed_slices(element) for element in value)
  if isinstance(value, value_lib.DistributedValues):
    return contains_indexed_slices(value.values)
  return False
def is_indexed_slices(value):
  """Return True if `value` is an `IndexedSlices`, or is a
  `DistributedValues` whose every component is an `IndexedSlices`."""
  if isinstance(value, ops.IndexedSlices):
    return True
  assert isinstance(value, value_lib.DistributedValues)
  # Use a generator: all() short-circuits without building a list.
  return all(isinstance(v, ops.IndexedSlices) for v in value.values)
def split_by_sparsity(values):
  """Split values into dense and sparse values.

  Args:
    values: a list of tensors or `PerReplica`s.

  Returns:
    Four lists:
      a list of dense values, a list of their indices in `values` and
      a list of sparse values, a list of their indices in `values`.
  """
  dense_values, dense_indices = [], []
  sparse_values, sparse_indices = [], []
  for position, value in enumerate(values):
    if is_indexed_slices(value):
      sparse_values.append(value)
      sparse_indices.append(position)
    else:
      dense_values.append(value)
      dense_indices.append(position)
  return dense_values, dense_indices, sparse_values, sparse_indices
def stitch_values(values_and_indices_list):
  """Stitch values together according to their indices.

  Args:
    values_and_indices_list: a list of (values, indices) tuples indicating
      the values and their positions in the returned list.

  Returns:
    a stitched list of values.
  """
  total = sum(len(pair[0]) for pair in values_and_indices_list)
  result = [None] * total
  for pair in values_and_indices_list:
    if pair and pair[0]:
      for value, position in zip(*pair):
        # Each slot must be filled exactly once.
        assert result[position] is None
        result[position] = value
  return result
| |
import os
from arcproject.waterquality import classes
from arcproject.scripts.load_data_bulk import slurp
# path to location with data
data = r"C:\Users\Andy\Desktop\ArcData"  # or location on x drive

# Default file extensions the slurper should skip.
_DEFAULT_SKIPEXT = (".csv", ".xlsx", ".xls", ".dbf", ".prj", ".shp", ".shx", ".lyr")

# Default folder names excluded from slurping.
_DEFAULT_EXCLUDE = ('StatePlaneCAII', 'SummaryFiles', 'StatePlaneII')


def _slurp_folder(path, site_part=2, gain_part=4, gain_pattern='*WQ_*',
                  skipext=_DEFAULT_SKIPEXT, exclude=_DEFAULT_EXCLUDE):
    """Load gain files and water-quality transects from one data folder.

    Args:
        path: folder containing one outing's data.
        site_part: position of the site name used by the slurper's site function.
        gain_part: position of the gain used by the slurper's site function.
        gain_pattern: filename pattern for gain files, or None to keep the
            slurper's built-in default.
        skipext: iterable of file extensions to skip.
        exclude: iterable of folder names to exclude.
    """
    s = slurp.Slurper()
    s.add_new_sites = True
    s.dst = True
    s.site_function_params = {"site_part": site_part, "gain_part": gain_part}
    if gain_pattern is not None:
        s.gain_pattern = gain_pattern
    s.skipext = list(skipext)
    s.exclude = list(exclude)
    print("Adding gain files to database")
    s.slurp_gains(path)
    print("Adding water quality transects to database")
    s.slurp_trans(path)


def jan():
    print("January 2016")
    _slurp_folder(os.path.join(data, "Jan_2016"),
                  exclude=('StatePlaneCAII', 'SummaryFiles', "Arc_022016"))


def feb():
    print("February 2016")
    _slurp_folder(os.path.join(data, "Feb_2016"),
                  exclude=('StatePlaneCAII', 'SummaryFiles', "Arc_022016"))


def mar():
    print("March 2016")
    _slurp_folder(os.path.join(data, "Mar_2016"))


def apr():
    print("April 2016")
    _slurp_folder(os.path.join(data, "Apr_2016"))


def may():
    print("May 2016")
    _slurp_folder(os.path.join(data, "May_2016"))


def jun():
    print("June 2016")
    # June's folder also contains .mxd files that must be skipped.
    _slurp_folder(os.path.join(data, "Jun_2016"),
                  skipext=_DEFAULT_SKIPEXT + (".mxd",))


def jul():
    print("July 2016")
    _slurp_folder(os.path.join(data, "Jul_2016"))


def aug():
    print("Aug 2016")
    _slurp_folder(os.path.join(data, "Aug_2016", "Arc_082316"))
    # The 8/24 outing used a different site-name position in its filenames.
    _slurp_folder(os.path.join(data, "Aug_2016", "Arc_082416"), site_part=3)


def sep():
    print("Sep 2016")
    _slurp_folder(os.path.join(data, "Sep_2016", "092716"))
    # The 9/28 outing relies on the slurper's default gain pattern.
    _slurp_folder(os.path.join(data, "Sep_2016", "092816"), gain_pattern=None)


def oct():
    # NOTE: shadows the builtin oct(); name kept for backward compatibility.
    print("Oct 2016")
    _slurp_folder(os.path.join(data, "Oct_2016", "102516"))
    # The 10/26 outing relies on the slurper's default gain pattern.
    _slurp_folder(os.path.join(data, "Oct_2016", "102616"), gain_pattern=None)


def nov():
    print("Nov 2016")
    _slurp_folder(os.path.join(data, "Nov_2016", "111816"))
    # The 11/20 outing relies on the slurper's default gain pattern.
    _slurp_folder(os.path.join(data, "Nov_2016", "112016"), gain_pattern=None)


def dec():
    print("Dec 2016")
    _slurp_folder(os.path.join(data, "Dec_2016"))


def main(month="ALL"):
    """Load data for one month (e.g. "jan") or every month ("ALL").

    Fixes the previous dead `else` branch, which evaluated the month name
    as a bare expression and loaded nothing.
    """
    ordered = [("jan", jan), ("feb", feb), ("mar", mar), ("apr", apr),
               ("may", may), ("jun", jun), ("jul", jul), ("aug", aug),
               ("sep", sep), ("oct", oct), ("nov", nov), ("dec", dec)]
    if month == "ALL":
        for _, run in ordered:
            run()
    else:
        dispatch = dict(ordered)
        key = str(month).strip().lower()[:3]
        if key not in dispatch:
            raise ValueError("Unknown month: {0}".format(month))
        dispatch[key]()


if __name__ == '__main__':
    main()
| |
from __future__ import unicode_literals
try:
from urllib import parse as urlparse
except ImportError:
import urlparse # Python 2
try:
basestring
except NameError:
basestring = str # Python 3
from django import forms
from django.core import checks, exceptions
from django.contrib.admin.filters import FieldListFilter
from django.db.models.fields import CharField, BLANK_CHOICE_DASH
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape as escape_html
from django.utils.functional import lazy
from django_countries import countries, ioc_data, widgets, filters
from django_countries.conf import settings
def country_to_text(value):
    """Return the text form of a country code, or ``None``.

    Accepts either a plain code or an object with a ``code`` attribute
    (such as a ``Country`` instance).
    """
    code = getattr(value, 'code', value)
    if code is None:
        return None
    return force_text(code)
class TemporaryEscape(object):
    """
    Context manager that forces a country's HTML escaping on, restoring
    the previous setting on exit.

    Its truthiness mirrors the country's current ``_escape`` flag.
    """
    __slots__ = ['country', 'original_escape']

    def __init__(self, country):
        self.country = country

    def __enter__(self):
        # Remember the current setting so it can be restored on exit.
        self.original_escape = self.country._escape
        self.country._escape = True

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.country._escape = self.original_escape

    def __bool__(self):
        return self.country._escape

    __nonzero__ = __bool__  # Python 2 compatibility.
@python_2_unicode_compatible
class Country(object):
    """Lightweight wrapper around an ISO 3166-1 country code.

    Provides access to the country's name, alternate codes (alpha3,
    numeric, IOC) and flag representations.  String conversion returns the
    attribute named by ``str_attr`` (``code`` by default).
    """

    def __init__(
            self, code, flag_url=None, str_attr='code', custom_countries=None):
        self.flag_url = flag_url
        # When True, text properties (name, flag url) are HTML-escaped.
        self._escape = False
        self._str_attr = str_attr
        if custom_countries is countries:
            # The default registry doesn't need to be stored explicitly.
            custom_countries = None
        self.custom_countries = custom_countries
        # Attempt to convert the code to the alpha2 equivalent, but this
        # is not meant to be full validation so use the given code if no
        # match was found.
        self.code = self.countries.alpha2(code) or code

    def __str__(self):
        return force_text(getattr(self, self._str_attr) or '')

    def __eq__(self, other):
        # Compares by code text, so a Country equals its plain-string code.
        return force_text(self.code or '') == force_text(other or '')

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(force_text(self))

    def __repr__(self):
        args = ['code={country.code!r}']
        if self.flag_url is not None:
            args.append('flag_url={country.flag_url!r}')
        if self._str_attr != 'code':
            args.append('str_attr={country._str_attr!r}')
        args = ', '.join(args).format(country=self)
        return '{name}({args})'.format(name=self.__class__.__name__, args=args)

    def __bool__(self):
        return bool(self.code)

    __nonzero__ = __bool__  # Python 2 compatibility.

    def __len__(self):
        return len(force_text(self))

    @property
    def countries(self):
        # The registry used for lookups: a custom one if provided, else the
        # module-level default.
        return self.custom_countries or countries

    @property
    def escape(self):
        # Truthiness of the returned object reflects the current _escape
        # flag; it can also be used as a context manager to force escaping.
        return TemporaryEscape(self)

    def maybe_escape(self, text):
        if not self.escape:
            return text
        return escape_html(text)

    @property
    def name(self):
        return self.maybe_escape(self.countries.name(self.code))

    @property
    def alpha3(self):
        return self.countries.alpha3(self.code)

    @property
    def numeric(self):
        return self.countries.numeric(self.code)

    @property
    def numeric_padded(self):
        return self.countries.numeric(self.code, padded=True)

    @property
    def flag(self):
        """URL of the country's flag image, or '' when unavailable."""
        if not self.code:
            return ''
        flag_url = self.flag_url
        if flag_url is None:
            flag_url = settings.COUNTRIES_FLAG_URL
        url = flag_url.format(
            code_upper=self.code, code=self.code.lower())
        if not url:
            return ''
        url = urlparse.urljoin(settings.STATIC_URL, url)
        return self.maybe_escape(url)

    @property
    def flag_css(self):
        """
        Output the css classes needed to display an HTML element as a flag
        sprite.

        Requires the use of 'flags/sprite.css' or 'flags/sprite-hq.css'.
        Usage example::

            <i class="{{ ctry.flag_css }}" aria-label="{{ ctry.code }}"></i>
        """
        if not self.code:
            return ''
        # Unpacks the two letters of the code into the format string.
        return 'flag-sprite flag-{} flag-_{}'.format(*self.code.lower())

    @property
    def unicode_flag(self):
        """
        Generate a unicode flag for the given country.

        The logic for how these are determined can be found at:
        https://en.wikipedia.org/wiki/Regional_Indicator_Symbol

        Currently, these glyphs appear to only be supported on OS X and iOS.
        """
        if not self.code:
            return ''
        # Don't really like magic numbers, but this is the code point for [A]
        # (Regional Indicator A), minus the code point for ASCII A. By adding
        # this to the uppercase characters making up the ISO 3166-1 alpha-2
        # codes we can get the flag.
        OFFSET = 127397
        points = [ord(x) + OFFSET for x in self.code.upper()]
        try:
            # Python 3 is simple: we can just chr() the unicode points.
            return chr(points[0]) + chr(points[1])
        except ValueError:
            # Python 2 requires us to be a bit more creative. We could use
            # unichr(), but that only works if the python has been compiled
            # with wide unicode support. This method should always work.
            return ('\\U%08x\\U%08x' % tuple(points)).decode('unicode-escape')

    @staticmethod
    def country_from_ioc(ioc_code, flag_url=''):
        """Build a Country from an IOC code, or None when unmapped."""
        code = ioc_data.IOC_TO_ISO.get(ioc_code, '')
        if code == '':
            return None
        return Country(code, flag_url=flag_url)

    @property
    def ioc_code(self):
        return ioc_data.ISO_TO_IOC.get(self.code, '')
class CountryDescriptor(object):
    """
    A descriptor for country fields on a model instance. Returns a Country when
    accessed so you can do things like::

        >>> from people import Person
        >>> person = Person.object.get(name='Chris')
        >>> person.country.name
        'New Zealand'
        >>> person.country.flag
        '/static/flags/nz.gif'
    """

    def __init__(self, field):
        self.field = field

    def country(self, code):
        """Build a Country bound to this field's configuration."""
        return Country(
            code=code,
            flag_url=self.field.countries_flag_url,
            custom_countries=self.field.countries)

    def __get__(self, instance=None, owner=None):
        if instance is None:
            return self
        field_name = self.field.name
        # Deferred fields won't be in __dict__ yet; load them first.
        if field_name not in instance.__dict__:
            instance.refresh_from_db(fields=[field_name])
        raw = instance.__dict__[field_name]
        if self.field.multiple:
            return [self.country(code) for code in raw]
        return self.country(raw)

    def __set__(self, instance, value):
        instance.__dict__[self.field.name] = self.field.get_clean_value(value)
class LazyChoicesMixin(widgets.LazyChoicesMixin):

    def _set_choices(self, value):
        """
        Also update the widget's choices.
        """
        super(LazyChoicesMixin, self)._set_choices(value)
        # Keep the widget in sync so it renders the same (possibly lazy)
        # choices as the field.
        self.widget.choices = value
class LazyTypedChoiceField(LazyChoicesMixin, forms.TypedChoiceField):
    """
    A form TypedChoiceField that respects choices being a lazy object.
    """
    # LazySelect avoids evaluating the lazy choices at widget-init time.
    widget = widgets.LazySelect
class LazyTypedMultipleChoiceField(
        LazyChoicesMixin, forms.TypedMultipleChoiceField):
    """
    A form TypedMultipleChoiceField that respects choices being a lazy object.
    """
    # LazySelectMultiple avoids evaluating the lazy choices at widget-init time.
    widget = widgets.LazySelectMultiple
class CountryField(CharField):
"""
A country field for Django models that provides all ISO 3166-1 countries as
choices.
"""
descriptor_class = CountryDescriptor
    def __init__(self, *args, **kwargs):
        countries_class = kwargs.pop('countries', None)
        # Use a custom countries registry when provided, otherwise the
        # module-level default.
        self.countries = countries_class() if countries_class else countries
        self.countries_flag_url = kwargs.pop('countries_flag_url', None)
        self.blank_label = kwargs.pop('blank_label', None)
        self.multiple = kwargs.pop('multiple', None)
        kwargs['choices'] = self.countries
        if self.multiple:
            # Comma-separated storage: N two-letter codes plus N-1 commas.
            kwargs['max_length'] = len(self.countries) * 3 - 1
        else:
            kwargs['max_length'] = 2
        # NOTE(review): super(CharField, self) resolves past CharField,
        # skipping CharField.__init__ — presumably intentional; confirm.
        super(CharField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(CountryField, self).check(**kwargs)
errors.extend(self._check_multiple())
return errors
def _check_multiple(self):
if not self.multiple or not self.null:
return []
hint = 'Remove null=True argument on the field'
if not self.blank:
hint += ' (just add blank=True if you want to allow no selection)'
hint += '.'
return [
checks.Error(
'Field specifies multiple=True, so should not be null.',
obj=self,
id='django_countries.E100',
hint=hint,
)
]
    def get_internal_type(self):
        # Stored in the database as a plain CharField.
        return "CharField"
    def contribute_to_class(self, cls, name):
        super(CountryField, self).contribute_to_class(cls, name)
        # Replace the plain model attribute with a descriptor that returns
        # Country instances on access.
        setattr(cls, self.name, self.descriptor_class(self))
    def pre_save(self, *args, **kwargs):
        "Returns field's value just before saving."
        # NOTE(review): super(CharField, self) resolves past CharField,
        # skipping any CharField.pre_save behavior — presumably intentional;
        # confirm.
        value = super(CharField, self).pre_save(*args, **kwargs)
        return self.get_prep_value(value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = self.get_clean_value(value)
if self.multiple:
if value:
value = ','.join(value)
else:
value = ''
return super(CharField, self).get_prep_value(value)
def get_clean_value(self, value):
if value is None:
return None
if not self.multiple:
return country_to_text(value)
if isinstance(value, (basestring, Country)):
if isinstance(value, basestring) and ',' in value:
value = value.split(',')
else:
value = [value]
return list(filter(None, [country_to_text(c) for c in value]))
def deconstruct(self):
"""
Remove choices from deconstructed field, as this is the country list
and not user editable.
Not including the ``blank_label`` property, as this isn't database
related.
"""
name, path, args, kwargs = super(CountryField, self).deconstruct()
kwargs.pop('choices')
if self.multiple: # multiple determines the length of the field
kwargs['multiple'] = self.multiple
if self.countries is not countries:
# Include the countries class if it's not the default countries
# instance.
kwargs['countries'] = self.countries.__class__
return name, path, args, kwargs
def get_choices(
self, include_blank=True, blank_choice=None, *args, **kwargs):
if blank_choice is None:
if self.blank_label is None:
blank_choice = BLANK_CHOICE_DASH
else:
blank_choice = [('', self.blank_label)]
if self.multiple:
include_blank = False
return super(CountryField, self).get_choices(
include_blank=include_blank, blank_choice=blank_choice, *args,
**kwargs)
get_choices = lazy(get_choices, list)
def formfield(self, **kwargs):
kwargs.setdefault(
'choices_form_class',
LazyTypedMultipleChoiceField
if self.multiple else LazyTypedChoiceField)
if 'coerce' not in kwargs:
kwargs['coerce'] = super(CountryField, self).to_python
field = super(CharField, self).formfield(**kwargs)
return field
def to_python(self, value):
if not self.multiple:
return super(CountryField, self).to_python(value)
if not value:
return value
if isinstance(value, basestring):
value = value.split(',')
output = []
for item in value:
output.append(super(CountryField, self).to_python(item))
return output
def validate(self, value, model_instance):
"""
Use custom validation for when using a multiple countries field.
"""
if not self.multiple:
return super(CountryField, self).validate(value, model_instance)
if not self.editable:
# Skip validation for non-editable fields.
return
if value:
choices = [option_key for option_key, option_value in self.choices]
for single_value in value:
if single_value not in choices:
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': single_value},
)
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(
self.error_messages['blank'], code='blank')
def value_to_string(self, obj):
"""
Ensure data is serialized correctly.
"""
value = self.value_from_object(obj)
return self.get_prep_value(value)
# Make the admin use the country-aware list filter for every CountryField.
FieldListFilter.register(
    lambda f: isinstance(f, CountryField), filters.CountryFilter)
| |
# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import resource
import signal
import sys
import time
import ovs.dirs
import ovs.fatal_signal
#import ovs.lockfile
import ovs.process
import ovs.socket_util
import ovs.timeval
import ovs.util
import ovs.vlog
vlog = ovs.vlog.Vlog("daemon")

# --detach: Should we run in the background?
_detach = False

# --pidfile: Name of pidfile (null if none).
_pidfile = None

# Our pidfile's inode and device, if we have created one.
_pidfile_dev = None
_pidfile_ino = None

# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
_overwrite_pidfile = False

# --no-chdir: Should we chdir to "/"?
_chdir = True

# --monitor: Should a supervisory process monitor the daemon and restart it if
# it dies due to an error signal?
_monitor = False

# File descriptor used by daemonize_start() and daemonize_complete().
_daemonize_fd = None

# Exit status a monitored daemon uses to ask its monitor for a restart.
RESTART_EXIT_CODE = 5
def make_pidfile_name(name):
    """Compute the pidfile path that set_pidfile() would use for 'name'.

    An empty or missing name falls back to
    "<RUNDIR>/<PROGRAM_NAME>.pid"; anything else is resolved relative to
    RUNDIR (absolute names pass through unchanged).
    """
    if name:
        return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
    return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
def set_pidfile(name):
    """Sets up a following call to daemonize() to create a pidfile named
    'name'.  If 'name' begins with '/', then it is treated as an absolute
    path.  Otherwise, it is taken relative to ovs.util.RUNDIR, which is
    $(prefix)/var/run by default.

    If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is
    used."""
    global _pidfile
    _pidfile = make_pidfile_name(name)
def set_no_chdir():
    """Sets that we do not chdir to "/" when daemonize_complete() runs."""
    global _chdir
    _chdir = False
def ignore_existing_pidfile():
    """Normally, daemonize() or daemonize_start() will terminate the program
    with a message if a locked pidfile already exists.  If this function is
    called, an existing pidfile will be replaced, with a warning."""
    global _overwrite_pidfile
    _overwrite_pidfile = True
def set_detach():
    """Sets up a following call to daemonize() to detach from the foreground
    session, running this process in the background."""
    global _detach
    _detach = True
def get_detach():
    """Will daemonize() really detach?  True once set_detach() was called."""
    return _detach
def set_monitor():
    """Sets up a following call to daemonize() to fork a supervisory process
    to monitor the daemon and restart it if it dies due to an error signal."""
    global _monitor
    _monitor = True
def _fatal(msg):
    """Log 'msg', echo it on stderr, and terminate with exit status 1."""
    vlog.err(msg)
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)
def _make_pidfile():
    """If a pidfile has been configured, creates it and stores the running
    process's pid in it.  Ensures that the pidfile will be deleted when the
    process exits."""
    pid = os.getpid()

    # Create a temporary pidfile.
    tmpfile = "%s.tmp%d" % (_pidfile, pid)
    ovs.fatal_signal.add_file_to_unlink(tmpfile)
    try:
        # This is global to keep Python from garbage-collecting and
        # therefore closing our file after this function exits.  That would
        # unlock the lock for us, and we don't want that.
        global file_handle

        file_handle = open(tmpfile, "w")
    except IOError, e:
        _fatal("%s: create failed (%s)" % (tmpfile, e.strerror))

    try:
        # Remember the file's identity so __read_pidfile() can recognize
        # our own pidfile later (see _pidfile_dev/_pidfile_ino below).
        s = os.fstat(file_handle.fileno())
    except IOError, e:
        _fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))

    try:
        file_handle.write("%s\n" % pid)
        file_handle.flush()
    except OSError, e:
        # NOTE(review): Python 2 file.write() failures raise IOError, not
        # OSError, so this handler may never fire -- confirm intent.
        _fatal("%s: write failed: %s" % (tmpfile, e.strerror))

    try:
        # Hold an exclusive lock for as long as the process lives; other
        # processes probe this lock to tell a live daemon from a stale file.
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError, e:
        _fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))

    # Rename or link it to the correct name.
    if _overwrite_pidfile:
        try:
            os.rename(tmpfile, _pidfile)
        except OSError, e:
            _fatal("failed to rename \"%s\" to \"%s\" (%s)"
                   % (tmpfile, _pidfile, e.strerror))
    else:
        # link() fails with EEXIST if a pidfile already exists, which lets
        # us detect an already-running daemon without a race.
        while True:
            try:
                os.link(tmpfile, _pidfile)
                error = 0
            except OSError, e:
                error = e.errno
            if error == errno.EEXIST:
                _check_already_running()
            elif error != errno.EINTR:
                break
        if error:
            _fatal("failed to link \"%s\" as \"%s\" (%s)"
                   % (tmpfile, _pidfile, os.strerror(error)))

    # Ensure that the pidfile will get deleted on exit.
    ovs.fatal_signal.add_file_to_unlink(_pidfile)

    # Delete the temporary pidfile if it still exists.
    if not _overwrite_pidfile:
        error = ovs.fatal_signal.unlink_file_now(tmpfile)
        if error:
            _fatal("%s: unlink failed (%s)" % (tmpfile, os.strerror(error)))

    global _pidfile_dev
    global _pidfile_ino
    _pidfile_dev = s.st_dev
    _pidfile_ino = s.st_ino
def daemonize():
    """If configured with set_pidfile() or set_detach(), creates the pid file
    and detaches from the foreground session.  Equivalent to
    daemonize_start() immediately followed by daemonize_complete()."""
    daemonize_start()
    daemonize_complete()
def _waitpid(pid, options):
while True:
try:
return os.waitpid(pid, options)
except OSError, e:
if e.errno == errno.EINTR:
pass
return -e.errno, 0
def _fork_and_wait_for_startup():
    """Fork, with a pipe from child to parent.

    The parent blocks until the child either writes one byte on the pipe
    (via _fork_notify_startup()), in which case the parent returns the
    child's pid, or dies, in which case the parent exits with the child's
    exit status.  In the child, the pipe's write end is stashed in
    _daemonize_fd and 0 is returned.
    """
    try:
        rfd, wfd = os.pipe()
    except OSError, e:
        sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
        sys.exit(1)

    try:
        pid = os.fork()
    except OSError, e:
        sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
        sys.exit(1)

    if pid > 0:
        # Running in parent process.
        os.close(wfd)
        ovs.fatal_signal.fork()
        # Block until the child writes its startup byte; retry on EINTR.
        while True:
            try:
                s = os.read(rfd, 1)
                error = 0
            except OSError, e:
                s = ""
                error = e.errno
            if error != errno.EINTR:
                break
        if len(s) != 1:
            # Pipe closed without a byte: the child died before signalling
            # startup.  Reap it and propagate a meaningful exit status.
            retval, status = _waitpid(pid, 0)
            if retval == pid:
                if os.WIFEXITED(status) and os.WEXITSTATUS(status):
                    # Child exited with an error.  Convey the same error to
                    # our parent process as a courtesy.
                    sys.exit(os.WEXITSTATUS(status))
                else:
                    sys.stderr.write("fork child failed to signal "
                                     "startup (%s)\n"
                                     % ovs.process.status_msg(status))
            else:
                assert retval < 0
                sys.stderr.write("waitpid failed (%s)\n"
                                 % os.strerror(-retval))
                sys.exit(1)

        os.close(rfd)
    else:
        # Running in child process.  (Original comment said "parent", which
        # was wrong: this is the pid == 0 branch.)
        os.close(rfd)
        ovs.timeval.postfork()
        #ovs.lockfile.postfork()

        global _daemonize_fd
        _daemonize_fd = wfd
    return pid
def _fork_notify_startup(fd):
    """Write the single startup byte on 'fd' (the pipe created by
    _fork_and_wait_for_startup()) and close it.  A None 'fd' is a no-op."""
    if fd is not None:
        error, bytes_written = ovs.socket_util.write_fully(fd, "0")
        if error:
            sys.stderr.write("could not write to pipe\n")
            sys.exit(1)
        os.close(fd)
def _should_restart(status):
global RESTART_EXIT_CODE
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
return True
if os.WIFSIGNALED(status):
for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
"SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"):
if os.WTERMSIG(status) == getattr(signal, signame, None):
return True
return False
def _monitor_daemon(daemon_pid):
    """Supervise 'daemon_pid', restarting it (throttled to once per 10
    seconds) whenever it dies in a way _should_restart() approves of.
    Exits when the daemon terminates normally; returns only inside the
    freshly forked replacement daemon process."""
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))

            if _should_restart(status):
                if os.WCOREDUMP(status):
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")

                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                        ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        # NOTE(review): stray debug output to stdout (Python 2
                        # print statement) -- consider removing or routing
                        # through vlog.
                        print "sleep %f" % ((wakeup - now) / 1000.0)
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()

                vlog.err("%s, restarting" % status_msg)
                daemon_pid = _fork_and_wait_for_startup()
                if not daemon_pid:
                    # We are now the new daemon child: fall out of the
                    # monitor loop and resume normal startup.
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)
# Running in new daemon process.
def _close_standard_fds():
    """Close stdin, stdout, stderr.  If we're started from e.g. an SSH
    session, then this keeps us from holding that session open artificially.
    The descriptors are redirected to /dev/null rather than truly closed so
    fds 0-2 stay valid."""
    null_fd = ovs.socket_util.get_null_fd()
    if null_fd >= 0:
        os.dup2(null_fd, 0)
        os.dup2(null_fd, 1)
        os.dup2(null_fd, 2)
def daemonize_start():
    """If daemonization is configured, then starts daemonization, by forking
    and returning in the child process.  The parent process hangs around
    until the child lets it know either that it completed startup
    successfully (by calling daemon_complete()) or that it failed to start
    up (by exiting with a nonzero exit code)."""
    if _detach:
        if _fork_and_wait_for_startup() > 0:
            # Running in parent process.
            sys.exit(0)
        # Running in daemon or monitor process.
        os.setsid()   # become session leader, detached from the terminal

    if _monitor:
        saved_daemonize_fd = _daemonize_fd
        daemon_pid = _fork_and_wait_for_startup()
        if daemon_pid > 0:
            # Running in monitor process.
            _fork_notify_startup(saved_daemonize_fd)
            _close_standard_fds()
            _monitor_daemon(daemon_pid)
        # Running in daemon process

    if _pidfile:
        _make_pidfile()
def daemonize_complete():
    """If daemonization is configured, then this function notifies the parent
    process that the child process has completed startup successfully, then
    finishes detaching (chdir to "/" unless disabled, and close the standard
    file descriptors)."""
    _fork_notify_startup(_daemonize_fd)

    if _detach:
        if _chdir:
            os.chdir("/")
        _close_standard_fds()
def usage():
    """Print help text for the daemon command-line options on stdout."""
    sys.stdout.write("""
Daemon options:
--detach run in background as daemon
--no-chdir do not chdir to '/'
--pidfile[=FILE] create pidfile (default: %s/%s.pid)
--overwrite-pidfile with --pidfile, start even if already running
""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
def __read_pidfile(pidfile, delete_if_stale):
    """Read the pid recorded in 'pidfile'.

    Returns the positive pid of the live daemon holding the lock on
    'pidfile'; 0 if 'delete_if_stale' and the file was missing or stale
    (a stale file is deleted); otherwise a negative errno value.
    """
    if _pidfile_dev is not None:
        try:
            s = os.stat(pidfile)
            if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
                # It's our own pidfile.  We can't afford to open it,
                # because closing *any* fd for a file that a process
                # has locked also releases all the locks on that file.
                #
                # Fortunately, we know the associated pid anyhow.
                return os.getpid()
        except OSError:
            pass

    try:
        file_handle = open(pidfile, "r+")
    except IOError, e:
        if e.errno == errno.ENOENT and delete_if_stale:
            return 0
        vlog.warn("%s: open: %s" % (pidfile, e.strerror))
        return -e.errno

    # Python fcntl doesn't directly support F_GETLK so we have to just try
    # to lock it.
    try:
        fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)

        # pidfile exists but wasn't locked by anyone.  Now we have the lock.
        if not delete_if_stale:
            file_handle.close()
            vlog.warn("%s: pid file is stale" % pidfile)
            return -errno.ESRCH

        # Is the file we have locked still named 'pidfile'?
        try:
            raced = False
            s = os.stat(pidfile)
            s2 = os.fstat(file_handle.fileno())
            if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
                raced = True
        except IOError:
            raced = True
        if raced:
            vlog.warn("%s: lost race to delete pidfile" % pidfile)
            return -errno.EALREADY

        # We won the right to delete the stale pidfile.
        try:
            os.unlink(pidfile)
        except IOError, e:
            # NOTE(review): os.unlink() raises OSError, not IOError, in
            # Python 2 -- this handler may never fire; confirm intent.
            vlog.warn("%s: failed to delete stale pidfile (%s)"
                      % (pidfile, e.strerror))
            return -e.errno
        else:
            vlog.dbg("%s: deleted stale pidfile" % pidfile)
            file_handle.close()
            return 0
    except IOError, e:
        if e.errno not in [errno.EACCES, errno.EAGAIN]:
            vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
            return -e.errno

        # Someone else has the pidfile locked.
        try:
            try:
                # On success 'error' actually holds the daemon's pid read
                # from the file (the variable name is historical).
                error = int(file_handle.readline())
            except IOError, e:
                vlog.warn("%s: read: %s" % (pidfile, e.strerror))
                error = -e.errno
            except ValueError:
                vlog.warn("%s does not contain a pid" % pidfile)
                error = -errno.EINVAL
            return error
        finally:
            try:
                file_handle.close()
            except IOError:
                pass
def read_pidfile(pidfile):
    """Opens and reads a PID from 'pidfile'.  Returns the positive PID if
    successful, otherwise a negative errno value.  Never deletes a stale
    pidfile (see __read_pidfile for the deleting variant)."""
    return __read_pidfile(pidfile, False)
def _check_already_running():
    """Terminate the program if another live process holds our pidfile.

    Called when linking the new pidfile fails with EEXIST.  Reads (and,
    if stale, deletes) the existing pidfile; aborts if a live daemon owns
    it or if the check itself fails.
    """
    pid = __read_pidfile(_pidfile, True)
    if pid > 0:
        _fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
    elif pid < 0:
        # Bug fix: 'pid' is a *negative* errno value here; os.strerror()
        # needs the positive errno (the original passed the negative value,
        # yielding "Unknown error -N").
        _fatal("%s: pidfile check failed (%s), aborting"
               % (_pidfile, os.strerror(-pid)))
def add_args(parser):
    """Register this module's command-line options on 'parser'.

    'parser' is an argparse.ArgumentParser; the options appear under a
    dedicated "Daemon Options" group.
    """
    default_pidfile = make_pidfile_name(None)

    daemon_group = parser.add_argument_group(title="Daemon Options")
    daemon_group.add_argument("--detach", action="store_true",
                              help="Run in background as a daemon.")
    daemon_group.add_argument("--no-chdir", action="store_true",
                              help="Do not chdir to '/'.")
    daemon_group.add_argument("--monitor", action="store_true",
                              help="Monitor %s process." % ovs.util.PROGRAM_NAME)
    daemon_group.add_argument("--pidfile", nargs="?", const=default_pidfile,
                              help="Create pidfile (default %s)." % default_pidfile)
    daemon_group.add_argument("--overwrite-pidfile", action="store_true",
                              help="With --pidfile, start even if already running.")
def handle_args(args):
    """Handles daemon module settings in 'args'.  'args' is an object
    containing values parsed by the parse_args() method of ArgumentParser.
    The parent ArgumentParser should have been prepared by add_args() before
    calling parse_args().  Each present option flips the corresponding
    module-level setting via its setter."""
    if args.detach:
        set_detach()

    if args.no_chdir:
        set_no_chdir()

    if args.pidfile:
        set_pidfile(args.pidfile)

    if args.overwrite_pidfile:
        ignore_existing_pidfile()

    if args.monitor:
        set_monitor()
| |
"""
:created: 2018-01
:author: Alex BROSSARD <abrossard@artfx.fr>
"""
from PySide2 import QtWidgets, QtCore, QtGui
from pymel import core as pmc
from auri.auri_lib import AuriScriptView, AuriScriptController, AuriScriptModel, is_checked, grpbox
from auri.scripts.Maya_Scripts import rig_lib
from auri.scripts.Maya_Scripts.rig_lib import RigController
reload(rig_lib)
class View(AuriScriptView):
    """Qt panel for the leg rig module: creates the widgets, mirrors the
    Model state into them, and routes user input to the Controller."""

    def __init__(self, *args, **kwargs):
        # Parent-module / output selection.
        self.modules_cbbox = QtWidgets.QComboBox()
        self.outputs_cbbox = QtWidgets.QComboBox()
        self.refresh_btn = QtWidgets.QPushButton("Refresh")
        self.prebuild_btn = QtWidgets.QPushButton("Prebuild")
        # Rig options.
        self.side_cbbox = QtWidgets.QComboBox()
        self.fk_ik_type_cbbox = QtWidgets.QComboBox()
        self.ik_creation_switch = QtWidgets.QCheckBox()
        self.stretch_creation_switch = QtWidgets.QCheckBox()
        self.raz_ik_ctrls = QtWidgets.QCheckBox()
        self.raz_fk_ctrls = QtWidgets.QCheckBox()
        self.clavicle_creation_switch = QtWidgets.QCheckBox()
        # Local-space management.
        self.refresh_spaces_btn = QtWidgets.QPushButton("Refresh")
        self.add_space_btn = QtWidgets.QPushButton("Add")
        self.remove_space_btn = QtWidgets.QPushButton("Remove")
        self.space_modules_cbbox = QtWidgets.QComboBox()
        self.spaces_cbbox = QtWidgets.QComboBox()
        self.selected_space_module = "No_space_module"
        self.selected_space = "no_space"
        self.space_list_view = QtWidgets.QListView()
        self.space_list = QtGui.QStringListModel()
        # Deformation-chain options.
        self.deform_chain_creation_switch = QtWidgets.QCheckBox()
        self.how_many_thigh_jnts = QtWidgets.QSpinBox()
        self.how_many_calf_jnts = QtWidgets.QSpinBox()
        super(View, self).__init__(*args, **kwargs)

    def set_controller(self):
        """AuriScriptView hook: bind this view to its Controller."""
        self.ctrl = Controller(self.model, self)

    def set_model(self):
        """AuriScriptView hook: create the backing Model."""
        self.model = Model()

    def refresh_view(self):
        """Push the current Model values into the widgets."""
        self.ik_creation_switch.setChecked(self.model.ik_creation_switch)
        self.stretch_creation_switch.setChecked(self.model.stretch_creation_switch)
        self.clavicle_creation_switch.setChecked(self.model.clavicle_creation_switch)
        self.raz_ik_ctrls.setChecked(self.model.raz_ik_ctrls)
        self.raz_fk_ctrls.setChecked(self.model.raz_fk_ctrls)
        self.side_cbbox.setCurrentText(self.model.side)
        self.fk_ik_type_cbbox.setCurrentText(self.model.fk_ik_type)
        self.ctrl.look_for_parent()
        self.space_list.setStringList(self.model.space_list)
        # Second call repopulates the local-space comboboxes.
        self.ctrl.look_for_parent(l_cbbox_stringlist=self.ctrl.modules_with_spaces,
                                  l_cbbox_selection=self.selected_space_module,
                                  l_cbbox=self.space_modules_cbbox, r_cbbox_stringlist=self.ctrl.spaces_model,
                                  r_cbbox_selection=self.selected_space, r_cbbox=self.spaces_cbbox)
        self.deform_chain_creation_switch.setChecked(self.model.deform_chain_creation_switch)
        self.how_many_thigh_jnts.setValue(self.model.how_many_thigh_jnts)
        self.how_many_calf_jnts.setValue(self.model.how_many_calf_jnts)

    def setup_ui(self):
        """Wire widgets to controller callbacks and build the layout."""
        # Parent-module / output pickers.
        self.modules_cbbox.setModel(self.ctrl.modules_with_output)
        self.modules_cbbox.currentTextChanged.connect(self.ctrl.on_modules_cbbox_changed)
        self.outputs_cbbox.setModel(self.ctrl.outputs_model)
        self.outputs_cbbox.currentTextChanged.connect(self.ctrl.on_outputs_cbbox_changed)

        # Local-space pickers and list.
        self.space_modules_cbbox.setModel(self.ctrl.modules_with_spaces)
        self.space_modules_cbbox.currentTextChanged.connect(self.ctrl.on_space_modules_cbbox_changed)
        self.spaces_cbbox.setModel(self.ctrl.spaces_model)
        self.spaces_cbbox.currentTextChanged.connect(self.ctrl.on_spaces_cbbox_changed)
        self.space_list_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.space_list.setStringList(self.model.space_list)
        self.space_list_view.setModel(self.space_list)
        self.add_space_btn.clicked.connect(self.ctrl.add_space_to_list)
        self.remove_space_btn.clicked.connect(self.ctrl.remove_space_from_list)
        self.refresh_spaces_btn.clicked.connect(self.ctrl.look_for_spaces)

        # Option checkboxes.
        self.ik_creation_switch.stateChanged.connect(self.ctrl.on_ik_creation_switch_changed)
        # Checkbox disabled -- presumably IK creation is always forced for
        # this module; confirm.
        self.ik_creation_switch.setEnabled(False)
        self.stretch_creation_switch.stateChanged.connect(self.ctrl.on_stretch_creation_switch_changed)
        self.raz_ik_ctrls.stateChanged.connect(self.ctrl.on_raz_ik_ctrls_changed)
        self.raz_fk_ctrls.stateChanged.connect(self.ctrl.on_raz_fk_ctrls_changed)
        self.clavicle_creation_switch.stateChanged.connect(self.ctrl.on_clavicle_creation_switch_changed)
        self.side_cbbox.insertItems(0, ["Left", "Right"])
        self.side_cbbox.currentTextChanged.connect(self.ctrl.on_side_cbbox_changed)
        self.fk_ik_type_cbbox.insertItems(0, ["one_chain", "three_chains"])
        self.fk_ik_type_cbbox.currentTextChanged.connect(self.ctrl.on_fk_ik_type_changed)

        # Deformation-chain options (at least 2 joints per segment).
        self.deform_chain_creation_switch.stateChanged.connect(self.ctrl.on_deform_chain_creation_switch_changed)
        self.how_many_thigh_jnts.setMinimum(2)
        self.how_many_thigh_jnts.valueChanged.connect(self.ctrl.on_how_many_thigh_jnts_changed)
        self.how_many_calf_jnts.setMinimum(2)
        self.how_many_calf_jnts.valueChanged.connect(self.ctrl.on_how_many_calf_jnts_changed)

        self.refresh_btn.clicked.connect(self.ctrl.look_for_parent)
        self.prebuild_btn.clicked.connect(self.ctrl.prebuild)

        # Layout assembly.
        main_layout = QtWidgets.QVBoxLayout()

        select_parent_layout = QtWidgets.QVBoxLayout()
        select_parent_grp = grpbox("Select parent", select_parent_layout)
        cbbox_layout = QtWidgets.QHBoxLayout()
        cbbox_layout.addWidget(self.modules_cbbox)
        cbbox_layout.addWidget(self.outputs_cbbox)
        select_parent_layout.addLayout(cbbox_layout)
        select_parent_layout.addWidget(self.refresh_btn)

        select_spaces_layout = QtWidgets.QVBoxLayout()
        select_spaces_grp = grpbox("Select local spaces :", select_spaces_layout)
        spaces_cbbox_layout = QtWidgets.QHBoxLayout()
        spaces_cbbox_layout.addWidget(self.space_modules_cbbox)
        spaces_cbbox_layout.addWidget(self.spaces_cbbox)
        btn_layout = QtWidgets.QVBoxLayout()
        btn_layout.addWidget(self.refresh_spaces_btn)
        btn_layout.addWidget(self.add_space_btn)
        select_spaces_layout.addLayout(spaces_cbbox_layout)
        select_spaces_layout.addLayout(btn_layout)

        space_list_layout = QtWidgets.QVBoxLayout()
        space_list_grp = grpbox("local spaces :", space_list_layout)
        space_list_layout.addWidget(self.space_list_view)
        space_list_layout.addWidget(self.remove_space_btn)

        options_layout = QtWidgets.QVBoxLayout()
        options_grp = grpbox("Options", options_layout)

        side_layout = QtWidgets.QVBoxLayout()
        side_grp = grpbox("Side", side_layout)
        side_layout.addWidget(self.side_cbbox)

        chain_type_layout = QtWidgets.QVBoxLayout()
        chain_type_grp = grpbox("Fk/Ik chain type", chain_type_layout)
        chain_type_layout.addWidget(self.fk_ik_type_cbbox)

        checkbox_layout = QtWidgets.QVBoxLayout()
        ik_layout = QtWidgets.QHBoxLayout()
        ik_text = QtWidgets.QLabel("IK ctrls :")
        ik_layout.addWidget(ik_text)
        ik_layout.addWidget(self.ik_creation_switch)
        stretch_layout = QtWidgets.QHBoxLayout()
        stretch_text = QtWidgets.QLabel("stretch/squash :")
        stretch_layout.addWidget(stretch_text)
        stretch_layout.addWidget(self.stretch_creation_switch)
        clavicle_layout = QtWidgets.QHBoxLayout()
        clavicle_text = QtWidgets.QLabel("clavicle :")
        clavicle_layout.addWidget(clavicle_text)
        clavicle_layout.addWidget(self.clavicle_creation_switch)
        raz_ik_ctrls_layout = QtWidgets.QHBoxLayout()
        raz_ik_ctrls_text = QtWidgets.QLabel("\"Freez\" ik ctrls :")
        raz_ik_ctrls_layout.addWidget(raz_ik_ctrls_text)
        raz_ik_ctrls_layout.addWidget(self.raz_ik_ctrls)
        raz_fk_ctrls_layout = QtWidgets.QHBoxLayout()
        raz_fk_ctrls_text = QtWidgets.QLabel("\"Freez\" fk ctrls :")
        raz_fk_ctrls_layout.addWidget(raz_fk_ctrls_text)
        raz_fk_ctrls_layout.addWidget(self.raz_fk_ctrls)
        checkbox_layout.addLayout(ik_layout)
        checkbox_layout.addLayout(stretch_layout)
        checkbox_layout.addLayout(clavicle_layout)
        checkbox_layout.addLayout(raz_ik_ctrls_layout)
        checkbox_layout.addLayout(raz_fk_ctrls_layout)

        deform_layout = QtWidgets.QVBoxLayout()
        deform_grp = grpbox("Deformation", deform_layout)
        deform_switch_layout = QtWidgets.QHBoxLayout()
        deform_switch_text = QtWidgets.QLabel("deformation chain :")
        deform_switch_layout.addWidget(deform_switch_text)
        deform_switch_layout.addWidget(self.deform_chain_creation_switch)
        jnts_layout = QtWidgets.QVBoxLayout()
        jnts_thigh_text = QtWidgets.QLabel("How many thigh jnts :")
        jnts_layout.addWidget(jnts_thigh_text)
        jnts_layout.addWidget(self.how_many_thigh_jnts)
        jnts_calf_text = QtWidgets.QLabel("How many calf jnts :")
        jnts_layout.addWidget(jnts_calf_text)
        jnts_layout.addWidget(self.how_many_calf_jnts)
        deform_layout.addLayout(deform_switch_layout)
        deform_layout.addLayout(jnts_layout)

        options_layout.addLayout(checkbox_layout)

        main_layout.addWidget(select_parent_grp)
        main_layout.addWidget(side_grp)
        main_layout.addWidget(chain_type_grp)
        main_layout.addWidget(options_grp)
        main_layout.addWidget(deform_grp)
        main_layout.addWidget(select_spaces_grp)
        main_layout.addWidget(space_list_grp)
        main_layout.addWidget(self.prebuild_btn)
        self.setLayout(main_layout)
class Controller(RigController):
    def __init__(self, model, view):
        """
        Args:
            model (Model):
            view (View):
        """
        # Guide locators created/re-used by prebuild().
        self.guides_grp = None
        self.guides = []
        self.guides_names = []
        # Side handling: "Left" -> +1, "Right" -> -1 (set in prebuild()).
        self.side = {}
        self.side_coef = 0
        # Joints and controls populated during execute().
        self.created_skn_jnts = []
        self.clavicle_jnt = None
        self.created_fk_jnts = []
        self.created_ik_jnts = []
        self.created_fk_ctrls = []
        self.created_ik_ctrls = []
        self.created_ctrtl_jnts = []
        self.created_fk_shapes = []
        self.clavicle_ctrl = None
        self.option_ctrl = None
        # Display-only plane connecting the three leg guides.
        self.plane = None
        self.clavicle_ik_ctrl = None
        self.ankle_fk_pos_reader = None
        self.jnt_const_group = None
        self.created_half_bones = []
        self.jnts_to_skin = []
        self.ankle_output = None
        self.knee_bend_ctrl = None
        RigController.__init__(self, model, view)
    def on_how_many_thigh_jnts_changed(self, value):
        """Store the requested thigh deform-joint count on the model."""
        self.model.how_many_thigh_jnts = value
    def on_how_many_calf_jnts_changed(self, value):
        """Store the requested calf deform-joint count on the model."""
        self.model.how_many_calf_jnts = value
def on_deform_chain_creation_switch_changed(self, state):
self.model.deform_chain_creation_switch = is_checked(state)
if state == 0:
self.view.how_many_thigh_jnts.setEnabled(False)
self.view.how_many_calf_jnts.setEnabled(False)
else:
self.view.how_many_thigh_jnts.setEnabled(True)
self.view.how_many_calf_jnts.setEnabled(True)
def prebuild(self):
if self.model.clavicle_creation_switch:
self.create_temporary_outputs(["hip_clavicle_OUTPUT", "hip_OUTPUT", "knee_OUTPUT", "ankle_OUTPUT"])
else:
self.create_temporary_outputs(["hip_OUTPUT", "knee_OUTPUT", "ankle_OUTPUT"])
self.guides_names = ["{0}_hip_GUIDE".format(self.model.module_name),
"{0}_knee_GUIDE".format(self.model.module_name),
"{0}_ankle_GUIDE".format(self.model.module_name)]
if self.model.clavicle_creation_switch:
self.guides_names.append("{0}_clavicle_GUIDE".format(self.model.module_name))
self.side = {"Left": 1, "Right": -1}
self.side_coef = self.side.get(self.model.side)
if self.guide_check(self.guides_names):
self.guides = pmc.ls("{0}_hip_GUIDE".format(self.model.module_name),
"{0}_knee_GUIDE".format(self.model.module_name),
"{0}_ankle_GUIDE".format(self.model.module_name))
if self.model.clavicle_creation_switch:
self.guides.append(pmc.ls("{0}_clavicle_GUIDE".format(self.model.module_name))[0])
else:
if pmc.objExists("{0}_clavicle_GUIDE".format(self.model.module_name)):
pmc.delete("{0}_clavicle_GUIDE".format(self.model.module_name))
if pmc.objExists("{0}_leg_plane".format(self.model.module_name)):
pmc.delete("{0}_leg_plane".format(self.model.module_name))
self.plane = pmc.ls(pmc.polyCreateFacet(p=[(0, 0, 0), (0, 0, 0), (0, 0, 0)],
n="{0}_leg_plane".format(self.model.module_name), ch=0))[0]
self.guides[0].getShape().worldPosition[0] >> self.plane.getShape().pnts[0]
self.guides[1].getShape().worldPosition[0] >> self.plane.getShape().pnts[1]
self.guides[2].getShape().worldPosition[0] >> self.plane.getShape().pnts[2]
self.plane.setAttr("translateX", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("translateY", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("translateZ", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("rotateX", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("rotateY", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("rotateZ", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("scaleX", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("scaleY", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("scaleZ", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("overrideEnabled", 1)
self.plane.setAttr("overrideDisplayType", 2)
self.guides_grp = pmc.ls("{0}_guides".format(self.model.module_name))[0]
pmc.parent(self.plane, self.guides_grp)
self.guides_grp.setAttr("visibility", 1)
self.view.refresh_view()
pmc.select(cl=1)
return
hip_guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guides_names[0])
knee_guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guides_names[1])
ankle_guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guides_names[2])
hip_guide.setAttr("translate", (2 * self.side_coef, 7, 0))
knee_guide.setAttr("translate", (2 * self.side_coef, 4, -0.0001))
ankle_guide.setAttr("translate", (2 * self.side_coef, 1, 0))
self.plane = pmc.ls(pmc.polyCreateFacet(p=[(0, 0, 0), (0, 0, 0), (0, 0, 0)],
n="{0}_leg_plane".format(self.model.module_name), ch=0))[0]
hip_guide.getShape().worldPosition[0] >> self.plane.getShape().pnts[0]
knee_guide.getShape().worldPosition[0] >> self.plane.getShape().pnts[1]
ankle_guide.getShape().worldPosition[0] >> self.plane.getShape().pnts[2]
self.plane.setAttr("translateX", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("translateY", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("translateZ", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("rotateX", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("rotateY", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("rotateZ", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("scaleX", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("scaleY", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("scaleZ", lock=True, keyable=False, channelBox=False)
self.plane.setAttr("overrideEnabled", 1)
self.plane.setAttr("overrideDisplayType", 2)
self.guides = [hip_guide, knee_guide, ankle_guide]
if self.model.clavicle_creation_switch:
clavicle_guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guides_names[3])
clavicle_guide.setAttr("translate", (1 * self.side_coef, 7.5, 0.5))
self.guides.append(clavicle_guide)
self.guides_grp = self.group_guides(self.guides)
pmc.parent(self.plane, self.guides_grp)
self.view.refresh_view()
pmc.select(cl=1)
    def execute(self):
        """Build the leg rig from the guides: skinning joints, option
        control, optional clavicle, then either the three-chain FK/IK
        setup or the single-chain setup, plus outputs, local spaces and
        optional control freezing."""
        self.prebuild()

        self.delete_existing_objects()
        self.connect_to_parent()

        self.create_skn_jnts()
        self.create_options_ctrl()
        if self.model.clavicle_creation_switch:
            self.create_clavicle_ctrl()

        if self.model.fk_ik_type == "three_chains":
            self.create_and_connect_fk_ik_jnts()
            self.create_fk()
            if self.model.ik_creation_switch:
                self.create_ik()
            if self.model.stretch_creation_switch:
                self.connect_fk_stretch(self.created_fk_jnts, self.created_fk_ctrls)
                if self.model.ik_creation_switch:
                    self.connect_ik_stretch(self.created_ik_jnts, self.created_ik_ctrls, self.side_coef,
                                            self.created_fk_ctrls[0].getParent(), self.created_ik_ctrls[0],
                                            self.ankle_fk_pos_reader)
            self.create_outputs()
            if self.model.raz_ik_ctrls:
                # Zero out the IK control so it sits on the ankle joint.
                rig_lib.raz_ik_ctrl_translate_rotate(self.created_ik_ctrls[0], self.created_ik_jnts[-1], self.side_coef)
            self.create_local_spaces()
            if self.model.raz_fk_ctrls:
                for i, ctrl in enumerate(self.created_fk_ctrls):
                    rig_lib.raz_fk_ctrl_rotate(ctrl, self.created_fk_jnts[i], self.model.stretch_creation_switch)

        if self.model.fk_ik_type == "one_chain":
            self.create_and_connect_ctrl_jnts()
            self.create_one_chain_fk()
            self.create_one_chain_ik()
            if self.model.stretch_creation_switch:
                self.connect_one_chain_fk_ik_stretch(self.created_ctrtl_jnts, self.created_ik_ctrls[0],
                                                     self.option_ctrl, self.created_skn_jnts)
            self.create_ik_knee_snap()
            if self.model.deform_chain_creation_switch:
                self.create_one_chain_half_bones()
                # Each append stores the *list* of intermediate deform joints
                # (endpoints stripped via [1:-1]).
                self.jnts_to_skin.append(self.create_deformation_chain("{0}_hip_to_knee".format(self.model.module_name),
                                                                       self.created_skn_jnts[0], self.created_skn_jnts[1],
                                                                       self.created_ctrtl_jnts[0], self.created_ctrtl_jnts[1],
                                                                       self.option_ctrl, self.model.how_many_thigh_jnts,
                                                                       self.side_coef)[1:-1])
                self.jnts_to_skin.append(self.create_deformation_chain("{0}_knee_to_ankle".format(self.model.module_name),
                                                                       self.created_skn_jnts[1], self.created_skn_jnts[2],
                                                                       self.created_ctrtl_jnts[1], self.created_ctrtl_jnts[2],
                                                                       self.option_ctrl, self.model.how_many_calf_jnts,
                                                                       self.side_coef)[1:-1])
            self.create_outputs()
            if self.model.raz_ik_ctrls:
                rig_lib.raz_one_chain_ik_ctrl_translate_rotate(self.created_ik_ctrls[0])
                # Snap the pole-vector control onto the knee position.
                pmc.xform(self.created_ik_ctrls[2], ws=1, translation=(pmc.xform(self.created_ctrtl_jnts[1], q=1, ws=1,
                                                                                 translation=1)))
            self.create_local_spaces()
            if self.model.raz_fk_ctrls:
                # Toggle fkIk once so Maya evaluates the chain before zeroing.
                self.option_ctrl.setAttr("fkIk", 1)
                pmc.refresh()
                self.option_ctrl.setAttr("fkIk", 0)
                for i, ctrl in enumerate(self.created_ctrtl_jnts):
                    if self.model.deform_chain_creation_switch:
                        rig_lib.raz_one_chain_ikfk_fk_ctrl_rotate(ctrl)
                    else:
                        rig_lib.raz_one_chain_ikfk_fk_ctrl_rotate(ctrl, self.created_skn_jnts[i])

        self.clean_rig()
        pmc.select(cl=1)
    def create_skn_jnts(self):
        """Create the hip/knee/ankle SKN joint chain (plus the hip clavicle
        SKN joint when clavicle creation is enabled).

        Temporary duplicates of the guides are oriented with the help of a
        polygon facet built through the three guide positions (the plane of
        the leg); every temporary node is deleted before returning.
        Fills self.created_skn_jnts, self.jnts_to_skin, self.jnt_const_group
        and (optionally) self.clavicle_jnt.
        """
        # Work on duplicates so the original guides stay untouched.
        duplicates_guides = []
        for guide in self.guides:
            duplicate = guide.duplicate(n="{0}_duplicate".format(guide))[0]
            duplicates_guides.append(duplicate)
        # Triangular facet through hip/knee/ankle: its normal defines the leg plane.
        leg_plane = pmc.polyCreateFacet(p=[pmc.xform(duplicates_guides[0], q=1, ws=1, translation=1),
                                           pmc.xform(duplicates_guides[1], q=1, ws=1, translation=1),
                                           pmc.xform(duplicates_guides[2], q=1, ws=1, translation=1)],
                                        n="{0}_temporary_leg_plane".format(self.model.module_name), ch=1)[0]
        leg_plane_face = pmc.ls(leg_plane)[0].f[0]
        for guide in duplicates_guides:
            guide.setAttr("rotateOrder", 4)
        if self.model.clavicle_creation_switch:
            # Aim the clavicle guide at the hip guide (Y down the chain,
            # mirrored with side_coef); the constraint is baked then deleted.
            clav_const = pmc.aimConstraint(duplicates_guides[0], duplicates_guides[3], maintainOffset=0,
                                           aimVector=(0.0, 1.0 * self.side_coef, 0.0),
                                           upVector=(0.0, 0.0, 1.0 * self.side_coef), worldUpType="vector",
                                           worldUpVector=(0.0, 0.0, 1.0))
            pmc.delete(clav_const)
        # Orient hip and knee guides: X along the leg-plane normal, up axis
        # toward the next guide down the chain.
        hip_const = pmc.normalConstraint(leg_plane_face, duplicates_guides[0], aimVector=(1.0, 0.0, 0.0),
                                         upVector=(0.0, 1.0 * self.side_coef, 0.0), worldUpType="object",
                                         worldUpObject=duplicates_guides[1])
        knee_cons = pmc.normalConstraint(leg_plane_face, duplicates_guides[1], aimVector=(1.0, 0.0, 0.0),
                                         upVector=(0.0, 1.0 * self.side_coef, 0.0), worldUpType="object",
                                         worldUpObject=duplicates_guides[2])
        pmc.delete(hip_const)
        pmc.delete(knee_cons)
        pmc.parent(duplicates_guides[1], duplicates_guides[0])
        pmc.parent(duplicates_guides[2], duplicates_guides[1])
        # Temporary parent flipping the duplicated chain into the module's
        # reference orientation (side-dependent X flip, 180 around Z).
        temp_guide_orient = pmc.group(em=1, n="temp_guide_orient_grp")
        temp_guide_orient.setAttr("translate", pmc.xform(duplicates_guides[0], q=1, ws=1, translation=1))
        temp_guide_orient.setAttr("rotate", 90 * (1 - self.side_coef), 0, 180)
        pmc.parent(duplicates_guides[0], temp_guide_orient, r=0)
        if self.model.clavicle_creation_switch:
            pmc.select(cl=1)
            self.clavicle_jnt = pmc.joint(p=(pmc.xform(duplicates_guides[3], q=1, ws=1, translation=1)),
                                          n="{0}_hip_clavicle_SKN".format(self.model.module_name))
            self.clavicle_jnt.setAttr("rotateOrder", 4)
            self.clavicle_jnt.setAttr("jointOrientX", 90 * (1 - self.side_coef))
            self.clavicle_jnt.setAttr("jointOrientZ", 180)
            pmc.xform(self.clavicle_jnt, ws=1, rotation=(pmc.xform(duplicates_guides[3], q=1, ws=1, rotation=1)))
            # End joint marks the clavicle length; it is not added to jnts_to_skin.
            clav_end = pmc.joint(p=(pmc.xform(duplicates_guides[0], q=1, ws=1, translation=1)),
                                 n="{0}_clavicle_end_JNT".format(self.model.module_name))
            clav_end.setAttr("rotateOrder", 4)
            pmc.parent(self.clavicle_jnt, self.jnt_input_grp, r=0)
        pmc.select(cl=1)
        hip_jnt = pmc.joint(p=(pmc.xform(duplicates_guides[0], q=1, ws=1, translation=1)),
                            n="{0}_hip_SKN".format(self.model.module_name))
        hip_jnt.setAttr("rotateOrder", 4)
        hip_jnt.setAttr("jointOrientX", 90 * (1 - self.side_coef))
        hip_jnt.setAttr("jointOrientZ", 180)
        hip_jnt.setAttr("rotate", pmc.xform(duplicates_guides[0], q=1, rotation=1))
        knee_jnt = pmc.joint(p=(pmc.xform(duplicates_guides[1], q=1, ws=1, translation=1)),
                             n="{0}_knee_SKN".format(self.model.module_name))
        knee_jnt.setAttr("rotateOrder", 4)
        knee_jnt.setAttr("rotate", pmc.xform(duplicates_guides[1], q=1, rotation=1))
        ankle_jnt = pmc.joint(p=(pmc.xform(duplicates_guides[2], q=1, ws=1, translation=1)),
                              n="{0}_ankle_SKN".format(self.model.module_name))
        ankle_jnt.setAttr("rotateOrder", 4)
        pmc.parent(hip_jnt, self.jnt_input_grp, r=0)
        # Group used later to constrain the whole SKN chain (e.g. to the clavicle).
        self.jnt_const_group = pmc.group(em=1, n="{0}_jnts_const_GRP".format(self.model.module_name))
        self.jnt_const_group.setAttr("translate", pmc.xform(hip_jnt, q=1, ws=1, translation=1))
        pmc.parent(self.jnt_const_group, self.jnt_input_grp, r=0)
        if self.model.clavicle_creation_switch:
            pmc.pointConstraint(pmc.listRelatives(self.clavicle_jnt, children=1)[0], self.jnt_const_group, maintainOffset=0)
        pmc.parent(hip_jnt, self.jnt_const_group, r=0)
        self.created_skn_jnts = [hip_jnt, knee_jnt, ankle_jnt]
        self.jnts_to_skin = self.created_skn_jnts[:]
        if self.model.clavicle_creation_switch:
            self.jnts_to_skin.append(self.clavicle_jnt)
        # Remove every temporary helper node.
        pmc.delete(duplicates_guides[:])
        pmc.delete(temp_guide_orient)
        pmc.delete(leg_plane)
def create_options_ctrl(self):
self.option_ctrl = rig_lib.little_cube("{0}_option_CTRL".format(self.model.module_name))
option_ofs = pmc.group(self.option_ctrl, n="{0}_option_ctrl_OFS".format(self.model.module_name), r=1)
pmc.parent(option_ofs, self.ctrl_input_grp)
rig_lib.matrix_constraint(self.created_skn_jnts[-1], option_ofs, srt="trs")
ctrl_shape = self.option_ctrl.getShape()
pmc.move(ctrl_shape, [-2.5 * self.side_coef, 0, 0], relative=1, objectSpace=1, worldSpaceDistance=1)
self.option_ctrl.addAttr("fkIk", attributeType="float", defaultValue=0, hidden=0, keyable=1, hasMaxValue=1,
hasMinValue=1, maxValue=1, minValue=0)
if self.model.clavicle_creation_switch:
self.option_ctrl.addAttr("hipClavicleIkCtrl", attributeType="bool", defaultValue=0, hidden=0, keyable=1)
    def create_clavicle_ctrl(self):
        """Create the hip clavicle controllers: a stick-ball FK ctrl driving
        the clavicle SKN joint, and a cube IK ctrl (child of the FK ctrl)
        carrying a single-chain IK handle on the clavicle.
        """
        clavicle_ik_handle = pmc.ikHandle(n="{0}_hip_clavicle_ik_HDL".format(self.model.module_name), startJoint=self.clavicle_jnt,
                                          endEffector=pmc.listRelatives(self.clavicle_jnt, children=1)[0],
                                          solver="ikSCsolver")[0]
        ik_effector = pmc.listRelatives(self.clavicle_jnt, children=1)[-1]
        ik_effector.rename("{0}_hip_clavicle_ik_EFF".format(self.model.module_name))
        clav_shape = rig_lib.stick_ball("{0}_hip_clavicle_CTRL_shape".format(self.model.module_name))
        # Push every cv except cv 0 away from the joint and mirror it for the side.
        cvs = clav_shape.getShape().cv[:]
        for i, cv in enumerate(cvs):
            if i != 0:
                pmc.xform(cv, ws=1, translation=(pmc.xform(cv, q=1, ws=1, translation=1)[0] - (2 * self.side_coef),
                                                 pmc.xform(cv, q=1, ws=1, translation=1)[1],
                                                 pmc.xform(cv, q=1, ws=1, translation=1)[2] * -self.side_coef))
        self.clavicle_ctrl = rig_lib.create_jnttype_ctrl("{0}_hip_clavicle_CTRL".format(self.model.module_name), clav_shape,
                                                         drawstyle=2, rotateorder=4)
        pmc.select(cl=1)
        # Joint-type offset carrying the module's orientation convention.
        clav_ofs = pmc.joint(p=(0, 0, 0), n="{0}_hip_clavicle_ctrl_OFS".format(self.model.module_name))
        clav_ofs.setAttr("rotateOrder", 4)
        clav_ofs.setAttr("drawStyle", 2)
        pmc.parent(self.clavicle_ctrl, clav_ofs)
        clav_ofs.setAttr("translate", pmc.xform(self.clavicle_jnt, q=1, ws=1, translation=1))
        clav_ofs.setAttr("jointOrient", (90 * (1 - self.side_coef), 0, 180))
        pmc.parent(clav_ofs, self.ctrl_input_grp)
        pmc.parentConstraint(self.clavicle_ctrl, self.clavicle_jnt, maintainOffset=1)
        clav_ik_shape = rig_lib.medium_cube("{0}_hip_clavicle_ik_CTRL_shape".format(self.model.module_name))
        self.clavicle_ik_ctrl = rig_lib.create_jnttype_ctrl("{0}_hip_clavicle_ik_CTRL".format(self.model.module_name),
                                                            clav_ik_shape, drawstyle=2, rotateorder=4)
        pmc.select(cl=1)
        clav_ik_ofs = pmc.joint(p=(0, 0, 0), n="{0}_hip_clavicle_ik_ctrl_OFS".format(self.model.module_name))
        clav_ik_ofs.setAttr("rotateOrder", 4)
        clav_ik_ofs.setAttr("drawStyle", 2)
        pmc.parent(self.clavicle_ik_ctrl, clav_ik_ofs)
        # Place the IK ctrl at the clavicle end joint, under the FK ctrl.
        clav_ik_ofs.setAttr("translate", pmc.xform(pmc.listRelatives(self.clavicle_jnt, children=1)[0], q=1, ws=1,
                                                   translation=1))
        pmc.parent(clav_ik_ofs, self.clavicle_ctrl, r=0)
        pmc.parent(clavicle_ik_handle, self.clavicle_ik_ctrl)
        clavicle_ik_handle.setAttr("visibility", 0)
        self.connect_one_jnt_ik_stretch(pmc.listRelatives(self.clavicle_jnt, children=1)[0], self.clavicle_ctrl,
                                        self.clavicle_ik_ctrl)
def create_and_connect_fk_ik_jnts(self):
hip_fk_jnt = \
self.created_skn_jnts[0].duplicate(n="{0}_hip_fk_JNT".format(self.model.module_name))[0]
knee_fk_jnt = pmc.ls("{0}_hip_fk_JNT|{0}_knee_SKN".format(self.model.module_name))[0]
ankle_fk_jnt = pmc.ls("{0}_hip_fk_JNT|{0}_knee_SKN|{0}_ankle_SKN".format(self.model.module_name))[0]
knee_fk_jnt.rename("{0}_knee_fk_JNT".format(self.model.module_name))
ankle_fk_jnt.rename("{0}_ankle_fk_JNT".format(self.model.module_name))
self.created_fk_jnts = [hip_fk_jnt, knee_fk_jnt, ankle_fk_jnt]
hip_ik_jnt = self.created_skn_jnts[0].duplicate(n="{0}_hip_ik_JNT".format(self.model.module_name))[0]
knee_ik_jnt = pmc.ls("{0}_hip_ik_JNT|{0}_knee_SKN".format(self.model.module_name))[0]
ankle_ik_jnt = pmc.ls("{0}_hip_ik_JNT|{0}_knee_SKN|{0}_ankle_SKN".format(self.model.module_name))[0]
knee_ik_jnt.rename("{0}_knee_ik_JNT".format(self.model.module_name))
ankle_ik_jnt.rename("{0}_ankle_ik_JNT".format(self.model.module_name))
self.created_ik_jnts = [hip_ik_jnt, knee_ik_jnt, ankle_ik_jnt]
for i, skn_jnt in enumerate(self.created_skn_jnts):
pair_blend = pmc.createNode("pairBlend", n="{0}_ik_fk_switch_PAIRBLEND".format(skn_jnt))
blend_color = pmc.createNode("blendColors", n="{0}_ik_fk_switch_BLENDCOLORS".format(skn_jnt))
self.created_fk_jnts[i].translate >> pair_blend.inTranslate1
self.created_fk_jnts[i].rotate >> pair_blend.inRotate1
self.created_fk_jnts[i].scale >> blend_color.color2
self.created_ik_jnts[i].translate >> pair_blend.inTranslate2
self.created_ik_jnts[i].rotate >> pair_blend.inRotate2
self.created_ik_jnts[i].scale >> blend_color.color1
pair_blend.outTranslate >> skn_jnt.translate
pair_blend.outRotate >> skn_jnt.rotate
blend_color.output >> skn_jnt.scale
self.option_ctrl.fkIk >> pair_blend.weight
self.option_ctrl.fkIk >> blend_color.blender
    def create_fk(self):
        """Create the three FK circle controllers (hip, knee, ankle) of the
        three-chains setup and connect them to the FK joint chain.

        The hip ctrl lives under a joint-type OFS carrying the module's base
        orientation; knee and ankle ctrls are parented in chain with their
        jointOrient zeroed so their rotate values match the FK joints.
        """
        hip_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
                               n="{0}_hip_fk_CTRL_shape".format(self.model.module_name), ch=0)[0]
        hip_ctrl = rig_lib.create_jnttype_ctrl("{0}_hip_fk_CTRL".format(self.model.module_name), hip_shape,
                                               drawstyle=0, rotateorder=4)
        hip_ctrl.setAttr("radius", 0)
        pmc.select(cl=1)
        # Joint-type offset holding the module's base orientation.
        hip_ofs = pmc.joint(p=(0, 0, 0), n="{0}_hip_fk_ctrl_OFS".format(self.model.module_name))
        hip_ofs.setAttr("rotateOrder", 4)
        hip_ofs.setAttr("drawStyle", 2)
        pmc.parent(hip_ctrl, hip_ofs)
        hip_ofs.setAttr("translate", pmc.xform(self.created_fk_jnts[0], q=1, ws=1, translation=1))
        hip_ofs.setAttr("jointOrientX", 90 * (1 - self.side_coef))
        hip_ofs.setAttr("jointOrientZ", 180)
        hip_ctrl.setAttr("rotate", pmc.xform(self.created_fk_jnts[0], q=1, rotation=1))
        pmc.parent(hip_ofs, self.ctrl_input_grp, r=0)
        knee_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
                                n="{0}_knee_fk_CTRL_shape".format(self.model.module_name), ch=0)[0]
        knee_ctrl = rig_lib.create_jnttype_ctrl("{0}_knee_fk_CTRL".format(self.model.module_name), knee_shape,
                                                drawstyle=0, rotateorder=4)
        knee_ctrl.setAttr("radius", 0)
        knee_ctrl.setAttr("translate", pmc.xform(self.created_fk_jnts[1], q=1, ws=1, translation=1))
        knee_ctrl.setAttr("rotate", pmc.xform(self.created_fk_jnts[1], q=1, rotation=1))
        pmc.parent(knee_ctrl, hip_ctrl, r=0)
        pmc.reorder(knee_ctrl, front=1)
        # Zero the jointOrient (set after parenting) so ctrl rotations match
        # the FK joint's local rotation values.
        knee_ctrl.setAttr("jointOrient", (0, 0, 0))
        ankle_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
                                 n="{0}_ankle_fk_CTRL_shape".format(self.model.module_name), ch=0)[0]
        ankle_ctrl = rig_lib.create_jnttype_ctrl("{0}_ankle_fk_CTRL".format(self.model.module_name), ankle_shape,
                                                 drawstyle=0, rotateorder=4)
        ankle_ctrl.setAttr("radius", 0)
        ankle_ctrl.setAttr("translate", pmc.xform(self.created_fk_jnts[2], q=1, ws=1, translation=1))
        ankle_ctrl.setAttr("rotate", pmc.xform(self.created_fk_jnts[2], q=1, rotation=1))
        pmc.parent(ankle_ctrl, knee_ctrl, r=0)
        pmc.reorder(ankle_ctrl, front=1)
        ankle_ctrl.setAttr("jointOrient", (0, 0, 0))
        self.created_fk_ctrls = [hip_ctrl, knee_ctrl, ankle_ctrl]
        # Only rotation drives the FK joints; the ankle ctrl also drives scale.
        for i, ctrl in enumerate(self.created_fk_ctrls):
            ctrl.rotate >> self.created_fk_jnts[i].rotate
            if ctrl == self.created_fk_ctrls[-1]:
                ctrl.scale >> self.created_fk_jnts[i].scale
        if self.model.clavicle_creation_switch:
            # pmc.pointConstraint(pmc.listRelatives(self.clavicle_jnt, children=1)[0], hip_ofs, maintainOffset=1)
            # With a clavicle, the whole FK chain follows the clavicle IK ctrl.
            pmc.parent(hip_ofs, self.clavicle_ik_ctrl)
    def create_ik(self):
        """Create the IK layer of the three-chains leg: RP-solver handle,
        ankle IK ctrl, manual/auto pole-vector ctrls and the FK-ankle pose
        reader locator.

        The FK ctrls are temporarily zeroed so every IK node is placed from
        the bind pose; their stored rotations are restored before returning.
        """
        # Remember the current FK pose; the ctrls are zeroed below.
        fk_ctrl_01_value = pmc.xform(self.created_fk_ctrls[0], q=1, rotation=1)
        fk_ctrl_02_value = pmc.xform(self.created_fk_ctrls[1], q=1, rotation=1)
        fk_ctrl_03_value = pmc.xform(self.created_fk_ctrls[2], q=1, rotation=1)
        ik_handle = pmc.ikHandle(n=("{0}_ik_HDL".format(self.model.module_name)),
                                 startJoint=self.created_ik_jnts[0], endEffector=self.created_ik_jnts[-1],
                                 solver="ikRPsolver")[0]
        ik_effector = pmc.listRelatives(self.created_ik_jnts[-2], children=1)[1]
        ik_effector.rename("{0}_ik_EFF".format(self.model.module_name))
        ik_shape = rig_lib.medium_cube("{0}_ankle_ik_CTRL_shape".format(self.model.module_name))
        ik_ctrl = rig_lib.create_jnttype_ctrl("{0}_ankle_ik_CTRL".format(self.model.module_name), ik_shape, drawstyle=2,
                                              rotateorder=4)
        pmc.select(cl=1)
        ik_ctrl_ofs = pmc.joint(p=(0, 0, 0), n="{0}_ankle_ik_ctrl_OFS".format(self.model.module_name))
        ik_ctrl_ofs.setAttr("rotateOrder", 4)
        ik_ctrl_ofs.setAttr("drawStyle", 2)
        pmc.parent(ik_ctrl, ik_ctrl_ofs)
        # Zero the FK pose so the IK nodes are positioned from the bind pose.
        self.created_fk_ctrls[0].setAttr("rotate", (0, 0, 0))
        self.created_fk_ctrls[1].setAttr("rotate", (0, 0, 0))
        self.created_fk_ctrls[2].setAttr("rotate", (0, 0, 0))
        ik_ctrl_ofs.setAttr("translate", pmc.xform(self.created_fk_jnts[2], q=1, ws=1, translation=1))
        pmc.parent(ik_handle, ik_ctrl_ofs, r=0)
        ik_ctrl.setAttr("translate", pmc.xform(ik_handle, q=1, translation=1))
        pmc.parent(ik_handle, ik_ctrl, r=0)
        if self.model.clavicle_creation_switch:
            pmc.parent(ik_ctrl_ofs, self.clavicle_ik_ctrl)
        else:
            pmc.parent(ik_ctrl_ofs, self.ctrl_input_grp)
        ik_ctrl.setAttr("translate", (0, 0, 0))
        manual_pole_vector_shape = rig_lib.jnt_shape_curve(
            "{0}_manual_poleVector_CTRL_shape".format(self.model.module_name))
        manual_pole_vector = rig_lib.create_jnttype_ctrl("{0}_manual_poleVector_CTRL".format(self.model.module_name),
                                                         manual_pole_vector_shape, drawstyle=2)
        manual_pv_ofs = pmc.group(manual_pole_vector, n="{0}_manual_poleVector_ctrl_OFS".format(self.model.module_name))
        # Manual pole vector at the knee, offset along world Z by the knee's
        # local Y length (mirrored with side_coef).
        manual_pv_ofs.setAttr("translate", (pmc.xform(self.created_fk_jnts[1], q=1, ws=1, translation=1)[0],
                                            pmc.xform(self.created_fk_jnts[1], q=1, ws=1, translation=1)[1],
                                            pmc.xform(self.created_fk_jnts[1], q=1, ws=1, translation=1)[2] + (
                                                (pmc.xform(self.created_fk_jnts[1], q=1, translation=1)[
                                                     1]) * self.side_coef)))
        auto_pole_vector_shape = rig_lib.jnt_shape_curve(
            "{0}_auto_poleVector_CTRL_shape".format(self.model.module_name))
        auto_pole_vector = rig_lib.create_jnttype_ctrl("{0}_auto_poleVector_CTRL".format(self.model.module_name),
                                                       auto_pole_vector_shape, drawstyle=2)
        auto_pv_ofs = pmc.group(auto_pole_vector, n="{0}_auto_poleVector_ctrl_OFS".format(self.model.module_name))
        auto_pv_ofs.setAttr("translate", (pmc.xform(self.created_fk_jnts[0], q=1, ws=1, translation=1)[0],
                                          pmc.xform(self.created_fk_jnts[0], q=1, ws=1, translation=1)[1],
                                          pmc.xform(self.created_fk_jnts[0], q=1, ws=1, translation=1)[2]))
        # Enum on the IK ctrl switches the constraint weights between the
        # auto and manual pole-vector ctrls through condition nodes.
        ik_ctrl.addAttr("poleVector", attributeType="enum", enumName=["auto", "manual"], hidden=0, keyable=1)
        pole_vector_const = pmc.poleVectorConstraint(manual_pole_vector, auto_pole_vector, ik_handle)
        rig_lib.connect_condition_to_constraint("{0}.{1}W0".format(pole_vector_const, manual_pole_vector),
                                                ik_ctrl.poleVector, 1,
                                                "{0}_manual_poleVector_COND".format(ik_ctrl))
        rig_lib.connect_condition_to_constraint("{0}.{1}W1".format(pole_vector_const, auto_pole_vector),
                                                ik_ctrl.poleVector, 0,
                                                "{0}_auto_poleVector_COND".format(ik_ctrl))
        pmc.parent(manual_pv_ofs, self.ctrl_input_grp, r=0)
        pmc.parent(auto_pv_ofs, self.parts_grp, r=0)
        self.created_ik_jnts[1].setAttr("preferredAngleX", 90)
        # Rotation-only constraint: the ankle IK joint follows the ctrl's
        # orientation (with a side-dependent offset) but not its translation.
        const = pmc.parentConstraint(ik_ctrl, self.created_ik_jnts[-1], maintainOffset=1, skipTranslate=["x", "y", "z"])
        const.setAttr("target[0].targetOffsetRotate", (0, 90 * (1 - self.side_coef), 90 * (1 + self.side_coef)))
        const.setAttr("target[0].targetOffsetTranslate", (0, 0, 0))
        ik_ctrl.scale >> self.created_ik_jnts[-1].scale
        ik_ctrl.addAttr("legTwist", attributeType="float", defaultValue=0, hidden=0, keyable=1)
        # Auto pole vector aims at the handle, oriented from the IK ctrl.
        pmc.aimConstraint(ik_handle, auto_pv_ofs,
                          maintainOffset=1, aimVector=(0.0, -1.0, 0.0),
                          upVector=(1.0, 0.0, 0.0), worldUpType="objectrotation",
                          worldUpVector=(1.0, 0.0, 0.0), worldUpObject=ik_ctrl)
        ik_ctrl.legTwist >> ik_handle.twist
        self.created_ik_ctrls = [ik_ctrl, manual_pole_vector, auto_pole_vector]
        # Restore the FK pose saved at the top.
        self.created_fk_ctrls[0].setAttr("rotate", fk_ctrl_01_value)
        self.created_fk_ctrls[1].setAttr("rotate", fk_ctrl_02_value)
        self.created_fk_ctrls[2].setAttr("rotate", fk_ctrl_03_value)
        pmc.xform(manual_pole_vector, ws=1, translation=(pmc.xform(self.created_fk_jnts[1], q=1, ws=1, translation=1)))
        ik_handle.setAttr("visibility", 0)
        # Hidden locator on the FK ankle ctrl; its world rotation is read
        # below (and elsewhere) to align the IK ctrl with the FK pose.
        self.ankle_fk_pos_reader = pmc.spaceLocator(p=(0, 0, 0),
                                                    n="{0}_ankle_fk_pos_reader_LOC".format(self.model.module_name))
        self.ankle_fk_pos_reader.setAttr("rotateOrder", 4)
        self.ankle_fk_pos_reader.setAttr("visibility", 0)
        pmc.parent(self.ankle_fk_pos_reader, self.created_fk_ctrls[-1], r=1)
        self.ankle_fk_pos_reader.setAttr("rotate", (90 * (1 - self.side_coef), 0, 180))
        rig_lib.clean_ctrl(self.ankle_fk_pos_reader, 0, trs="trs")
        # Snap the IK ctrl and pole vectors onto the restored FK pose.
        pmc.xform(ik_ctrl, ws=1, translation=(pmc.xform(self.created_fk_jnts[-1], q=1, ws=1, translation=1)))
        pmc.xform(ik_ctrl, ws=1, rotation=(pmc.xform(self.ankle_fk_pos_reader, q=1, ws=1, rotation=1)))
        pmc.xform(auto_pole_vector, ws=1, translation=(pmc.xform(self.created_fk_jnts[1], q=1, ws=1, translation=1)))
    def create_local_spaces(self):
        """Create the space-switching setup for the leg ctrls.

        A SPACELOC locator is parented under each OUTPUT of the model's
        space_list; "space" enum attributes on the first FK ctrl and on the
        ankle IK ctrl drive the constraint weights through condition nodes.
        The FK space constraint is additionally gated by 1 - fkIk so it only
        acts in FK mode.  The pole-vector ctrl gets its own world/foot space.
        """
        spaces_names = []
        space_locs = []
        for space in self.model.space_list:
            name = str(space).replace("_OUTPUT", "")
            # The local_ctrl output is exposed to the animator as "world".
            if "local_ctrl" in name:
                name = "world"
            spaces_names.append(name)
            # Re-create the locator from scratch if it already exists.
            if pmc.objExists("{0}_{1}_SPACELOC".format(self.model.module_name, name)):
                pmc.delete("{0}_{1}_SPACELOC".format(self.model.module_name, name))
            space_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_{1}_SPACELOC".format(self.model.module_name, name))
            space_locs.append(space_loc)
        spaces_names.append("local")
        if self.model.fk_ik_type == "three_chains":
            fk_ctrls = self.created_fk_ctrls
        else:
            fk_ctrls = self.created_ctrtl_jnts
        if len(self.model.space_list) > 0:
            fk_ctrls[0].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)
            self.created_ik_ctrls[0].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)
            # 1 - fkIk: FK space constraints only apply in FK mode.
            fk_const_switch = pmc.createNode("plusMinusAverage", n="{0}_fk_ik_switch_invert_value_PMA".format(self.model.module_name))
            fk_const_switch.setAttr("operation", 2)
            fk_const_switch.setAttr("input1D[0]", 1)
            self.option_ctrl.fkIk >> fk_const_switch.input1D[1]
            for i, space in enumerate(self.model.space_list):
                space_locs[i].setAttr("translate", pmc.xform(self.created_skn_jnts[0], q=1, ws=1, translation=1))
                pmc.parent(space_locs[i], space)
                fk_space_const = pmc.orientConstraint(space_locs[i], fk_ctrls[0].getParent(), maintainOffset=0)
                ik_space_const = pmc.parentConstraint(space_locs[i], self.created_ik_ctrls[0].getParent(), maintainOffset=1)
                # pole_vector_const = pmc.parentConstraint(space_locs[i], self.created_ik_ctrls[1].getParent(), maintainOffset=1)
                rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(fk_space_const, space_locs[i], i),
                                                        fk_ctrls[0].space, i,
                                                        "{0}_{1}Space_COND".format(fk_ctrls[0], spaces_names[i]),
                                                        switch=fk_const_switch)
                rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(ik_space_const, space_locs[i], i),
                                                        self.created_ik_ctrls[0].space, i,
                                                        "{0}_{1}Space_COND".format(self.created_ik_ctrls[0], spaces_names[i]))
                # rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(pole_vector_const, space_locs[i], i),
                #                                         self.created_ik_ctrls[0].space, i,
                #                                         "{0}_{1}Space_COND".format(self.created_ik_ctrls[1], spaces_names[i]))
                # NOTE(review): this branch reads self.clavicle_jnt and
                # self.created_ctrtl_jnts without checking the matching
                # switches, and re-parents jnt_const_group on every loop
                # iteration -- confirm it is only reachable in the
                # one-chain + clavicle configuration.
                if not self.model.deform_chain_creation_switch:
                    pmc.parent(self.jnt_const_group, pmc.listRelatives(self.clavicle_jnt, children=1, type="joint")[0])
                    jnt_const_grp_const = pmc.orientConstraint(space_locs[i], self.jnt_const_group, maintainOffset=0)
                    rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(jnt_const_grp_const, space_locs[i], i),
                                                            self.created_ctrtl_jnts[0].space, i,
                                                            "{0}_{1}_COND".format(self.jnt_const_group, spaces_names[i]),
                                                            switch=fk_const_switch)
        # Pole-vector ctrl space: follow the world locator or the foot IK ctrl.
        self.created_ik_ctrls[1].addAttr("space", attributeType="enum", enumName=["world", "foot"], hidden=0, keyable=1)
        if pmc.objExists("{0}_world_SPACELOC".format(self.model.module_name)):
            world_loc = pmc.ls("{0}_world_SPACELOC".format(self.model.module_name))[0]
        else:
            world_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_world_SPACELOC".format(self.model.module_name))
            world_loc.setAttr("translate", pmc.xform(self.created_skn_jnts[0], q=1, ws=1, translation=1))
            world_parent = pmc.ls(regex=".*_local_ctrl_OUTPUT$")[0]
            pmc.parent(world_loc, world_parent)
        pole_vector_const = pmc.parentConstraint(world_loc, self.created_ik_ctrls[0], self.created_ik_ctrls[1].getParent(),
                                                 maintainOffset=1)
        rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(pole_vector_const, world_loc, 0),
                                                self.created_ik_ctrls[1].space, 0,
                                                "{0}_worldSpace_COND".format(self.created_ik_ctrls[0]))
        rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(pole_vector_const, self.created_ik_ctrls[0], 1),
                                                self.created_ik_ctrls[1].space, 1,
                                                "{0}_footSpace_COND".format(self.created_ik_ctrls[0]))
def clean_rig(self):
self.jnt_input_grp.setAttr("visibility", 0)
self.parts_grp.setAttr("visibility", 0)
self.guides_grp.setAttr("visibility", 0)
if self.model.side == "Left":
color_value = 6
else:
color_value = 13
rig_lib.clean_ctrl(self.option_ctrl, 9, trs="trs")
self.option_ctrl.setAttr("fkIk", 1)
invert_value = pmc.createNode("plusMinusAverage", n="{0}_fk_visibility_MDL".format(self.model.module_name))
invert_value.setAttr("input1D[0]", 1)
invert_value.setAttr("operation", 2)
self.option_ctrl.fkIk >> invert_value.input1D[1]
if self.model.clavicle_creation_switch:
rig_lib.clean_ctrl(self.clavicle_ctrl, color_value, trs="s")
rig_lib.clean_ctrl(self.clavicle_ik_ctrl, color_value, trs="rs")
rig_lib.clean_ctrl(self.clavicle_ik_ctrl.getShape(), color_value, trs="",
visibility_dependence=self.option_ctrl.hipClavicleIkCtrl)
if self.model.fk_ik_type == "three_chains":
fk_ctrls = self.created_fk_ctrls
else:
fk_ctrls = self.created_ctrtl_jnts
rig_lib.clean_ctrl(fk_ctrls[0], color_value, trs="ts",
visibility_dependence=invert_value.output1D)
rig_lib.clean_ctrl(fk_ctrls[0].getParent(), color_value, trs="trs")
rig_lib.clean_ctrl(fk_ctrls[1], color_value, trs="ts", visibility_dependence=invert_value.output1D)
rig_lib.clean_ctrl(fk_ctrls[2], color_value, trs="t", visibility_dependence=invert_value.output1D)
if self.model.ik_creation_switch:
rig_lib.clean_ctrl(self.created_ik_ctrls[0], color_value, trs="", visibility_dependence=self.option_ctrl.fkIk)
rig_lib.clean_ctrl(self.created_ik_ctrls[0].getParent(), color_value, trs="trs")
rig_lib.clean_ctrl(self.created_ik_ctrls[1].getParent(), color_value, trs="trs", visibility_dependence=self.option_ctrl.fkIk)
rig_lib.clean_ctrl(self.created_ik_ctrls[1], color_value, trs="rs", visibility_dependence=self.created_ik_ctrls[0].poleVector)
rig_lib.clean_ctrl(self.created_ik_ctrls[2].getParent(), color_value, trs="trs")
rig_lib.clean_ctrl(self.created_ik_ctrls[2], color_value, trs="trs")
if self.model.fk_ik_type == "one_chain":
blend_scale = pmc.createNode("blendColors", n="{0}_scale_blendColor".format(self.ankle_output))
self.option_ctrl.fkIk >> blend_scale.blender
self.created_ik_ctrls[0].scaleX >> blend_scale.color1R
self.created_ik_ctrls[0].scaleY >> blend_scale.color1G
self.created_ik_ctrls[0].scaleZ >> blend_scale.color1B
self.created_ctrtl_jnts[-1].scaleX >> blend_scale.color2R
self.created_ctrtl_jnts[-1].scaleY >> blend_scale.color2G
self.created_ctrtl_jnts[-1].scaleZ >> blend_scale.color2B
blend_scale.outputR >> self.ankle_output.scaleX
blend_scale.outputG >> self.ankle_output.scaleY
blend_scale.outputB >> self.ankle_output.scaleZ
if self.model.deform_chain_creation_switch:
rig_lib.clean_ctrl(self.knee_bend_ctrl, color_value, trs="s",
visibility_dependence=self.option_ctrl.kneeBendCtrl)
info_crv = rig_lib.signature_shape_curve("{0}_INFO".format(self.model.module_name))
info_crv.getShape().setAttr("visibility", 0)
info_crv.setAttr("hiddenInOutliner", 1)
info_crv.setAttr("translateX", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("translateY", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("translateZ", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("rotateX", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("rotateY", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("rotateZ", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("scaleX", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("scaleY", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("scaleZ", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("visibility", lock=True, keyable=False, channelBox=False)
info_crv.setAttr("overrideEnabled", 1)
info_crv.setAttr("overrideDisplayType", 2)
pmc.parent(info_crv, self.parts_grp)
rig_lib.add_parameter_as_extra_attr(info_crv, "Module", "back_leg")
rig_lib.add_parameter_as_extra_attr(info_crv, "parent_Module", self.model.selected_module)
rig_lib.add_parameter_as_extra_attr(info_crv, "parent_output", self.model.selected_output)
rig_lib.add_parameter_as_extra_attr(info_crv, "side", self.model.side)
rig_lib.add_parameter_as_extra_attr(info_crv, "ik_creation", self.model.ik_creation_switch)
rig_lib.add_parameter_as_extra_attr(info_crv, "stretch_creation", self.model.stretch_creation_switch)
rig_lib.add_parameter_as_extra_attr(info_crv, "raz_ik_ctrls", self.model.raz_ik_ctrls)
rig_lib.add_parameter_as_extra_attr(info_crv, "raz_fk_ctrls", self.model.raz_fk_ctrls)
rig_lib.add_parameter_as_extra_attr(info_crv, "clavicle_creation", self.model.clavicle_creation_switch)
rig_lib.add_parameter_as_extra_attr(info_crv, "fk_ik_type", self.model.fk_ik_type)
rig_lib.add_parameter_as_extra_attr(info_crv, "local_spaces", self.model.space_list)
rig_lib.add_parameter_as_extra_attr(info_crv, "deform_chain_creation", self.model.deform_chain_creation_switch)
rig_lib.add_parameter_as_extra_attr(info_crv, "how_many_thigh_jnts", self.model.how_many_thigh_jnts)
rig_lib.add_parameter_as_extra_attr(info_crv, "how_many_calf_jnts", self.model.how_many_calf_jnts)
if not pmc.objExists("jnts_to_SKN_SET"):
skn_set = pmc.createNode("objectSet", n="jnts_to_SKN_SET")
else:
skn_set = pmc.ls("jnts_to_SKN_SET", type="objectSet")[0]
for jnt in self.jnts_to_skin:
if type(jnt) == list:
for obj in jnt:
skn_set.add(obj)
else:
skn_set.add(jnt)
def create_outputs(self):
if self.model.clavicle_creation_switch:
rig_lib.create_output(name="{0}_hip_clavicle_OUTPUT".format(self.model.module_name), parent=self.clavicle_jnt)
if not self.model.deform_chain_creation_switch or self.model.fk_ik_type == "three_chains":
rig_lib.create_output(name="{0}_hip_OUTPUT".format(self.model.module_name), parent=self.created_skn_jnts[0])
rig_lib.create_output(name="{0}_knee_OUTPUT".format(self.model.module_name), parent=self.created_skn_jnts[1])
self.ankle_output = rig_lib.create_output(name="{0}_ankle_OUTPUT".format(self.model.module_name), parent=self.created_skn_jnts[-1])
else:
rig_lib.create_output(name="{0}_hip_OUTPUT".format(self.model.module_name), parent=self.created_ctrtl_jnts[0])
rig_lib.create_output(name="{0}_knee_OUTPUT".format(self.model.module_name), parent=self.created_ctrtl_jnts[1])
self.ankle_output = rig_lib.create_output(name="{0}_ankle_OUTPUT".format(self.model.module_name), parent=self.created_ctrtl_jnts[-1])
def create_and_connect_ctrl_jnts(self):
hip_ctrl_jnt = \
self.created_skn_jnts[0].duplicate(n="{0}_hip_fk_CTRL".format(self.model.module_name))[0]
knee_ctrl_jnt = pmc.ls("{0}_hip_fk_CTRL|{0}_knee_SKN".format(self.model.module_name))[0]
ankle_ctrl_jnt = pmc.ls("{0}_hip_fk_CTRL|{0}_knee_SKN|{0}_ankle_SKN".format(self.model.module_name))[0]
knee_ctrl_jnt.rename("{0}_knee_fk_CTRL".format(self.model.module_name))
ankle_ctrl_jnt.rename("{0}_ankle_fk_CTRL".format(self.model.module_name))
self.created_ctrtl_jnts = [hip_ctrl_jnt, knee_ctrl_jnt, ankle_ctrl_jnt]
for i, skn_jnt in enumerate(self.created_skn_jnts):
if not self.model.stretch_creation_switch:
self.created_ctrtl_jnts[i].translate >> skn_jnt.translate
self.created_ctrtl_jnts[i].jointOrient >> skn_jnt.jointOrient
self.created_ctrtl_jnts[i].rotate >> skn_jnt.rotate
self.created_ctrtl_jnts[i].scale >> skn_jnt.scale
    def create_one_chain_fk(self):
        """Attach FK circle shapes directly to the ctrl joints of the one-chain
        setup (the ctrl joints themselves act as the FK ctrls), and parent the
        chain under a joint-type OFS placed at the hip.
        """
        hip_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
                               n="{0}_hip_fk_CTRL_shape".format(self.model.module_name), ch=0)[0]
        # Move the circle's shape node under the ctrl joint, then delete the
        # now-empty circle transform.
        pmc.parent(hip_shape.getShape(), self.created_ctrtl_jnts[0], r=1, s=1)
        self.created_ctrtl_jnts[0].getShape().rename("{0}Shape".format(self.created_ctrtl_jnts[0]))
        self.created_ctrtl_jnts[0].setAttr("radius", 0)
        pmc.delete(hip_shape)
        pmc.select(cl=1)
        hip_ofs = pmc.joint(p=(0, 0, 0), n="{0}_ctrl_jnts_OFS".format(self.model.module_name))
        hip_ofs.setAttr("rotateOrder", 4)
        hip_ofs.setAttr("drawStyle", 2)
        hip_ofs.setAttr("translate", pmc.xform(self.created_skn_jnts[0], q=1, ws=1, translation=1))
        pmc.parent(self.created_ctrtl_jnts[0], hip_ofs)
        pmc.parent(hip_ofs, self.ctrl_input_grp, r=0)
        knee_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
                                n="{0}_knee_fk_CTRL_shape".format(self.model.module_name), ch=0)[0]
        pmc.parent(knee_shape.getShape(), self.created_ctrtl_jnts[1], r=1, s=1)
        self.created_ctrtl_jnts[1].getShape().rename("{0}Shape".format(self.created_ctrtl_jnts[1]))
        self.created_ctrtl_jnts[1].setAttr("radius", 0)
        pmc.delete(knee_shape)
        ankle_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2, d=3, s=8,
                                 n="{0}_ankle_fk_CTRL_shape".format(self.model.module_name), ch=0)[0]
        pmc.parent(ankle_shape.getShape(), self.created_ctrtl_jnts[2], r=1, s=1)
        self.created_ctrtl_jnts[2].getShape().rename("{0}Shape".format(self.created_ctrtl_jnts[2]))
        self.created_ctrtl_jnts[2].setAttr("radius", 0)
        pmc.delete(ankle_shape)
        self.created_fk_shapes = [self.created_ctrtl_jnts[0].getShape(), self.created_ctrtl_jnts[1].getShape(),
                                  self.created_ctrtl_jnts[2].getShape()]
        if self.model.clavicle_creation_switch:
            # pmc.pointConstraint(pmc.listRelatives(self.clavicle_jnt, children=1)[0], hip_ofs, maintainOffset=1)
            # With a clavicle, the whole FK chain follows the clavicle IK ctrl.
            pmc.parent(hip_ofs, self.clavicle_ik_ctrl)
    def create_one_chain_ik(self):
        """Build the IK layer for the 3-joint chain.

        Creates the hip→ankle rotate-plane IK handle, an ankle IK control
        (under a zeroed offset joint), a small single-span IK that lets the IK
        control drive the ankle orientation, and auto/manual pole-vector
        controls switched through condition nodes.  The FK controls' current
        rotations are cached, zeroed while the IK pieces are positioned, then
        restored at the end.  All ikBlend attributes are driven by the option
        control's ``fkIk`` switch (IK is off until that switch is raised).
        """
        # Cache the current FK pose so the chain can be zeroed for placement.
        fk_ctrl_01_value = pmc.xform(self.created_ctrtl_jnts[0], q=1, rotation=1)
        fk_ctrl_02_value = pmc.xform(self.created_ctrtl_jnts[1], q=1, rotation=1)
        fk_ctrl_03_value = pmc.xform(self.created_ctrtl_jnts[2], q=1, rotation=1)
        # --- main hip→ankle rotate-plane IK ---------------------------------
        ik_handle = pmc.ikHandle(n=("{0}_ik_HDL".format(self.model.module_name)),
                                 startJoint=self.created_ctrtl_jnts[0], endEffector=self.created_ctrtl_jnts[-1],
                                 solver="ikRPsolver")[0]
        ik_effector = pmc.listRelatives(self.created_ctrtl_jnts[-2], children=1)[-1]
        ik_effector.rename("{0}_ik_EFF".format(self.model.module_name))
        ik_handle.setAttr("snapEnable", 0)
        # IK disabled by default; the fkIk switch turns it on (connected below).
        ik_handle.setAttr("ikBlend", 0)
        # --- ankle IK control under a zeroed offset joint -------------------
        ik_shape = rig_lib.medium_cube("{0}_ankle_ik_CTRL_shape".format(self.model.module_name))
        ik_ctrl = rig_lib.create_jnttype_ctrl("{0}_ankle_ik_CTRL".format(self.model.module_name), ik_shape, drawstyle=2,
                                              rotateorder=4)
        pmc.select(cl=1)
        ik_ctrl_ofs = pmc.joint(p=(0, 0, 0), n="{0}_ankle_ik_ctrl_OFS".format(self.model.module_name))
        ik_ctrl_ofs.setAttr("rotateOrder", 4)
        ik_ctrl_ofs.setAttr("drawStyle", 2)
        pmc.parent(ik_ctrl, ik_ctrl_ofs)
        # Zero the chain so the control/handle land at the rest ankle position.
        self.created_ctrtl_jnts[0].setAttr("rotate", (0, 0, 0))
        self.created_ctrtl_jnts[1].setAttr("rotate", (0, 0, 0))
        self.created_ctrtl_jnts[2].setAttr("rotate", (0, 0, 0))
        ik_ctrl_ofs.setAttr("translate", pmc.xform(self.created_ctrtl_jnts[2], q=1, ws=1, translation=1))
        pmc.parent(ik_handle, ik_ctrl_ofs, r=0)
        ik_ctrl.setAttr("translate", pmc.xform(ik_handle, q=1, translation=1))
        pmc.parent(ik_handle, ik_ctrl, r=0)
        if self.model.clavicle_creation_switch:
            pmc.parent(ik_ctrl_ofs, self.clavicle_ik_ctrl)
        else:
            pmc.parent(ik_ctrl_ofs, self.ctrl_input_grp)
        ik_ctrl.setAttr("translate", (0, 0, 0))
        # --- ankle-rotation helper IK ---------------------------------------
        # One extra joint + single-span IK so the IK control also drives the
        # ankle joint's orientation while in IK mode.
        pmc.select(self.created_ctrtl_jnts[2])
        fk_rotation_jnt = pmc.joint(p=(0, 0, 0), n="{0}_ankle_fk_end_JNT".format(self.model.module_name))
        fk_rotation_jnt.setAttr("translate", (0, self.side_coef, 0))
        fk_rotation_jnt.setAttr("rotate", (0, 0, 0))
        fk_rotation_jnt.setAttr("jointOrient", (0, 0, 0))
        fk_rotation_hdl = pmc.ikHandle(n="{0}_ankle_rotation_ik_HDL".format(self.model.module_name),
                                       startJoint=self.created_ctrtl_jnts[2], endEffector=fk_rotation_jnt,
                                       solver="ikRPsolver")[0]
        fk_rotation_effector = pmc.listRelatives(self.created_ctrtl_jnts[2], children=1)[-1]
        fk_rotation_effector.rename("{0}_ankle_rotation_ik_EFF".format(self.model.module_name))
        fk_rotation_hdl.setAttr("snapEnable", 0)
        fk_rotation_hdl.setAttr("ikBlend", 0)
        fk_rotation_hdl.setAttr("poleVector", (-1 * self.side_coef, 0, 0))
        pmc.parent(fk_rotation_hdl, ik_ctrl, r=0)
        self.option_ctrl.fkIk >> fk_rotation_hdl.ikBlend
        fk_rotation_hdl.setAttr("visibility", 0)
        fk_rotation_jnt.setAttr("visibility", 0)
        # --- manual pole-vector control -------------------------------------
        manual_pole_vector_shape = rig_lib.jnt_shape_curve("{0}_manual_poleVector_CTRL_shape".format(self.model.module_name))
        manual_pole_vector = rig_lib.create_jnttype_ctrl("{0}_manual_poleVector_CTRL".format(self.model.module_name),
                                                         manual_pole_vector_shape, drawstyle=2)
        manual_pv_ofs = pmc.group(manual_pole_vector, n="{0}_manual_poleVector_ctrl_OFS".format(self.model.module_name))
        # Placed at the knee, pushed forward along Z by the knee's local Y
        # offset (side_coef flips the direction for the mirrored side).
        manual_pv_ofs.setAttr("translate", (pmc.xform(self.created_ctrtl_jnts[1], q=1, ws=1, translation=1)[0],
                                            pmc.xform(self.created_ctrtl_jnts[1], q=1, ws=1, translation=1)[1],
                                            pmc.xform(self.created_ctrtl_jnts[1], q=1, ws=1, translation=1)[2] + (
                                                (pmc.xform(self.created_ctrtl_jnts[1], q=1, translation=1)[1]) * self.side_coef)))
        # --- automatic pole-vector control ----------------------------------
        auto_pole_vector_shape = rig_lib.jnt_shape_curve("{0}_auto_poleVector_CTRL_shape".format(self.model.module_name))
        auto_pole_vector = rig_lib.create_jnttype_ctrl("{0}_auto_poleVector_CTRL".format(self.model.module_name),
                                                       auto_pole_vector_shape, drawstyle=2)
        auto_pv_ofs = pmc.group(auto_pole_vector, n="{0}_auto_poleVector_ctrl_OFS".format(self.model.module_name))
        auto_pv_ofs.setAttr("translate", (pmc.xform(self.created_ctrtl_jnts[0], q=1, ws=1, translation=1)[0],
                                          pmc.xform(self.created_ctrtl_jnts[0], q=1, ws=1, translation=1)[1],
                                          pmc.xform(self.created_ctrtl_jnts[0], q=1, ws=1, translation=1)[2]))
        # Enum on the IK control chooses which pole vector wins; condition
        # nodes translate the enum into the two constraint weights.
        ik_ctrl.addAttr("poleVector", attributeType="enum", enumName=["auto", "manual"], hidden=0, keyable=1)
        pole_vector_const = pmc.poleVectorConstraint(manual_pole_vector, auto_pole_vector, ik_handle)
        rig_lib.connect_condition_to_constraint("{0}.{1}W0".format(pole_vector_const, manual_pole_vector),
                                                ik_ctrl.poleVector, 1,
                                                "{0}_manual_poleVector_COND".format(ik_ctrl))
        rig_lib.connect_condition_to_constraint("{0}.{1}W1".format(pole_vector_const, auto_pole_vector),
                                                ik_ctrl.poleVector, 0,
                                                "{0}_auto_poleVector_COND".format(ik_ctrl))
        pmc.parent(manual_pv_ofs, self.ctrl_input_grp, r=0)
        pmc.parent(auto_pv_ofs, self.ctrl_input_grp, r=0)
        auto_pv_ofs.setAttr("visibility", 0)
        self.created_ctrtl_jnts[1].setAttr("preferredAngleX", 90)
        # Auto pole vector follows the hip and aims at the IK handle.
        pmc.pointConstraint(self.created_ctrtl_jnts[0].getParent(), auto_pv_ofs, maintainOffset=1)
        ik_ctrl.addAttr("legTwist", attributeType="float", defaultValue=0, hidden=0, keyable=1)
        pmc.aimConstraint(ik_handle, auto_pv_ofs,
                          maintainOffset=1, aimVector=(0.0, -1.0, 0.0),
                          upVector=(1.0, 0.0, 0.0), worldUpType="objectrotation",
                          worldUpVector=(1.0, 0.0, 0.0), worldUpObject=ik_ctrl)
        ik_ctrl.legTwist >> ik_handle.twist
        self.created_ik_ctrls = [ik_ctrl, manual_pole_vector, auto_pole_vector]
        # Restore the FK pose cached at the top, then snap the IK pieces onto
        # that pose so both layers match on creation.
        self.created_ctrtl_jnts[0].setAttr("rotate", fk_ctrl_01_value)
        self.created_ctrtl_jnts[1].setAttr("rotate", fk_ctrl_02_value)
        self.created_ctrtl_jnts[2].setAttr("rotate", fk_ctrl_03_value)
        pmc.xform(manual_pole_vector, ws=1, translation=(pmc.xform(self.created_ctrtl_jnts[1], q=1, ws=1, translation=1)))
        ik_handle.setAttr("visibility", 0)
        # Locator used to read the ankle's orientation in FK space when
        # snapping the IK control (rotated into the control's convention).
        self.ankle_fk_pos_reader = pmc.spaceLocator(p=(0, 0, 0),
                                                    n="{0}_ankle_fk_pos_reader_LOC".format(self.model.module_name))
        self.ankle_fk_pos_reader.setAttr("rotateOrder", 4)
        self.ankle_fk_pos_reader.setAttr("visibility", 0)
        pmc.parent(self.ankle_fk_pos_reader, self.created_ctrtl_jnts[-1], r=1)
        self.ankle_fk_pos_reader.setAttr("rotate", (90 * (1 - self.side_coef), 0, 180))
        rig_lib.clean_ctrl(self.ankle_fk_pos_reader, 0, trs="trs")
        pmc.xform(ik_ctrl, ws=1, translation=(pmc.xform(self.created_ctrtl_jnts[-1], q=1, ws=1, translation=1)))
        pmc.xform(ik_ctrl, ws=1, rotation=(pmc.xform(self.ankle_fk_pos_reader, q=1, ws=1, rotation=1)))
        pmc.xform(auto_pole_vector, ws=1, translation=(pmc.xform(self.created_ctrtl_jnts[1], q=1, ws=1, translation=1)))
        self.option_ctrl.fkIk >> ik_handle.ikBlend
        # const = pmc.parentConstraint(ik_ctrl, self.created_ctrtl_jnts[-1], self.created_skn_jnts[-1],
        #                              maintainOffset=1, skipTranslate=["x", "y", "z"])
        # const.setAttr("target[0].targetOffsetRotate", (0, 90 * (1 - self.side_coef), 90 * (1 + self.side_coef)))
        # const.setAttr("target[0].targetOffsetTranslate", (0, 0, 0))
        # const.setAttr("target[1].targetOffsetRotate", (0, 0, 0))
        # const.setAttr("target[1].targetOffsetTranslate", (0, 0, 0))
        #
        # invert_value = pmc.createNode("plusMinusAverage", n="{0}_fk_const_switch_MDL".format(self.model.module_name))
        # invert_value.setAttr("input1D[0]", 1)
        # invert_value.setAttr("operation", 2)
        # self.option_ctrl.fkIk >> invert_value.input1D[1]
        #
        # self.option_ctrl.connectAttr("fkIk", "{0}.{1}W0".format(const, ik_ctrl))
        # invert_value.connectAttr("output1D", "{0}.{1}W1".format(const, self.created_ctrtl_jnts[-1]))
    def create_one_chain_half_bones(self):
        """Insert "half-bone" parent joints above every skin joint.

        Each half-bone is point-constrained to its control joint but takes its
        rotation from an orient blend (interpType 2 = shortest) between the
        two neighbouring controls, smoothing twist along the chain.  Aim
        constraints re-orient the first and last skin joints toward locators
        on their half-bones, and a knee "bend" joint + control is layered over
        the knee skin joint.  The cached FK pose is restored at the end.
        """
        self.created_half_bones = []
        # Cache and zero the FK pose while the constraint network is built.
        fk_ctrl_01_value = pmc.xform(self.created_ctrtl_jnts[0], q=1, rotation=1)
        fk_ctrl_02_value = pmc.xform(self.created_ctrtl_jnts[1], q=1, rotation=1)
        fk_ctrl_03_value = pmc.xform(self.created_ctrtl_jnts[2], q=1, rotation=1)
        self.created_ctrtl_jnts[0].setAttr("rotate", (0, 0, 0))
        self.created_ctrtl_jnts[1].setAttr("rotate", (0, 0, 0))
        self.created_ctrtl_jnts[2].setAttr("rotate", (0, 0, 0))
        # Detach the skin joints from their direct-connection drivers and move
        # them under the constraint group.
        for i, jnt in enumerate(self.created_skn_jnts):
            pmc.disconnectAttr(jnt, inputs=1, outputs=0)
            pmc.parent(jnt, self.jnt_const_group, r=0)
        for i, jnt in enumerate(self.created_skn_jnts):
            half_bone = pmc.duplicate(jnt, n=str(jnt).replace("SKN", "HALFBONE"))[0]
            pmc.parent(jnt, half_bone)
            self.created_half_bones.append(half_bone)
            # Position follows this joint's control only; rotation is blended.
            pmc.parentConstraint(self.created_ctrtl_jnts[i], half_bone, maintainOffset=1, skipRotate=["x", "y", "z"])
            if i == 0:
                # The hip blends between its offset parent and its own control.
                rot_const = pmc.parentConstraint(self.created_ctrtl_jnts[i].getParent(), self.created_ctrtl_jnts[i],
                                                 half_bone, maintainOffset=1, skipTranslate=["x", "y", "z"])
            else:
                rot_const = pmc.parentConstraint(self.created_ctrtl_jnts[i - 1], self.created_ctrtl_jnts[i], half_bone,
                                                 maintainOffset=1, skipTranslate=["x", "y", "z"])
            # interpType 2 = "shortest" rotation interpolation.
            rot_const.setAttr("interpType", 2)
            # self.created_ctrtl_jnts[i].scale >> jnt.scale
        # Aim targets slightly above the first/last half-bones so the skin
        # joints keep a stable up axis.
        hip_target_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_hip_skn_target_LOC".format(self.model.module_name))
        ankle_target_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_ankle_skn_target_LOC".format(self.model.module_name))
        pmc.parent(hip_target_loc, self.created_half_bones[0], r=1)
        pmc.parent(ankle_target_loc, self.created_half_bones[-1], r=1)
        hip_target_loc.setAttr("translateY", 0.1)
        ankle_target_loc.setAttr("translateY", 0.1)
        pmc.aimConstraint(hip_target_loc, self.created_skn_jnts[0], maintainOffset=0, aimVector=(0.0, 1.0, 0.0),
                          upVector=(0.0, 0.0, 1.0), worldUpType="objectrotation",
                          worldUpVector=(0.0, 0.0, 1.0*self.side_coef), worldUpObject=self.jnt_const_group)
        pmc.aimConstraint(ankle_target_loc, self.created_skn_jnts[-1], maintainOffset=0, aimVector=(0.0, 1.0, 0.0),
                          upVector=(1.0, 0.0, 0.0), worldUpType="objectrotation",
                          worldUpVector=(1.0, 0.0, 0.0), worldUpObject=self.created_ctrtl_jnts[-1])
        # Extra knee joint driven by a dedicated "bend" control for manual
        # corrective offsets on top of the blended half-bone.
        knee_bend_jnt = pmc.duplicate(self.created_skn_jnts[1], n="{0}_knee_bend_JNT".format(self.model.module_name))[
            0]
        pmc.parent(self.created_skn_jnts[1], knee_bend_jnt)
        knee_bend_ctrl_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=1.5, d=3, s=8,
                                          n="{0}_knee_bend_CTRL_shape".format(self.model.module_name), ch=0)[0]
        self.knee_bend_ctrl = rig_lib.create_jnttype_ctrl("{0}_knee_bend_CTRL".format(self.model.module_name),
                                                          knee_bend_ctrl_shape, drawstyle=2, rotateorder=4)
        pmc.select(cl=1)
        knee_bend_ctrl_ofs = pmc.joint(p=(0, 0, 0), n="{0}_knee_bend_ctrl_OFS".format(self.model.module_name))
        knee_bend_ctrl_ofs.setAttr("drawStyle", 2)
        knee_bend_ctrl_ofs.setAttr("rotateOrder", 4)
        pmc.parent(self.knee_bend_ctrl, knee_bend_ctrl_ofs)
        pmc.parent(knee_bend_ctrl_ofs, self.ctrl_input_grp)
        pmc.parentConstraint(self.created_half_bones[1], knee_bend_ctrl_ofs, maintainOffset=0)
        self.knee_bend_ctrl.translate >> knee_bend_jnt.translate
        self.knee_bend_ctrl.rotate >> knee_bend_jnt.rotate
        # Visibility toggle for the bend control lives on the option control.
        self.option_ctrl.addAttr("kneeBendCtrl", attributeType="bool", defaultValue=0, hidden=0, keyable=1)
        # Restore the cached FK pose.
        self.created_ctrtl_jnts[0].setAttr("rotate", fk_ctrl_01_value)
        self.created_ctrtl_jnts[1].setAttr("rotate", fk_ctrl_02_value)
        self.created_ctrtl_jnts[2].setAttr("rotate", fk_ctrl_03_value)
    def create_ik_knee_snap(self):
        """Add a ``snapKnee`` attribute that pins the knee to the pole vector.

        Two distanceBetween nodes measure hip→pole-vector and pole-vector→ankle.
        Two blendColors nodes blend each segment's translateY between its
        normal length (the stretch network's output when stretch is enabled,
        otherwise a stored ``baseTranslateY``) and the measured distance, so at
        ``snapKnee`` = 1 the knee sits exactly on the manual pole-vector control.
        """
        # Reuse the stretch network's measuring locators when they exist.
        if self.model.stretch_creation_switch:
            start_loc = pmc.ls("{0}_ik_length_start_LOC".format(self.model.module_name))[0]
            end_loc = pmc.ls("{0}_ik_length_end_LOC".format(self.model.module_name))[0]
        else:
            start_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_ik_length_start_LOC".format(self.model.module_name))
            end_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_ik_length_end_LOC".format(self.model.module_name))
            pmc.parent(start_loc, self.created_ctrtl_jnts[0].getParent(), r=1)
            pmc.parent(end_loc, self.created_ik_ctrls[0], r=1)
            start_loc.setAttr("visibility", 0)
            end_loc.setAttr("visibility", 0)
        # Locator on the manual pole-vector control (created_ik_ctrls[1]).
        pv_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_ik_length_pole_vector_LOC".format(self.model.module_name))
        pmc.parent(pv_loc, self.created_ik_ctrls[1], r=1)
        pv_loc.setAttr("visibility", 0)
        thigh_distance = pmc.createNode("distanceBetween", n="{0}_ik_thigh_to_pole_vector_length_distBetween".format(self.model.module_name))
        calf_distance = pmc.createNode("distanceBetween", n="{0}_ik_pole_vector_to_ankle_length_distBetween".format(self.model.module_name))
        start_loc.getShape().worldPosition >> thigh_distance.point1
        pv_loc.getShape().worldPosition >> thigh_distance.point2
        pv_loc.getShape().worldPosition >> calf_distance.point1
        end_loc.getShape().worldPosition >> calf_distance.point2
        thigh_blend = pmc.createNode("blendColors", n="{0}_knee_snap_thigh_BLENDCOLOR".format(self.model.module_name))
        calf_blend = pmc.createNode("blendColors", n="{0}_knee_snap_calf_BLENDCOLOR".format(self.model.module_name))
        if self.model.side == "Right":
            # Mirrored side builds down negative Y, so negate the distances.
            invert_thigh_distance = pmc.createNode("multDoubleLinear", n="{0}_ik_thigh_to_pole_vector_invert_length_MDL".format(self.model.module_name))
            invert_calf_distance = pmc.createNode("multDoubleLinear", n="{0}_ik_pole_vector_to_ankle_invert_length_MDL".format(self.model.module_name))
            invert_thigh_distance.setAttr("input1", -1)
            thigh_distance.distance >> invert_thigh_distance.input2
            invert_calf_distance.setAttr("input1", -1)
            calf_distance.distance >> invert_calf_distance.input2
            invert_thigh_distance.output >> thigh_blend.color1R
            invert_calf_distance.output >> calf_blend.color1R
        else:
            thigh_distance.distance >> thigh_blend.color1R
            calf_distance.distance >> calf_blend.color1R
        if self.model.stretch_creation_switch:
            # Splice into the existing stretch network: its outputs feed the
            # blend instead of driving translateY directly (// disconnects).
            stretch_thigh_output = pmc.listConnections(self.created_ctrtl_jnts[1].translateY, source=1, destination=0,
                                                       connections=1)[0][1]
            stretch_calf_output = pmc.listConnections(self.created_ctrtl_jnts[2].translateY, source=1, destination=0,
                                                      connections=1)[0][1]
            stretch_thigh_output.outputR >> thigh_blend.color2R
            stretch_calf_output.outputR >> calf_blend.color2R
            stretch_thigh_output.outputR // self.created_ctrtl_jnts[1].translateY
            stretch_calf_output.outputR // self.created_ctrtl_jnts[2].translateY
        else:
            # No stretch: store the rest lengths on locked attributes.
            self.created_ctrtl_jnts[1].addAttr("baseTranslateY", attributeType="float",
                                               defaultValue=pmc.xform(self.created_ctrtl_jnts[1], q=1, translation=1)[
                                                   1],
                                               hidden=0, keyable=0)
            self.created_ctrtl_jnts[1].setAttr("baseTranslateY", lock=1, channelBox=0)
            self.created_ctrtl_jnts[2].addAttr("baseTranslateY", attributeType="float",
                                               defaultValue=pmc.xform(self.created_ctrtl_jnts[2], q=1, translation=1)[
                                                   1],
                                               hidden=0, keyable=0)
            self.created_ctrtl_jnts[2].setAttr("baseTranslateY", lock=1, channelBox=0)
            self.created_ctrtl_jnts[1].baseTranslateY >> thigh_blend.color2R
            self.created_ctrtl_jnts[2].baseTranslateY >> calf_blend.color2R
        thigh_blend.outputR >> self.created_ctrtl_jnts[1].translateY
        calf_blend.outputR >> self.created_ctrtl_jnts[2].translateY
        # 0 = normal length, 1 = knee pinned to the pole vector.
        self.created_ik_ctrls[0].addAttr("snapKnee", attributeType="float", defaultValue=0, hidden=0, keyable=1,
                                         hasMaxValue=1, hasMinValue=1, maxValue=1, minValue=0)
        self.created_ik_ctrls[0].snapKnee >> thigh_blend.blender
        self.created_ik_ctrls[0].snapKnee >> calf_blend.blender
class Model(AuriScriptModel):
    """Serializable settings for the leg module.

    Holds only plain attributes (defaults describe a left leg); attribute
    order is preserved as-is since the base class may serialize ``__dict__``.
    """

    def __init__(self):
        AuriScriptModel.__init__(self)
        # GUI selections, filled in later by the tool's interface.
        self.selected_module = None
        self.selected_output = None
        # Build options.
        self.side = "Left"
        self.ik_creation_switch = True
        self.stretch_creation_switch = True
        self.raz_ik_ctrls = True
        self.raz_fk_ctrls = False
        self.clavicle_creation_switch = True
        self.fk_ik_type = "one_chain"
        # self.bend_creation_switch = False
        self.space_list = []
        self.deform_chain_creation_switch = True
        # Number of deform joints per segment.
        self.how_many_thigh_jnts = 5
        self.how_many_calf_jnts = 5
| |
# GOALS
# Compare rush-hour traffic to non-rush-hour traffic to gauge how consistently commuters use each station
from pprint import pprint
import csv
from datetime import *
from dateutil.relativedelta import *
import calendar
import dateutil.parser
import matplotlib.pyplot as plt
import operator
# Collect and Store Data in Memory
def fetch_data():
    """Read the raw MTA turnstile dump into memory.

    Returns a list of rows (lists of strings) with the header row dropped and
    every cell stripped of surrounding whitespace.
    """
    with open("turnstile_150404.txt", "r") as turn_data:
        reader = csv.reader(turn_data)
        # next(reader) skips the header row; the builtin works on both
        # Python 2 and 3, unlike the original reader.next().
        next(reader)
        return [[cell.strip() for cell in row] for row in reader]
# CHALLENGE ONE
def format_data():
    """Group the raw turnstile rows by turnstile identity.

    Returns a dict mapping (C/A, unit, SCP, station) tuples to the list of
    that turnstile's remaining row fields (columns 4 through second-to-last).
    """
    grouped = {}
    for row in fetch_data():
        turnstile = tuple(row[0:4])
        # setdefault creates the bucket on first sight, then appends.
        grouped.setdefault(turnstile, []).append(row[4:-1])
    return grouped
# CHALLENGE TWO
# CHALLENGE TWO
def format_data_hour():
    """Group per-interval traffic counts by turnstile.

    Returns a dict mapping (C/A, unit, SCP, station) tuples to a list of
    [datetime, count] pairs, where count is the delta of the cumulative
    entry and exit counters between this row and the next one.
    """
    my_hash={}
    the_list = fetch_data()
    for index, row in enumerate(the_list):
        key = tuple(row[0:4])
        # Columns 6 and 7 are the date and time strings.
        time_str = row[6] + " " + row[7]
        time = dateutil.parser.parse( time_str )
        if index == len(the_list)-1:
            # Last row has no successor to diff against.
            entry_count = 0
            exit_count = 0
        else:
            # Counters are cumulative, so traffic = next reading - this one.
            # NOTE(review): when the next row belongs to a *different*
            # turnstile this diff mixes two counters — confirm that's intended.
            entry_count = int(float(the_list[index+1][-2])) - int(float(row[-2]))
            exit_count = int(float(the_list[index+1][-1])) - int(float(row[-1]))
        count = entry_count + exit_count
        val = [time, count]
        if key in my_hash:
            my_hash[key].append( val )
        else:
            my_hash[key] = [ val ]
    return my_hash
# CHALLENGE THREE
def create_data_for_day_list():
    """Map each turnstile key to its [date, cumulative entries+exits] pairs.

    The count kept per reading is the sum of the cumulative entry and exit
    counters (the last two columns of each raw row).
    """
    by_turnstile = {}
    for row in fetch_data():
        turnstile = tuple(row[0:4])
        running_total = int(float(row[-2])) + int(float(row[-1]))
        by_turnstile.setdefault(turnstile, []).append([row[6], running_total])
    return by_turnstile
def format_data_day():
    """Collapse cumulative readings into one traffic total per day.

    For each turnstile, walks its [date, cumulative count] list and emits a
    [date, last_count_of_day - first_count_of_day] pair per run of equal
    dates.  Returns a dict keyed like ``create_data_for_day_list``.
    """
    my_hash={}
    data_hash= create_data_for_day_list()
    for k, v in data_hash.items():
        i=0
        while i < len(v) - 1:
            date = v[i][0]
            initial_count=v[i][1]
            next_count=v[i][1]
            # Advance through all readings with this same date; next_count
            # ends up as the last reading of the run (or of the whole list).
            while date == v[i][0] and i < len(v)-1:
                i += 1
                next_count = v[i][1]
            new_val = [date, next_count - initial_count ]
            if k in my_hash:
                my_hash[k].append(new_val)
            else:
                my_hash[k]= [new_val]
    return my_hash
def density_by_turnstile_hash():
    """Map each station name to its per-turnstile weekly traffic totals.

    Every turnstile at a station contributes one integer (the sum of its
    daily entry+exit deltas over the data's span) to that station's list.
    """
    per_station = {}
    for station_info, day_counts in format_data_day().items():
        # The station name is the 4th element of the turnstile key tuple.
        station = station_info[3]
        weekly_total = sum(int(day[1]) for day in day_counts)
        per_station.setdefault(station, []).append(weekly_total)
    return per_station
def station_density():
    """Average the weekly per-turnstile totals for each station.

    Returns (station, average) items, with the average rounded to two
    decimal places.  The original also built a sorted copy of the dict that
    it never used; that dead code has been removed.
    """
    averages = {}
    for station, totals in density_by_turnstile_hash().items():
        average = float(sum(totals)) / len(totals)
        averages[station] = float("{0:.2f}".format(average))
    return averages.items()
def total_day():
    """Sum the weekly per-turnstile totals for each station.

    Returns (station, total) items.  The original also built a sorted copy
    of the dict that it never used; that dead code has been removed.
    """
    totals = {}
    for station, per_turnstile in density_by_turnstile_hash().items():
        totals[station] = int(sum(per_turnstile))
    return totals.items()
def remove_negative_data(rows):
    """Return only the (key, count) rows whose count is non-negative.

    Negative counts come from counter resets/rollovers in the raw data.
    (The parameter was renamed from ``list``, which shadowed the builtin.)
    """
    return [row for row in rows if row[1] >= 0]
def commuter_index():
    """Score each station by how commuter-heavy its traffic is.

    For every turnstile, picks a rush-hour and a non-rush-hour reading for
    morning and evening (which list slots those are depends on the hour of
    the turnstile's first reading) and accumulates
    ``rush - non_rush`` per station.  Returns (station, score) items.
    """
    mta_hour_list = format_data_hour().items()
    my_hash = {}
    for index, row in enumerate(mta_hour_list):
        station_info = row[0]
        time_info = row[1]
        # key = tuple([station_info[0], station_info[1], station_info[3]])
        key = station_info[3]
        # Hour of the first reading decides which slots are rush hour.
        # NOTE(review): if first_hour is outside {0, 1, 2, 3}, the
        # morning/evening variables are never assigned and the additions
        # below raise UnboundLocalError — confirm the data guarantees this.
        first_hour = time_info[0][0].hour
        if first_hour == 1 or first_hour == 2 or first_hour == 3:
            morning_val_rush = time_info[1][1]
            morning_val_non_rush = time_info[2][1]
        elif first_hour == 0:
            morning_val_rush = time_info[2][1]
            morning_val_non_rush = time_info[3][1]
        if first_hour == 0 or first_hour == 1 or first_hour == 2:
            evening_val_rush = time_info[4][1]
            evening_val_non_rush = time_info[5][1]
        elif first_hour == 3:
            evening_val_rush = time_info[3][1]
            evening_val_non_rush = time_info[4][1]
        val_rush = evening_val_rush + morning_val_rush
        val_non_rush = evening_val_non_rush + morning_val_non_rush
        commuter_index = val_rush - val_non_rush
        if key in my_hash:
            my_hash[key] += commuter_index
        else:
            my_hash[key] = commuter_index
    # NOTE(review): sorted_hash is computed but never used (the unsorted
    # items are returned) — dead code kept as-is pending confirmation.
    sorted_hash = sorted( my_hash.items(), key=operator.itemgetter(1) )
    return my_hash.items()
def ultimate_index():
    """Combine commuter index and station density into one score per station.

    Returns (station, commuter_score * density) pairs for stations present
    in both metrics.  The original zipped the two item lists positionally,
    which silently pairs unrelated stations whenever the two dicts iterate
    in different orders; joining on the station key is order-independent.
    """
    commuter = dict(remove_negative_data(commuter_index()))
    density = dict(remove_negative_data(station_density()))
    return [(station, commuter[station] * density[station])
            for station in commuter if station in density]
def limit_data(our_list):
    """Drop negative counts, then return the 30 highest-count rows.

    The result stays sorted by count, ascending (the slice keeps the tail
    of the ascending sort).
    """
    cleaned = remove_negative_data(our_list)
    cleaned.sort(key=operator.itemgetter(1))
    return cleaned[-30:]
def export_to_tsv(the_list, file_name):
    """Write the top rows of *the_list* to *file_name* as two-column TSV.

    The header names ("letter"/"frequency") match what the downstream d3
    visualization expects.  Using ``with`` guarantees the handle is flushed
    and closed — the original leaked the open file.
    """
    my_hash_list = limit_data(the_list)
    with open(file_name, "w") as output_file:
        output_file.write("letter\tfrequency\n")
        for row in my_hash_list:
            output_file.write(row[0] + "\t" + str(row[1]) + "\n")
# ====================================
# DRIVER CODE
# ====================================
# export_to_tsv returns None, so the original's "x_export = ..." bindings
# were misleading; the functions are called purely for their side effect of
# writing each TSV file.
export_to_tsv(commuter_index(), "commuter_index.tsv")
export_to_tsv(station_density(), "station_density.tsv")
export_to_tsv(total_day(), "total_station_count.tsv")
export_to_tsv(ultimate_index(), "ultimate.tsv")
| |
# -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.axes import Yaxes
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
from grafana_dashboards.components.links import Links
from grafana_dashboards.components.targets import Targets
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Panels(JsonListGenerator):
    """List generator for the ``panels`` section of a dashboard row.

    Accepts any item whose registered component type derives from
    :class:`PanelsItemBase`.
    """

    def __init__(self, data, registry):
        super(Panels, self).__init__(data, registry, PanelsItemBase)
class PanelsItemBase(JsonGenerator):
    """Marker base class for every panel type usable inside ``Panels``."""
    pass
class Graph(PanelsItemBase):
    """Generator for Grafana ``graph`` panels.

    Fields listed in ``_copy_fields`` are copied verbatim from the YAML data
    by the base class; everything else (targets, grid, legend, tooltip,
    series overrides, axes, links) is assembled in ``gen_json_from_data``.
    """

    _copy_fields = {'stack', 'fill', 'aliasColors', 'leftYAxisLabel', 'bars', 'lines', 'linewidth', 'y_formats',
                    'x-axis', 'y-axis', 'points', 'pointradius', 'percentage', 'steppedLine', 'repeat',
                    'repeatDirection', 'decimals', 'minSpan', 'datasource', 'description'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``graph`` panel."""
        panel_json = super(Graph, self).gen_json_from_data(data, context)
        panel_json.update({
            'type': 'graph',
            'title': self.data.get('title', None),
            'span': self.data.get('span', 12),
        })
        # Copy the configured target list before extending it: the original
        # appended straight into self.data['targets'], so generating the same
        # panel twice accumulated duplicate 'target' entries.
        targets = list(self.data.get('targets', []))
        if 'target' in self.data:
            targets.append(self.data['target'])
        self._create_component(panel_json, Targets, {'targets': targets})
        panel_json['nullPointMode'] = self.data.get('nullPointMode', 'null')
        grid_data = self.data.get('grid', {}) or {}
        # A grid section is emitted when explicitly configured, or implied by
        # the presence of y_formats.
        if 'grid' in self.data or 'y_formats' in self.data:
            panel_json['grid'] = {
                'leftMax': grid_data.get('leftMax', None),
                'rightMax': grid_data.get('rightMax', None),
                'leftMin': grid_data.get('leftMin', None),
                'rightMin': grid_data.get('rightMin', None),
                'threshold1': grid_data.get('threshold1', None),
                'threshold2': grid_data.get('threshold2', None),
                'threshold1Color': grid_data.get('threshold1Color', 'rgba(216, 200, 27, 0.27)'),
                'threshold2Color': grid_data.get('threshold2Color', 'rgba(234, 112, 112, 0.22)')
            }
        if 'legend' in self.data:
            panel_json['legend'] = {
                'show': self.data['legend'].get('show', True),
                'values': self.data['legend'].get('values', False),
                'min': self.data['legend'].get('min', False),
                'max': self.data['legend'].get('max', False),
                'current': self.data['legend'].get('current', False),
                'total': self.data['legend'].get('total', False),
                'avg': self.data['legend'].get('avg', False),
                'alignAsTable': self.data['legend'].get('alignAsTable', False),
                'rightSide': self.data['legend'].get('rightSide', False),
                'hideEmpty': self.data['legend'].get('hideEmpty', False),
                'hideZero': self.data['legend'].get('hideZero', False),
                'sideWidth': self.data['legend'].get('sideWidth', None)
            }
        if 'tooltip' in self.data:
            panel_json['tooltip'] = {
                'value_type': self.data['tooltip'].get('value_type', 'individual'),
                'shared': self.data['tooltip'].get('shared', False),
            }
        if 'seriesOverrides' in self.data:
            # Flatten {alias: settings} mappings into Grafana's
            # [{'alias': ..., <settings>}] list form.
            overrides = []
            for override in self.data['seriesOverrides']:
                for alias, settings in override.items():
                    to_add = {'alias': alias}
                    to_add.update(settings)
                    overrides.append(to_add)
            panel_json['seriesOverrides'] = overrides
        self._create_component(panel_json, Links, self.data)
        # Old-style axis config implies default y_formats when none are given.
        if (('leftYAxisLabel' in self.data
             or 'grid' in self.data and ('leftMin' in grid_data or 'leftMax' in grid_data))
                and ('y_formats' not in self.data)):
            panel_json['y_formats'] = ['short', 'short']
        panel_json['xaxis'] = self.data.get('xaxis', {'show': True, 'format': 'time'})
        self._create_component(panel_json, Yaxes, self.data)
        return panel_json

    def _create_component(self, panel_json, clazz, data):
        """Instantiate *clazz* from *data* and attach its JSON if configured."""
        if get_component_type(clazz) in data:
            panel_json[get_component_type(clazz)] = self.registry.create_component(clazz, data).gen_json()
class SingleStat(PanelsItemBase):
    """Generator for Grafana ``singlestat`` panels.

    ``_copy_fields`` entries are copied verbatim by the base class; targets,
    sparkline, gauge, color defaults, value maps and links are assembled in
    ``gen_json_from_data``.
    """

    _copy_fields = {'prefix', 'postfix', 'nullText', 'format', 'thresholds', 'colorValue', 'colorBackground',
                    'colors', 'prefixFontSize', 'valueFontSize', 'postfixFontSize', 'maxDataPoints', 'datasource',
                    'repeat', 'repeatDirection', 'decimals', 'minSpan', 'description', 'colorPostfix'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``singlestat`` panel."""
        panel_json = super(SingleStat, self).gen_json_from_data(data, context)
        panel_json.update({
            'type': 'singlestat',
            'title': data.get('title', None),
            'span': data.get('span', None),
            'nullPointMode': data.get('nullPointMode', 'null'),
            'valueName': data.get('valueName', 'current')
        })
        panel_json['targets'] = self.registry.create_component(Targets, data).gen_json() if 'targets' in data else []
        if 'sparkline' in data:
            panel_json['sparkline'] = {
                'show': True,
                'full': data['sparkline'].get('full', False),
                'lineColor': data['sparkline'].get('lineColor', 'rgb(31, 120, 193)'),
                'fillColor': data['sparkline'].get('fillColor', 'rgba(31, 118, 189, 0.18)')
            }
        if 'gauge' in data:
            panel_json['gauge'] = {
                'show': True,
                'minValue': data['gauge'].get('minValue', 0),
                'maxValue': data['gauge'].get('maxValue', 100),
                'thresholdMarkers': data['gauge'].get('thresholdMarkers', True),
                'thresholdLabels': data['gauge'].get('thresholdLabels', False)
            }
        # Default green/orange/red threshold colors when none were configured.
        if 'colors' not in data:
            panel_json['colors'] = [
                'rgba(50, 172, 45, 0.97)',
                'rgba(237, 129, 40, 0.89)',
                'rgba(245, 54, 54, 0.9)'
            ]
        if 'valueMaps' in data:
            # {value: text} mapping flattened into Grafana's list-of-dicts form.
            panel_json['valueMaps'] = [{'value': value, 'op': '=', 'text': text} for value, text in
                                       data['valueMaps'].items()]
        if get_component_type(Links) in data:
            panel_json['links'] = self.registry.create_component(Links, data).gen_json()
        return panel_json
class Table(PanelsItemBase):
    """Generator for Grafana ``table`` panels."""

    # noinspection PySetFunctionToLiteral
    _copy_fields = {'fontSize', 'pageSize', 'showHeader', 'scroll', 'datasource', 'description'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``table`` panel."""
        panel_json = super(Table, self).gen_json_from_data(data, context)
        panel_json.update({
            'type': 'table',
            'title': data.get('title', None),
            'span': data.get('span', None),
            'transform': data.get('transform', None),
            'columns': [{'text': v, 'value': str(v).lower()} for v in data.get('columns', [])]
        })
        # The original also set 'targets' inside the update() above, but that
        # value was unconditionally overwritten here — the dead entry is gone.
        panel_json['targets'] = self.registry.create_component(Targets, data).gen_json() if 'targets' in data else []
        if 'styles' in self.data:
            # Flatten {pattern: settings} mappings into Grafana's
            # [{'pattern': ..., <settings>}] list form.
            styles = []
            for override in self.data['styles']:
                for pattern, settings in override.items():
                    to_add = {'pattern': pattern}
                    to_add.update(settings)
                    styles.append(to_add)
            panel_json['styles'] = styles
        return panel_json
class Text(PanelsItemBase):
    """Generator for Grafana ``text`` panels (plain text, markdown or html)."""

    _copy_fields = {'description'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``text`` panel."""
        panel_json = super(Text, self).gen_json_from_data(data, context)
        additions = {
            'type': 'text',
            'title': data.get('title', None),
            'span': data.get('span', None),
            'mode': data.get('mode', 'text'),
            'content': data.get('content', ''),
        }
        panel_json.update(additions)
        return panel_json
class Dashlist(PanelsItemBase):
    """Generator for Grafana ``dashlist`` panels (lists of dashboards)."""

    _copy_fields = {'headings', 'limit', 'recent', 'tags', 'query', 'description'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``dashlist`` panel."""
        panel_json = super(Dashlist, self).gen_json_from_data(data, context)
        # Search mode is implied by a query or tag filter; otherwise the
        # panel falls back to showing starred dashboards.
        has_search = 'query' in data or 'tags' in data
        panel_json.update({
            'type': 'dashlist',
            'title': data.get('title', None),
            'span': data.get('span', 12),
            'search': has_search,
            'starred': data.get('starred') or not has_search,
        })
        return panel_json
class Gauge(PanelsItemBase):
    """Generator for Grafana ``gauge`` panels."""

    _copy_fields = {'datasource', 'pluginVersion'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``gauge`` panel."""
        panel_json = super(Gauge, self).gen_json_from_data(data, context)
        panel_json.update({
            'type': 'gauge',
            'title': data.get('title', None),
            'span': data.get('span', 12),
            'timeFrom': data.get('timeFrom', None),
            'timeShift': data.get('timeShift', None)
        })
        panel_json['targets'] = self.registry.create_component(Targets, data).gen_json() if 'targets' in data else []
        # ``or {}`` guards against explicit nulls in the YAML source.
        options_data = self.data.get('options', {}) or {}
        field_options_data = options_data.get('fieldOptions', {}) or {}
        field_options = {
            'values': field_options_data.get('values', False),
            'calcs': field_options_data.get('calcs', ['mean']),
            'defaults': field_options_data.get('defaults',
                                               {'mappings': [], 'thresholds': {'mode': 'absolute', 'steps': []}}),
            'overrides': field_options_data.get('overrides', [])
        }
        panel_json['options'] = {
            'showThresholdMarkers': options_data.get('showThresholdMarkers', False),
            'showThresholdLabels': options_data.get('showThresholdLabels', False),
            'orientation': options_data.get('orientation', 'auto'),
            'fieldOptions': field_options
        }
        return panel_json
class Stat(PanelsItemBase):
    """Generator for Grafana ``stat`` panels (successor of singlestat)."""

    _copy_fields = {'datasource', 'pluginVersion'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``stat`` panel."""
        panel_json = super(Stat, self).gen_json_from_data(data, context)
        panel_json.update({
            'type': 'stat',
            'title': data.get('title', None),
            'span': data.get('span', 12),
            'timeFrom': data.get('timeFrom', None),
            'timeShift': data.get('timeShift', None)
        })
        panel_json['targets'] = self.registry.create_component(Targets, data).gen_json() if 'targets' in data else []
        # ``or {}`` guards against explicit nulls in the YAML source.
        options_data = self.data.get('options', {}) or {}
        field_options_data = options_data.get('fieldOptions', {}) or {}
        field_options = {
            'values': field_options_data.get('values', False),
            'calcs': field_options_data.get('calcs', ['mean']),
            'defaults': field_options_data.get('defaults',
                                               {'mappings': [], 'thresholds': {'mode': 'absolute', 'steps': []}}),
            'overrides': field_options_data.get('overrides', [])
        }
        panel_json['options'] = {
            'graphMode': options_data.get('graphMode', 'area'),
            'colorMode': options_data.get('colorMode', 'value'),
            'justifyMode': options_data.get('justifyMode', 'auto'),
            'orientation': options_data.get('orientation', 'auto'),
            'fieldOptions': field_options
        }
        return panel_json
class BarGauge(PanelsItemBase):
    """Generator for Grafana ``bargauge`` panels."""

    _copy_fields = {'datasource'}

    def gen_json_from_data(self, data, context):
        """Render the JSON body for a ``bargauge`` panel."""
        panel_json = super(BarGauge, self).gen_json_from_data(data, context)
        panel_json.update({
            'type': 'bargauge',
            'title': data.get('title', None),
            'span': data.get('span', 12),
            'timeFrom': data.get('timeFrom', None),
            'timeShift': data.get('timeShift', None)
        })
        panel_json['targets'] = self.registry.create_component(Targets, data).gen_json() if 'targets' in data else []
        # ``or {}`` guards against explicit nulls in the YAML source.
        options_data = self.data.get('options', {}) or {}
        field_options_data = options_data.get('fieldOptions', {}) or {}
        # NOTE(review): this field-options shape differs from Gauge/Stat
        # (flat 'mappings'/'thresholds'/'override' keys) — confirm intended.
        field_options = {
            'values': field_options_data.get('values', False),
            'calcs': field_options_data.get('calcs', ['last']),
            'defaults': field_options_data.get('defaults', None),
            'mappings': field_options_data.get('mappings', []),
            'thresholds': field_options_data.get('thresholds', []),
            'override': field_options_data.get('override', None)
        }
        panel_json['options'] = {
            'orientation': options_data.get('orientation', 'auto'),
            'displayMode': options_data.get('displayMode', 'lcd'),
            'fieldOptions': field_options
        }
        return panel_json
| |
#!/usr/bin/env python
# This little script allows check DAS is up and running
# It is derived from the DAS CLI code that can be downloaded at
# https://cmsweb.cern.ch/das/cli
import sys
if sys.version_info < (2, 6):
raise Exception("DAS requires python 2.6 or greater")
import os
import re
import time
import json
import urllib
import urllib2
import httplib
import string
from optparse import OptionParser, OptionGroup
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
    """
    Simple HTTPS client authentication handler (Python 2 urllib2) that
    presents the given client key/certificate pair when opening HTTPS
    connections.
    """
    def __init__(self, key=None, cert=None, level=0):
        # level: any truthy value turns on urllib2's HTTP debug output.
        if level:
            urllib2.HTTPSHandler.__init__(self, debuglevel=1)
        else:
            urllib2.HTTPSHandler.__init__(self)
        self.key = key    # path to the client's private key file (or None)
        self.cert = cert  # path to the client's certificate file
    def https_open(self, req):
        """Open request method"""
        #Rather than pass in a reference to a connection class, we pass in
        # a reference to a function which, for all intents and purposes,
        # will behave as a constructor
        return self.do_open(self.get_connection, req)
    def get_connection(self, host, timeout=300):
        """Build the HTTPS connection, with client auth when a key is set.

        NOTE(review): ``timeout`` is accepted but never forwarded to
        HTTPSConnection — confirm whether that is intentional.
        """
        if self.key:
            return httplib.HTTPSConnection(host, key_file=self.key,
                                           cert_file=self.cert)
        return httplib.HTTPSConnection(host)
class DASOptionParser:
    """
    DAS cache client option parser.

    Wraps an optparse.OptionParser holding the options that control
    communication with the DAS server (host, paging index, timeout,
    client key/cert, retries, header handling, verbosity).
    """
    def __init__(self):
        usage = "Usage: %prog [options]\n"
        usage += "where dataset is the requested CMS dataset as documented on DAS"
        self.parser = OptionParser(usage=usage)
        # ---- DAS options
        das_group = OptionGroup(self.parser, "DAS options",
            "The following options control the communication with the DAS server")
        msg = "host name of DAS cache server, default is https://cmsweb.cern.ch"
        das_group.add_option("--host", action="store", type="string",
                             default='https://cmsweb.cern.ch', dest="host", help=msg)
        msg = "index for returned result"
        das_group.add_option("--idx", action="store", type="int",
                             default=0, dest="idx", help=msg)
        msg = 'query waiting threshold in sec, default is 5 minutes'
        das_group.add_option("--threshold", action="store", type="int",
                             default=300, dest="threshold", help=msg)
        msg = 'specify private key file name'
        das_group.add_option("--key", action="store", type="string",
                             default="", dest="ckey", help=msg)
        msg = 'specify private certificate file name'
        das_group.add_option("--cert", action="store", type="string",
                             default="", dest="cert", help=msg)
        msg = 'specify number of retries upon busy DAS server message'
        # BUGFIX: the retry count is an integer (default 0) but was declared
        # with type="string", which made opts.retry a str when given on the
        # command line yet an int when defaulted.
        das_group.add_option("--retry", action="store", type="int",
                             default=0, dest="retry", help=msg)
        msg = 'drop DAS headers'
        das_group.add_option("--das-headers", action="store_true",
                             default=False, dest="das_headers", help=msg)
        msg = 'verbose output'
        das_group.add_option("-v", "--verbose", action="store",
                             type="int", default=0, dest="verbose", help=msg)
        self.parser.add_option_group(das_group)

    def get_opt(self):
        """
        Return the parsed options (reads sys.argv).
        """
        opts, args = self.parser.parse_args()
        return opts
def fullpath(path):
    """Expand user home and environment variables in *path* and make it
    absolute; empty/None values are returned unchanged."""
    if not path:
        return path
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
def get_data(host, query, idx, limit, debug, threshold=300, ckey=None,
             cert=None, das_headers=True):
    """Contact DAS server and retrieve data for given DAS query.

    Submits the query to /das/cache; if the server answers with a 32-char
    request id (pid) instead of data, polls with exponential backoff until
    data arrives or *threshold* seconds elapse.  Returns the decoded JSON
    dict, optionally stripped of DAS bookkeeping keys.
    """
    params = {'input':query, 'idx':idx, 'limit':limit}
    path = '/das/cache'
    pat = re.compile('http[s]{0,1}://')
    if not pat.match(host):
        msg = 'Invalid hostname: %s' % host
        raise Exception(msg)
    url = host + path
    headers = {"Accept": "application/json"}
    encoded_data = urllib.urlencode(params, doseq=True)
    url += '?%s' % encoded_data
    req = urllib2.Request(url=url, headers=headers)
    # Use client-cert auth only when both key and cert are supplied.
    if ckey and cert:
        ckey = fullpath(ckey)
        cert = fullpath(cert)
        hdlr = HTTPSClientAuthHandler(ckey, cert, debug)
    else:
        hdlr = urllib2.HTTPHandler(debuglevel=debug)
    opener = urllib2.build_opener(hdlr)
    fdesc = opener.open(req)
    data = fdesc.read()
    fdesc.close()
    # A bare 32-char hex-ish token means "request queued"; anything else is data.
    pat = re.compile(r'^[a-z0-9]{32}')
    if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
        pid = data
    else:
        pid = None
    iwtime = 2 # initial waiting time in seconds
    wtime = 20 # final waiting time in seconds
    sleep = iwtime
    time0 = time.time()
    # Poll while the server keeps returning the request id.
    while pid:
        params.update({'pid':data})
        encoded_data = urllib.urlencode(params, doseq=True)
        url = host + path + '?%s' % encoded_data
        req = urllib2.Request(url=url, headers=headers)
        try:
            fdesc = opener.open(req)
            data = fdesc.read()
            fdesc.close()
        except urllib2.HTTPError as err:
            return {"status":"fail", "reason":str(err)}
        if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
            pid = data
        else:
            pid = None
        time.sleep(sleep)
        # Backoff: double up to wtime, then restart the cycle at iwtime.
        if sleep < wtime:
            sleep *= 2
        elif sleep == wtime:
            sleep = iwtime # start new cycle
        else:
            sleep = wtime
        if (time.time()-time0) > threshold:
            reason = "client timeout after %s sec" % int(time.time()-time0)
            return {"status":"fail", "reason":reason}
    jsondict = json.loads(data)
    if das_headers:
        return jsondict
    # drop DAS headers, users usually don't need them
    status = jsondict.get('status')
    if status != 'ok':
        return jsondict
    # NOTE(review): del raises KeyError if a key is absent from a row —
    # presumably every row carries all of these; verify against the server.
    drop_keys = ['das_id', 'cache_id', 'qhash', '_id', 'das']
    for row in jsondict['data']:
        for key in drop_keys:
            del row[key]
    #return jsondict['data']
    return jsondict
def main():
    """Main function.

    Queries DAS for a fixed probe dataset and prints the outcome as a CGI
    response (Content-Type header plus a JSON body) on stdout.
    """
    # get the options
    optmgr = DASOptionParser()
    opts = optmgr.get_opt()
    host = opts.host
    debug = opts.verbose
    # Fixed sample dataset used only to check that DAS is up and answering.
    sample = "/TT_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISpring15DR74-Asympt25ns_MCRUN2_74_V9-v2/MINIAODSIM"
    query1 = "dataset="+sample+" | grep dataset.name, dataset.nevents, dataset.size, dataset.tag, dataset.datatype, dataset.creation_time"
    query2 = "release dataset="+sample+" | grep release.name"
    query3 = "config dataset="+sample+" | grep config.global_tag,config.name=cmsRun"
    idx = opts.idx
    thr = opts.threshold
    ckey = opts.ckey
    cert = opts.cert
    das_h = opts.das_headers
    # perform the DAS query (the release/config probes are currently disabled)
    jsondict1 = get_data(host, query1, idx, 1, debug, thr, ckey, cert, das_h)
    #jsondict2 = get_data(host, query2, idx, 1, debug, thr, ckey, cert, das_h)
    #jsondict3 = get_data(host, query3, idx, 1, debug, thr, ckey, cert, das_h)
    try:
        # Exactly one record with status 'ok' means DAS answered properly.
        if len(jsondict1["data"])==1 and jsondict1["status"]== 'ok':
            outcome = { "result":"good", "data":jsondict1["data"][0] }
        else:
            outcome = { "result":"badbad", "exception":"false" }
        print "Content-Type: application/json"
        print
        print json.dumps(outcome)
    except:
        # NOTE(review): bare except — any failure (e.g. a {"status": "fail"}
        # reply without a "data" key) is reported as the 'except' outcome.
        outcome = { "result":"except", "exception":"true" }
        print "Content-Type: application/json"
        print
        print json.dumps(outcome)
#
# main
#
# Run the DAS check only when executed as a script.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/python2.7
#
# setup_leveldb.py
#
# Compiles and installs Minecraft Pocket Edtition binary support.
#
__author__ = "D.C.-G. 2017"
__version__ = "0.4.0"
import sys
import os
import platform
import fnmatch
import re
# The build recipe (ld.so.conf scanning, GNU make, shared-object juggling)
# only works on Linux, so bail out early elsewhere.
if sys.platform != "linux2":
    print "This script can't run on other platforms than Linux ones..."
    sys.exit(1)
# Binaries required for the build; 'a|b' means either one is sufficient.
bin_deps = ('gcc', 'g++', 'unzip', 'wget|curl')
# Resolved by check_bins() to whichever downloader command is available.
wget_curl = None
wget_cmd = "wget -q --no-check-certificate -O"
curl_cmd = "curl -LskS -o"
# GitHub 'codeload' base URLs; appending a commit hash yields a zip snapshot.
leveldb_mojang_sources_url = "https://codeload.github.com/Mojang/leveldb-mcpe/zip/"
leveldb_mojang_commit = "5722a489c0fabf70f7bb36f70adc2ac70ff90377"
# leveldb_other_sources_url = "https://codeload.github.com/jocopa3/leveldb-mcpe/zip/"
# leveldb_other_commit = "56bdd1f38dde7074426d85eab01a5c1c0b5b1cfe"
leveldb_other_sources_url = "https://codeload.github.com/LaChal/leveldb-mcpe/zip/"
leveldb_other_commit = "9191f0499cd6d71e4e08c513f3a331a1ffe24332"
zlib_sources_url = "https://codeload.github.com/madler/zlib/zip/"
zlib_commit = "4a090adef8c773087ec8916ad3c2236ef560df27"
zlib_ideal_version = "1.2.8"
zlib_minimum_version = "1.2.8"
zlib_supported_versions = (zlib_minimum_version, zlib_ideal_version, "1.2.10")
# When True, never prompt the user (prompts fall back to the safe answer).
silent = False
def check_bins(bins):
    """Verify the required build binaries exist on PATH.

    *bins* entries may contain '|' to list alternatives (e.g. 'wget|curl');
    in that case the first alternative found wins and, if a same-named
    global exists (here: wget_curl), it is set to that tool's command line.
    Prompts before continuing when something is missing.
    """
    print 'Searching for the needed binaries %s...' % repr(bins).replace("'", '')
    missing_bin = False
    for name in bins:
        names = []
        if '|' in name:
            names = name.split('|')
        if names:
            # Alternative group: any one of the listed tools will do.
            found = False
            for n in names:
                # 'which' exits 0 when the binary exists.
                if not os.system('which %s > /dev/null' % n):
                    found = True
                    break
                else:
                    print "Could not find %s." % n
            if found:
                g_keys = globals().keys()
                g_name = name.replace('|', '_')
                print "g_name", g_name, g_name in g_keys
                if g_name in g_keys:
                    # n is the alternative that was found (loop broke on it).
                    globals()[g_name] = globals()['%s_cmd' % n]
            else:
                print '*** WARNING: None of these binaries were found on your system: %s.'%', '.join(names)
        else:
            if os.system('which %s > /dev/null' % name):
                print '*** WARNING: %s not found.' % name
                missing_bin = True
    if missing_bin:
        # Ask whether to carry on with a probably-failing build.
        if not silent:
            a = raw_input('The binary dependencies are not satisfied. The build may fail.\nContinue [y/N]?')
        else:
            a = 'n'
        if a and a in 'yY':
            pass
        else:
            sys.exit(1)
    else:
        print 'All the needed binaries were found.'
# Picked from another project to find the lib and adapted to the need
# '32' or '64' depending on the running interpreter's architecture.
ARCH = {'32bit': '32', '64bit': '64'}[platform.architecture()[0]]
# Fallback library directories scanned in addition to ld.so.conf entries.
default_paths = ['/lib', '/lib32', '/lib64', '/usr/lib', '/usr/lib32','/usr/lib64',
                 '/usr/local/lib', os.path.expanduser('~/.local/lib'), '.']
# Gather the libraries paths.
def get_lib_paths(file_name):
    """Return the library directories listed in an ld.so.conf-style file.

    'include' directives are followed recursively, with glob ('*') support
    for patterns such as /etc/ld.so.conf.d/*.conf; plain lines are kept
    when they are existing directories.
    """
    paths = []
    if os.path.isfile(file_name):
        lines = [a.strip() for a in open(file_name).readlines()]
        for i, line in enumerate(lines):
            if not line.startswith('#') and line.strip():
                if line.startswith('include'):
                    line = line.split(' ', 1)[1]
                    if '*' in line:
                        # Build a regex from the glob and recurse into every match.
                        pat = r"%s" % line.split(os.path.sep)[-1].replace('.', '\.').replace('*', '.*')
                        d = os.path.split(line)[0]
                        if os.path.isdir(d):
                            for n in os.listdir(d):
                                r = re.findall(pat, n)
                                if r:
                                    paths += [a for a in get_lib_paths(os.path.join(d, n)) if a not in paths]
                    else:
                        paths += [a for a in get_lib_paths(line) if not a in paths]
                elif not line in paths and os.path.isdir(line):
                    paths.append(line)
    return paths
def find_lib(lib_name, input_file='/etc/ld.so.conf'):
    """Locate a shared library on the system.

    Searches the directories from *input_file* plus default_paths,
    preferring architecture-specific directories.  Progressively strips
    trailing version components from *lib_name* (libz.so.1.2.8 ->
    libz.so.1.2 -> ...) until a match is found.

    Returns (resolved_path_or_None, arch_matches_or_None, version_or_None).
    """
    paths = default_paths + get_lib_paths(input_file)
    arch_paths = []
    other_paths = []
    # Put arch-specific directories first so they win the scan.
    while paths:
        path = paths.pop(0)
        if ARCH in path:
            arch_paths.insert(0, path)
        elif path.endswith('/lib'):
            arch_paths.append(path)
        else:
            other_paths.append(path)
    paths = arch_paths + other_paths
    found = None
    r = None
    ver = None
    name = lib_name
    hash_list = name.split('.')
    hash_list.reverse()
    # Number of version components after '.so' that may be stripped.
    idx = hash_list.index('so')
    i = 0
    while i <= idx and not found:
        for path in paths:
            print "Scanning %s for %s" % (path, name)
            if os.path.exists(path):
                for path, dirnames, filenames in os.walk(path):
                    if name in filenames:
                        found = os.path.join(path, name)
                        break
                if found:
                    break
        i += 1
        # Drop one trailing version component and retry.
        name = name.rsplit('.', 1)[0]
    cur_dir = os.getcwd()
    # NOTE(review): 'path' here is whatever the scan loop left behind —
    # presumably harmless since we chdir back below, but looks accidental.
    os.chdir(path)
    if found:
        # Resolve symlink chains to the real file.
        base_path = os.path.split(found)[0]
        while os.path.islink(found):
            found = os.readlink(found)
            if not found.startswith("/"):
                found = os.path.abspath(os.path.join(base_path, found))
        # Verify the architecture of the library
        inp, outp = os.popen2('file %s | grep "ELF %s"' % (found, ARCH))
        r = bool(outp.read())
        inp.close()
        outp.close()
        # If the architecture could not be check with library internal data, rely on the folder name.
        if os.path.split(found)[0] in arch_paths:
            r = True
        v = found.rsplit('.so.', 1)
        if len(v) == 2:
            ver = v[1]
    os.chdir(cur_dir)
    return found, r, ver
def get_sources(name, url):
    """Download a GitHub zip snapshot from *url* and unpack it as ./<name>."""
    print "Downloading sources for %s" % name
    print "URL: %s" % url
    # wget_curl was resolved by check_bins() to the available downloader.
    os.system("%s %s.zip %s" % (wget_curl, name, url))
    print "Unpacking %s" % name
    os.system("unzip -q %s.zip" % name)
    # GitHub zips unpack to '<repo>-<commit>/'; rename that to plain <name>.
    os.system("mv $(ls -d1 */ | egrep '{n}-') {n}".format(n=name))
    print "Cleaning archive."
    os.remove("%s.zip" % name)
def build_zlib():
    """Configure and build zlib in the current directory.

    Returns make's exit status (0 on success)."""
    print "Building zlib..."
    return os.WEXITSTATUS(os.system("./configure; make"))
# Bad and dirty, but working untill Mojang implement this (or like): code injection
# directly into the sources before compilation.
# As an alternative to this, you may prefer to run this script with the '--alt-leveldb'
# CLI option, since the other repository don't need this code injection.
before = 0
after = 1
c_inject = {
"c.h": {
# The hook to look for code injection.
"hook": """
extern void leveldb_options_set_compression(leveldb_options_t*, int);
""",
# The data to be injected.
"data": """
extern void leveldb_options_set_compressor(leveldb_options_t*, int, int);
""",
# Where to inject the data: after the "hook" or before it.
"where": after
},
"c.cc": {
"hook": """
void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
""",
"data": """
void leveldb_options_set_compressor(leveldb_options_t* opt, int i, int t) {
switch(t) {
case 0:
opt->rep.compressors[i] = nullptr;
break;
#ifdef SNAPPY
case leveldb_snappy_compression:
opt->rep.compressors[i] = new leveldb::SnappyCompressor();
break;
#endif
case leveldb_zlib_compression:
opt->rep.compressors[i] = new leveldb::ZlibCompressor();
break;
case leveldb_zlib_raw_compression:
opt->rep.compressors[i] = new leveldb::ZlibCompressorRaw();
break;
}
}
""",
"where": before
}
}
def build_leveldb(zlib):
    """Patch and build the leveldb sources in the current directory.

    *zlib* is the path of the zlib shared library to link against, or a
    falsy value to leave the Makefile's own -lz linkage untouched.
    Returns make's exit status (0 on success).
    """
    print "Building leveldb..."
    # Inject the needed code into the sources.
    for root, d_names, f_names in os.walk("."):
        for f_name in fnmatch.filter(f_names, "c.[ch]*"):
            if f_name in c_inject.keys():
                hook = c_inject[f_name]["hook"]
                data = c_inject[f_name]["data"]
                where = c_inject[f_name]["where"]
                with open(os.path.join(root, f_name), "r+") as fd:
                    f_data = fd.read()
                    # Skip files that were already patched on a previous run.
                    if data not in f_data:
                        if where == before:
                            c_data = "\n".join((data, hook))
                        else:
                            c_data = "\n".join((hook, data))
                        fd.seek(0)
                        fd.write(f_data.replace(hook, c_data))
    if zlib:
        with open("Makefile", "r+") as f:
            # If '-lz' is specified, we *may* need to specify the full library path. Just force it to be sure.
            data = f.read().replace("LIBS += $(PLATFORM_LIBS) -lz", "LIBS += $(PLATFORM_LIBS) %s" % zlib)
            # All the same if a path is specified, we need the one we found here. (SuSE don't have a /lib/x64_86-linux-gnu directory.)
            data = data.replace("LIBS += $(PLATFORM_LIBS) /lib/x86_64-linux-gnu/libz.so.1.2.8", "LIBS += $(PLATFORM_LIBS) %s" % zlib)
            f.seek(0)
            f.write(data)
    # Make the bundled ./zlib headers visible to the compiler.
    cpath = os.environ.get("CPATH")
    if cpath:
        os.environ["CPATH"] = ":".join(("./zlib", cpath))
    else:
        os.environ["CPATH"] = "./zlib"
    return os.WEXITSTATUS(os.system("make"))
def request_zlib_build():
    """Ask the user how to proceed when no suitable system zlib was found.

    Returns True to build a private zlib, None to continue with the system
    one; exits the script on abort.  In silent mode the answer is 'abort'.
    """
    print " Enter 'b' to build zlib v%s only for leveldb." % zlib_ideal_version
    print " Enter 'a' to quit now and install zlib yourself."
    print " Enter 'c' to continue."
    a = ""
    if not silent:
        while a.lower() not in "abc":
            a = raw_input("Build zlib [b], abort [a] or continue [c]? ")
    else:
        a = "a"
    if a == "b":
        return True
    elif a == "a":
        sys.exit(1)
    elif a == "c":
        return None
def main():
    """Download, patch and build Mojang's leveldb-mcpe (and optionally a
    private zlib) to give MCEdit Pocket Edition support on Linux.

    Returns a non-zero exit status when a build step fails.
    """
    print "=" * 72
    print "Building Linux Minecraft Pocket Edition for MCEdit..."
    print "-----------------------------------------------------"
    global leveldb_commit
    global zlib_commit
    global zlib_sources_url
    global silent
    global zlib_ideal_version
    force_zlib = False
    # NOTE(review): leveldb_source_url is a *local* here, while the CLI
    # override loop below writes into globals() — so '--leveldb-source-url'
    # never reaches the value used at the bottom of this function.  Likewise
    # '--zlib-source-url' targets 'zlib_source_url', which does not match
    # the global 'zlib_sources_url' (note the 's').  Only the *-commit
    # overrides actually take effect.
    leveldb_source_url = leveldb_mojang_sources_url
    leveldb_commit = leveldb_mojang_commit
    cur_dir = os.getcwd()
    if "--force-zlib" in sys.argv:
        force_zlib = True
        sys.argv.remove("--force-zlib")
    if "--alt-leveldb" in sys.argv:
        leveldb_source_url = leveldb_other_sources_url
        leveldb_commit = leveldb_other_commit
    # Generic CLI overrides: '--opt value' stored into the named global.
    for arg, var in (("--leveldb-source-url", "leveldb_source_url"),
                     ("--leveldb-commit", "leveldb_commit"),
                     ("--zlib-source-url", "zlib_source_url"),
                     ("--zlib-commit", "zlib_commit")):
        if arg in sys.argv:
            globals()[var] = sys.argv[sys.argv.index(arg) + 1]
    leveldb_source_url += leveldb_commit
    zlib_sources_url += zlib_commit
    if "--silent" in sys.argv:
        silent = True
    if "--debug-cenv" in sys.argv:
        print 'CPATH:', os.environ.get('CPATH', 'empty!')
        print 'PATH:', os.environ.get('PATH', 'empty!')
        print 'LD_LIBRARY_PATH', os.environ.get('LD_LIBRARY_PATH', 'empty!')
        print 'LIBRARY_PATH', os.environ.get('LIBRARY_PATH', 'empty!')
    check_bins(bin_deps)
    # Get the sources here.
    get_sources("leveldb", leveldb_source_url)
    os.chdir("leveldb")
    # zlib is unpacked *inside* the leveldb tree as leveldb/zlib.
    get_sources("zlib", zlib_sources_url)
    os.chdir(cur_dir)
    zlib = (None, None, None)
    # Check zlib
    if not force_zlib:
        print "Checking zlib."
        zlib = find_lib("libz.so.%s" % zlib_ideal_version)
        print zlib
        if zlib == (None, None, None):
            zlib = None
            print "*** WARNING: zlib not found!"
            print "    It is recommended you install zlib v%s on your system or" % zlib_ideal_version
            print "    let this script install it only for leveldb."
            force_zlib = request_zlib_build()
        else:
            if zlib[2] == None:
                print "*** WARNING: zlib has been found, but the exact version could not be"
                print "    determined."
                print "    It is recommended you install zlib v%s on your system or" % zlib_ideal_version
                print "    let this script install it only for leveldb."
                force_zlib = request_zlib_build()
            elif zlib[2] not in zlib_supported_versions:
                print "*** WARNING: zlib was found, but its version is %s." % zlib[2]
                print "    You can try to build with this version, but it may fail,"
                print "    or the generated libraries may not work..."
                force_zlib = request_zlib_build()
            if zlib[1] == False:
                print "*** WARNING: zlib has been found on your system, but not for the"
                print "    current architecture."
                print "    You apparently run on a %s, and the found zlib is %s" % (ARCH, zlib[0])
                print "    Building the Pocket Edition support may fail. If not,"
                print "    the support may not work."
                print "    You can continue, but it is recommended to install zlib."
                force_zlib = request_zlib_build()
            # request_zlib_build() returns None for 'continue anyway'.
            if force_zlib is None:
                print "Build continues with zlib v%s" % zlib[2]
            else:
                print "Found compliant zlib v%s." % zlib[2]
            # Keep only the library path for the leveldb link step.
            zlib = zlib[0]
    if force_zlib:
        os.chdir("leveldb/zlib")
        r = build_zlib()
        if r:
            print "Zlib build failed."
            return r
        os.chdir(cur_dir)
        # The checked-out zlib commit builds libz.so.1.2.10 — presumably why
        # these names are hard-coded; move the artifacts next to the script.
        os.rename("leveldb/zlib/libz.so.1.2.10", "./libz.so.1.2.10")
        os.rename("leveldb/zlib/libz.so.1", "./libz.so.1")
        os.rename("leveldb/zlib/libz.so", "./libz.so")
        for root, d_names, f_names in os.walk("leveldb"):
            for f_name in fnmatch.filter(f_names, "libz.so*"):
                os.rename(os.path.join(root, f_name), os.path.join(".", f_name))
        # Tweak the leveldb makefile to force the linker to use the built zlib
        with open("leveldb/Makefile", "r+") as f:
            data = f.read()
            data = data.replace("PLATFORM_SHARED_LDFLAGS", "PSL")
            data = data.replace("LDFLAGS += $(PLATFORM_LDFLAGS)",
                "LDFLAGS += $(PLATFORM_LDFLAGS)\nPSL = -L{d} -lz -Wl,-R{d} $(PLATFORM_SHARED_LDFLAGS)".format(d=cur_dir))
            data = data.replace("LIBS += $(PLATFORM_LIBS) -lz", "LIBS += -L{d} -lz -Wl,-R{d} $(PLATFORM_LIBS)".format(d=cur_dir))
            f.seek(0)
            f.write(data)
        # The Makefile now hard-codes the private zlib; no path to pass on.
        zlib = None
    os.chdir("leveldb")
    r = build_leveldb(zlib)
    if r:
        print "PE support build failed."
        return r
    os.chdir(cur_dir)
    # Collect the built leveldb shared objects next to the script.
    for root, d_names, f_names in os.walk("leveldb"):
        for f_name in fnmatch.filter(f_names, "libleveldb.so*"):
            os.rename(os.path.join(root, f_name), os.path.join(".", f_name))
    print "Setup script ended."
# Propagate the build result as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
| |
import time, copy
import os
import sys
import numpy
import h5py
#from PnSC_ui import *
#from PnSC_dataimport import *
from PnSC_SCui import *
#from PnSC_math import *
from PnSC_h5io import *
from PnSC_main import *
from matplotlib.ticker import FuncFormatter
import scipy.integrate
# Batch per-cell calorimetry analysis: for every cell, collect heat-program
# segments from the HDF5 archive, locate enthalpy regions in the heat
# capacity traces, write the metadata back into the .h5 file and plot the
# Cp regions.
celllist=[1, 2]+range(4, 21)+[22]+[24, 25]
for selectcell in celllist:
    print selectcell
    # HDF5 archive holding all AuSiCu nanocalorimetry scans.
    p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/AuSiCu_pnsc_all.h5'
    def myexpformat(x, pos):
        """Shortest exponent-style tick label that still evals back to x."""
        for ndigs in range(2):
            lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
            if eval(lab)==x:
                return lab
        return lab
    ExpTickLabels=FuncFormatter(myexpformat)
    def make_ticklabels_invisible(ax, x=True, y=True):
        """Hide the requested axis tick labels on *ax*."""
        if x:
            for tl in ax.get_xticklabels():
                tl.set_visible(False)
        if y:
            for tl in ax.get_yticklabels():
                tl.set_visible(False)
    def heatrate_T(d, T, Twin=10.):
        """Mean heating rate over the longest contiguous run of samples
        whose temperature lies within T +/- Twin; 0. when none qualify."""
        #i=numpy.argmin((T-d['sampletemperature'])**2)
        Ta=d['sampletemperature'][cycleindex]
        x=numpy.where((Ta>=T-Twin)&(Ta<=T+Twin))[0]
        # Flag the indices that start a new contiguous bunch.
        prev=numpy.array([not (t-1 in x) for t in x])
        previ=numpy.where(prev)[0]
        if len(previ)==0:
            return 0.
        stopi=numpy.append(previ[1:],len(x))
        longestbunchind=numpy.argmax(stopi-previ)
        inds=x[previ[longestbunchind]:stopi[longestbunchind]]
        return d['sampleheatrate'][cycleindex][inds].mean()
    def findenthalpyandpinacles(segdict, critenth=1.e-5, dTmin=.4, Tmeanmin=100.):
        """Split the Cp(T) trace at zero crossings; return a dict per lobe
        whose integrated enthalpy, temperature span and mean temperature
        pass the given thresholds."""
        T=segdict['sampletemperature'][cycleindex]
        C=segdict['sampleheatcapacity'][cycleindex]
        nci=numpy.where((C[:-1]>0.)&(C[1:]<=0.))[0]#neg crossings
        pci=numpy.where((C[1:]>0.)&(C[:-1]<=0.))[0]#pos crossings
        ci=numpy.sort(numpy.concatenate([nci, pci]))
        ans=[]
        for i, j in zip(ci[:-1], ci[1:]):
            enth=scipy.integrate.trapz(C[i:j], T[i:j])
            if numpy.abs(enth)>critenth and (T[j]-T[i])>dTmin:
                itemp=numpy.argmax(numpy.abs(C[i:j]))
                # Cp-weighted mean temperature of the lobe.
                Tmean=scipy.integrate.trapz(C[i:j]*T[i:j], T[i:j])/scipy.integrate.trapz(C[i:j], T[i:j])
                if Tmean<Tmeanmin:
                    continue
                ans+=[dict([('enthalpy', enth), ('T_Cmax', T[i:j][itemp]), ('Cmax', C[i:j][itemp]), ('Tweightedmean', Tmean), ('cycindstart', i), ('cycindstop', j)])]
        return ans
    nskip=100
    cycleindex=0
    #p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/AuSiCu_pnsc_all.h5'
    #p=mm.h5path
    #f=h5py.File(p, mode='r+')
    #f=h5py.File(p, mode='r')
    savef='C:/Users/JohnnyG/Documents/HarvardWork/MG/PnSCplots/batchplotbycell_June2'
    plotTlim=(50., 700.)
    # Experiment list: which segment of each heat program is the heating
    # ramp and which is the cooling ramp.
    expdicts=[\
    dict([('name', 'heat1a'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat1b'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat1c'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat1d'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat2'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat3'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat4a'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat4b'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat4c'), ('heatseg', 2), ('coolseg', 3)]), #never partitioned\
    dict([('name', 'heat6a'), ('heatseg', 2), ('coolseg', 4)]), #never partitioned\
    dict([('name', 'heat6b'), ('heatseg', 2), ('coolseg', 4)]), \
    dict([('name', 'heat6c'), ('heatseg', 2), ('coolseg', 3)]), #never partitioned\
    dict([('name', 'heat7'), ('heatseg', 2), ('coolseg', 3)]), #never partitioned\
    dict([('name', 'heat8'), ('heatseg', 2), ('coolseg', 4)]), \
    ]
    # Collect, for the selected cell, the segment dict list of every
    # experiment that ran on it (metadictlist and allsegdict stay parallel).
    metadictlist=[]
    allsegdict=[]
    for ed in expdicts:
        exp=ed['name']
        f, hppaths=experimenthppaths(p, exp)
        f.close()
        hpsdl=None
        saveh5hpname=None
        for hpp in hppaths:
            h5hpname=hpp.rpartition('/')[2]
            f, g=gethpgroup(p, exp, h5hpname=h5hpname)
            cell=g.attrs['CELLNUMBER']
            f.close()
            if cell!=selectcell:
                continue
            hpsdl=CreateHeatProgSegDictList(p, exp, h5hpname)
            saveh5hpname=h5hpname
        if not hpsdl is None:
            allsegdict+=[hpsdl]
            ed['h5hpname']=saveh5hpname
            metadictlist+=[copy.deepcopy(ed)]
    # Derive per-experiment metadata: previous-scan cooling rates, heating
    # rate, and the glass/crystallization/melt Cp regions.
    for i, (metadict, hpsdl) in enumerate(zip(metadictlist, allsegdict)):
        if hpsdl is None:
            continue
        # Find the most recent previous experiment with usable segments.
        prevhpsdl=None
        for tempmetadict, prevhpsdl in zip(metadictlist[:i][::-1], allsegdict[:i][::-1]):
            if not prevhpsdl is None:
                prevmetadict=tempmetadict
                #prevmetadict=metadictlist[allsegdict.index(prevhpsdl)]
                break
        if prevhpsdl is None:
            # Sentinel cooling rates when there is no previous scan.
            metadict['prevcoolrate_180C']=-1.e2
            metadict['prevcoolrate_250C']=-1.e2
            metadict['prevcoolrate_320C']=-1.e2
            metadict['prevcoolrate_400C']=-1.e2
            metadict['prevname']='heat0'
        else:
            metadict['prevcoolrate_180C']=heatrate_T(prevhpsdl[prevmetadict['coolseg']], 180.)
            metadict['prevcoolrate_250C']=heatrate_T(prevhpsdl[prevmetadict['coolseg']], 250.)
            metadict['prevcoolrate_320C']=heatrate_T(prevhpsdl[prevmetadict['coolseg']], 320.)
            metadict['prevcoolrate_400C']=heatrate_T(prevhpsdl[prevmetadict['coolseg']], 400.)
            metadict['prevname']=prevmetadict['name']
        print metadict['name'], ', previous scan ', metadict['prevname']
        metadict['heatrate_170C500C']=heatrate_T(hpsdl[metadict['heatseg']], 335., Twin=165.)
        if not 'sampleheatcapacity' in hpsdl[metadict['heatseg']].keys():
            continue
        enthdlist=findenthalpyandpinacles(hpsdl[metadict['heatseg']], critenth=1.e-6, dTmin=10.)
        if len(enthdlist)>0:
            # Transpose the per-region dicts into Cpregions_* arrays.
            for k in enthdlist[0].keys():
                metadict['Cpregions_'+k]=[]
                for d in enthdlist:
                    metadict['Cpregions_'+k]+=[d[k]]
                metadict['Cpregions_'+k]=numpy.array(metadict['Cpregions_'+k])
            # Classify regions: glass transition, crystallization, melt(s).
            metadict['Cpregions_glassind']=-1
            metadict['Cpregions_xtalind']=-1
            metadict['Cpregions_meltind']=-1
            metadict['Cpregions_melt2ind']=-1
            # Glass transition: largest endothermic region in 100-250 C.
            ctemp=0.
            for enthind, d in enumerate(enthdlist):
                if d['Tweightedmean']<100. or d['Cmax']<0:
                    continue
                if d['Tweightedmean']>250.:
                    break
                if metadict['Cpregions_glassind']<0 or numpy.abs(d['enthalpy'])>ctemp:
                    metadict['Cpregions_glassind']=enthind
                    ctemp=numpy.abs(d['enthalpy'])
            # Crystallization: largest exothermic region in 180-450 C.
            ctemp=0.
            for enthind, d in enumerate(enthdlist):
                if d['Tweightedmean']<180. or d['Cmax']>0:
                    continue
                #if metadict['Cpregions_glassind']<0 or d['Tweightedmean']>450.:
                if d['Tweightedmean']>450.:
                    break
                if metadict['Cpregions_xtalind']<0 or numpy.abs(d['enthalpy'])>ctemp:
                    metadict['Cpregions_xtalind']=enthind
                    ctemp=numpy.abs(d['enthalpy'])
            # Melting: two largest endothermic regions in 300-600 C.
            ctemp=0.
            ctemp2=0.
            for enthind, d in enumerate(enthdlist):
                if d['Tweightedmean']<300. or d['Cmax']<0:
                    continue
                if d['Tweightedmean']>600.:
                    break
                if metadict['Cpregions_meltind']<0:
                    metadict['Cpregions_meltind']=enthind
                    ctemp=numpy.abs(d['enthalpy'])
                else:
                    if numpy.abs(d['enthalpy'])>ctemp:
                        if metadict['Cpregions_meltind']<0 or ctemp>ctemp2:
                            metadict['Cpregions_melt2ind']=metadict['Cpregions_meltind']
                            ctemp2=ctemp
                        metadict['Cpregions_meltind']=enthind
                        ctemp=numpy.abs(d['enthalpy'])
                    elif numpy.abs(d['enthalpy'])>ctemp2:
                        metadict['Cpregions_melt2ind']=enthind
                        ctemp2=numpy.abs(d['enthalpy'])
            # Hand-curated overrides for scans where the automatic
            # classification is known to pick a spurious region.
            if selectcell==1 and metadict['name']=='heat8':
                metadict['Cpregions_glassind']=-1
            if selectcell==7 and metadict['name']=='heat1a':
                metadict['Cpregions_xtalind']=-1
            if selectcell==12 and metadict['name']=='heat1a':
                metadict['Cpregions_glassind']=-1
                metadict['Cpregions_xtalind']=-1
            if selectcell==13 and metadict['name']=='heat1a':
                metadict['Cpregions_glassind']=-1
                metadict['Cpregions_xtalind']=-1
            if selectcell==14 and metadict['name']=='heat1a':
                metadict['Cpregions_glassind']=-1
                metadict['Cpregions_xtalind']=-1
            if selectcell==16 and metadict['name']=='heat1a':
                metadict['Cpregions_glassind']=-1
                metadict['Cpregions_xtalind']=-1
            if selectcell==17 and metadict['name']=='heat1a':
                metadict['Cpregions_glassind']=-1
                metadict['Cpregions_xtalind']=-1
            if selectcell==18 and metadict['name']=='heat8':
                metadict['Cpregions_glassind']=-1
            if selectcell==22 and metadict['name']=='heat1a':
                metadict['Cpregions_glassind']=-1
                metadict['Cpregions_xtalind']=-1
            if selectcell==25 and metadict['name']=='heat8':
                metadict['Cpregions_glassind']=-1
    # Save the metadata into /calbycellmetadata/<cell>/<scan> attributes,
    # skipping scans known to be bad for particular cells.
    savebool=True
    if savebool:
        f=h5py.File(p, mode='r+')
        if 'calbycellmetadata' in f:
            g=f['calbycellmetadata']
        else:
            g=f.create_group('calbycellmetadata')
        if `selectcell` in g:
            cg=g[`selectcell`]
        else:
            cg=g.create_group(`selectcell`)
        for metadict in metadictlist:
            if selectcell==1 and metadict['name'].startswith('heat1'):
                continue
            if selectcell==6 and metadict['name']=='heat6a':
                continue
            if selectcell==16 and (metadict['name']=='heat2' or metadict['name']=='heat8'):
                continue
            if selectcell==19 and metadict['name'].startswith('heat1'):
                continue
            if selectcell==20 and metadict['name']=='heat4a':
                continue
            if selectcell==22 and metadict['name']=='heat4b':
                continue
            if selectcell==24 and metadict['name']=='heat6a':
                continue
            if selectcell==25 and (metadict['name']=='heat2' or metadict['name']=='heat3'):
                continue
            if metadict['name'] in cg:
                mg=cg[metadict['name']]
            else:
                mg=cg.create_group(metadict['name'])
            for k, v in metadict.iteritems():
                mg.attrs[k]=v
        f.close()
    # Plot Cp(T) with the classified regions filled in color and save one
    # PNG per scan under savef/cell<NN>/.
    plotcpregions=True
    if plotcpregions:
        #pylab.figure(figsize=(8, 6))
        for i, (metadict, hpsdl) in enumerate(zip(metadictlist, allsegdict)):
            if not 'Cpregions_enthalpy' in metadict.keys():
                continue
            T=hpsdl[metadict['heatseg']]['sampletemperature'][cycleindex]
            C=hpsdl[metadict['heatseg']]['sampleheatcapacity'][cycleindex]
            pylab.plot(T, C)
            pv=numpy.max(hpsdl[metadict['heatseg']]['sampleheatcapacity'][cycleindex])
            nv=numpy.min(hpsdl[metadict['heatseg']]['sampleheatcapacity'][cycleindex])
            lgen=lambda s:((s>0) and ([pv, pv], ) or ([nv, nv], ))[0]
            # Color code: glass=blue, crystallization=green, melts=red.
            rxnindlist=[metadict['Cpregions_glassind'], metadict['Cpregions_xtalind'], metadict['Cpregions_meltind'], metadict['Cpregions_melt2ind']]
            for regind, (enth, Tp, Cp, Tmean, i, j) in enumerate(zip(metadict['Cpregions_enthalpy'], metadict['Cpregions_T_Cmax'], metadict['Cpregions_Cmax'], metadict['Cpregions_Tweightedmean'], metadict['Cpregions_cycindstart'], metadict['Cpregions_cycindstop'])):
                if regind in rxnindlist:
                    col=['b', 'g', 'r', 'r'][rxnindlist.index(regind)]
                else:
                    col=(.4, .4, .4)
                #pylab.plot([T[i], T[j]], lgen(numpy.sign(Cp)), 'k--')
                pylab.fill(T[i:j], C[i:j], color=col)
                pylab.plot(Tp, Cp, 'kx')
                pylab.plot(Tmean, 0, 'k*')
            pylab.xlim(plotTlim)
            Cinrange= C[(T>plotTlim[0])&(T<plotTlim[1])]
            a=Cinrange.min()
            b=Cinrange.max()
            pylab.ylim(a-0.02*(b-a), b+0.02*(b-a))
            pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell),'Cpregions_cell%02d_%s.png' %(selectcell, metadict['name'])))
            pylab.clf()
    #orderarray=numpy.abs(numpy.array(cool400))
    cols=['k', 'b', 'g', 'r', 'c', 'm']
    ### plotting series of heat ramps
    #mult=1.e6
    #nplots=len(orderarray)
    #axl=[pylab.subplot(nplots, 1, nplots)]
    #for i in range(1, nplots):
    #    #ax=pylab.subplot2grid((n, 3), (n-1-i, 0), colspan=2, sharex=axl[0], sharey=axl[0])
    #    ax=pylab.subplot(nplots, 1, nplots-i, sharex=axl[0], sharey=axl[0])
    #    pylab.setp(ax.get_xticklabels(), visible=False)
    #    axl+=[ax]
    #for count, i in enumerate(numpy.argsort(orderarray)):
    #    hi, hseg, ci, cseg=heati_heatseg_prevcooli_prevcoolseg[i]
    #    print hi, hseg, heatlist[hi], allsegdict[hi][hseg].keys()
    #    axl[count].plot(allsegdict[hi][hseg]['sampletemperature'][cycleindex], mult*allsegdict[hi][hseg]['sampleheatcapacity'][cycleindex], cols[count]+'.', markersize=1, label=heatlist[i])
    #for ax in axl:
    #    ax.set_ylim(-2.1, 4.9)
    #    ax.set_yticks([-2, 0, 2, 4])
    #
    #axl[2].set_ylabel(r'Heat Capacity ($\mu$J/K), endothermic ->', fontsize=14)
    #axl[0].set_xlabel('Temperature (C)', fontsize=14)
    #pylab.subplots_adjust(right=.95, top=0.95, hspace=0.01)
    ###plot cooling rates
    #pylab.figure(figsize=(1.5, 8))
    #for count, x in enumerate(numpy.sort(orderarray)):
    #    pylab.semilogx(numpy.abs(x), count, cols[count]+'o')
    #make_ticklabels_invisible(pylab.gca(), x=False)
    #pylab.xlabel('cooling rate at 400C (K/s)', fontsize=14)
    #pylab.ylim(-.5, count+.5)
    ###extra stuff?
    #pylab.ylim(t3, t4)
    #    pylab.xlabel('T (C)')
    #    pylab.ylabel('P / dT/dt')
    #    pylab.gca().yaxis.set_major_formatter(ExpTickLabels)
    #    pylab.subplots_adjust(left=.1, right=.97, top=.93, bottom=.08, wspace=.25, hspace=.25)
    ##    pylab.show()
    ##    idialog=messageDialog(title='continue')
    ##    if not idialog.exec_():
    ##        break
    ##    break
    #    pylab.savefig(os.path.join(savef,'SCcellplot_cell%02d' %(cellcount+1)+'.png'))
    #    pylab.clf()
    #pylab.show()
print 'done'
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test foreground and background colors"""
import time
from colored import fg, bg, attr
def main():
    """Print every color of the xterm-256 palette, once as a foreground
    color and once as a background color, pausing briefly between colors."""
    # The 256-color palette in terminal order:
    # 0-15: the classic system colors,
    system = (
        "#000000", "#800000", "#008000", "#808000",
        "#000080", "#800080", "#008080", "#c0c0c0",
        "#808080", "#ff0000", "#00ff00", "#ffff00",
        "#0000ff", "#ff00ff", "#00ffff", "#ffffff",
    )
    # 16-231: the 6x6x6 color cube (channel levels 00/5f/87/af/d7/ff),
    levels = ("00", "5f", "87", "af", "d7", "ff")
    cube = tuple(
        "#%s%s%s" % (red, green, blue)
        for red in levels
        for green in levels
        for blue in levels
    )
    # 232-255: the 24-step grayscale ramp (0x08 to 0xee in steps of 0x0a).
    grays = tuple("#%02x%02x%02x" % (v, v, v) for v in range(0x08, 0xef, 0x0a))
    for color in system + cube + grays:
        print(
            "{}This text is colored: {}{}".format(
                fg(color),
                color,
                attr("reset")))
        print(
            "{}This text is colored: {}{}".format(
                bg(color),
                color,
                attr("reset")))
        time.sleep(0.1)
# Script entry point: run the color demo only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented) Intercept a run() call and give control to DebugStepper
to let it perform stepping / continuing-to actions on the graph.
d) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
invoked, with a OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns a OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constructor
returns. Control is released back to the caller of the constructor, which can
invoke run() method of wrapper session with the same syntax as a non-wrapped
session, e.g.,:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
this run call, along with a count of how many run calls has occurred
on this wrapper session. The callback then returns an OnRunStartResponse
object, of which the action field directs what the wrapper session
actually will do of the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
If the action is INVOKE_STEPPER, no run() call will be issued to the
wrapped session. But instead, a DebugStepper (i.e., "continuation
debugger") will be used to perform stepping / continue-to actions on
the graph.
TODO(cais): The event loop for the DebugStepper will request additional
callbacks including on_cont_start() and on_cont_end(). Add those.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
including the actual action performed in the wrapper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives the control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) a DebugStepper cont() call with target specified.
iii) value overrides in the cached tensors from the DebugStepper.
iv) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implement the instruction loop in B1 - B3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug import debug_utils
from tensorflow.python.framework import errors
# Helper function.
def _check_type(obj, expected_type):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_type: (type) The expected type of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
"""
if not isinstance(obj, expected_type):
raise TypeError("Expected type %s; got type %s" %
(expected_type, type(obj)))
class OnSessionInitRequest(object):
  """Request to an on-session-init callback.

  Delivered during the __init__ call of a debug-wrapper session; carries
  the (unwrapped) session being wrapped.
  """

  def __init__(self, sess):
    """Create the request.

    Args:
      sess: A tensorflow Session object (validated to be a BaseSession).
    """
    _check_type(sess, session.BaseSession)
    self.session = sess
class OnSessionInitAction(object):
  """Enum-like values for possible action to take on session init."""

  # Proceed, without special actions, in the wrapper session initialization.
  # What action the wrapper session performs next is determined by the caller
  # of the wrapper session. E.g., it can call run().
  PROCEED = "proceed"

  # Instead of letting the caller of the wrapper session determine what actions
  # the wrapper session will perform next, enter a loop to receive instructions
  # from a remote client.
  # For example, TensorBoard visual debugger can use this action so that it can
  # launch session.run() calls remotely.
  REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
  """Response from an on-session-init callback.

  Directs the wrapper session's constructor what to do next (one of the
  OnSessionInitAction values).
  """

  def __init__(self, action):
    """Create the response.

    Args:
      action: (OnSessionInitAction) Debugger action to take on session init.
    """
    _check_type(action, str)
    self.action = action
class OnRunStartRequest(object):
  """Request to an on-run-start callback.

  Delivered at the start of a wrapper run() call, immediately after the
  run-call counter has been incremented. Bundles the run() arguments plus
  the running count of run() invocations.
  """

  def __init__(self, fetches, feed_dict, run_options, run_metadata,
               run_call_count):
    """Bundle the arguments of a run() call for the callback.

    The first four arguments are identical to the inputs of the run() method
    of a non-wrapped TensorFlow session.

    Args:
      fetches: Fetch targets of the run() call.
      feed_dict: The feed dictionary to the run() call.
      run_options: RunOptions input to the run() call.
      run_metadata: RunMetadata input to the run() call.
      run_call_count: 1-based count of how many run calls (including this one)
        have been invoked on the wrapper session.
    """
    self.run_call_count = run_call_count
    self.run_metadata = run_metadata
    self.run_options = run_options
    self.feed_dict = feed_dict
    self.fetches = fetches
class OnRunStartAction(object):
  """Enum-like values for possible action to take on start of a run() call."""

  # Run once with debug tensor-watching.
  DEBUG_RUN = "debug_run"

  # Run without debug tensor-watching.
  NON_DEBUG_RUN = "non_debug_run"

  # Instead of running the fetches as a whole, as would normally happen, invoke
  # the (to-be-implemented) debug stepper.
  # TODO(cais): Remove "to-be-implemented".
  INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
  """Request from an on-run-start callback.

  Tells the debug-wrapper session which action to actually take for the
  run() call and, for debug runs, where the debug tensor data should go.
  """

  def __init__(self, action, debug_urls):
    """Create the response.

    Args:
      action: (OnRunStartAction) the action actually taken by the wrapped
        session for the run() call.
      debug_urls: (list of str) debug_urls used in watching the tensors during
        the run() call.
    """
    _check_type(action, str)
    _check_type(debug_urls, list)
    self.action = action
    self.debug_urls = debug_urls
class OnRunEndRequest(object):
  """Request to an on-run-end callback.

  Delivered immediately before the wrapped run() call returns, describing
  what actually happened during the run.
  """

  def __init__(self,
               performed_action,
               run_metadata=None,
               client_graph_def=None,
               tf_error=None):
    """Create the request.

    Args:
      performed_action: (OnRunStartAction) Actually-performed action by the
        debug-wrapper session.
      run_metadata: run_metadata output from the run() call (if any).
      client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
        the python front end of TensorFlow. Can be obtained with
        session.graph.as_graph_def().
      tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
        during the run (if any).
    """
    _check_type(performed_action, str)
    self.performed_action = performed_action

    # None is a legal value meaning "no metadata"; only type-check a
    # metadata object that was actually supplied.
    if run_metadata is not None:
      _check_type(run_metadata, config_pb2.RunMetadata)
    self.run_metadata = run_metadata

    self.client_graph_def = client_graph_def
    self.tf_error = tf_error
class OnRunEndResponse(object):
  """Response from an on-run-end callback.

  Currently carries no data; it exists so the callback protocol is symmetric
  and can grow fields later without changing signatures.
  """

  def __init__(self):
    # Placeholder: no state yet.
    pass
class BaseDebugWrapperSession(session.SessionInterface):
  """Base class of debug-wrapper session classes.

  Concrete classes that inherit from this class need to implement the abstract
  methods such as on_session_init, on_run_start and on_run_end.
  """

  # TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper
  # is available.

  def __init__(self, sess):
    """Constructor of BaseDebugWrapperSession.

    Args:
      sess: An (unwrapped) TensorFlow session instance.

    Raises:
      ValueError: On invalid OnSessionInitAction value.
    """
    _check_type(sess, session.BaseSession)

    # The session being wrapped.
    self._sess = sess

    # Keeps track of number of run calls that have been performed on this
    # debug-wrapper session.
    self._run_call_count = 0

    # Invoke on-session-init callback (implemented by the concrete subclass).
    response = self.on_session_init(OnSessionInitRequest(self._sess))
    _check_type(response, OnSessionInitResponse)

    if response.action == OnSessionInitAction.PROCEED:
      pass
    elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
      # TODO(cais): Implement REMOTE_INSTR_LOOP
      raise NotImplementedError(
          "OnSessionInitAction REMOTE_INSTR_LOOP has not been "
          "implemented.")
    else:
      raise ValueError(
          "Invalid OnSessionInitAction value: %s" % response.action)

  @property
  def graph(self):
    # Forward to the wrapped session so the wrapper is a drop-in replacement.
    return self._sess.graph

  @property
  def sess_str(self):
    return self._sess.sess_str

  @property
  def session(self):
    # The underlying (unwrapped) session.
    return self._sess

  def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
    """Wrapper around Session.run() that inserts tensor watch options.

    Args:
      fetches: Same as the fetches arg to regular Session.run()
      feed_dict: Same as the feed_dict arg to regular Session.run()
      options: Same as the options arg to regular Session.run()
      run_metadata: Same as the run_metadata to regular Session.run()

    Returns:
      Simply forwards the output of the wrapped Session.run() call.

    Raises:
      ValueError: On invalid OnRunStartAction value.
    """
    self._run_call_count += 1

    # Invoke on-run-start callback and obtain response.
    run_start_resp = self.on_run_start(
        OnRunStartRequest(fetches, feed_dict, options, run_metadata,
                          self._run_call_count))
    _check_type(run_start_resp, OnRunStartResponse)

    if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
      # Decorate RunOption to fill in debugger tensor watch specifications.
      # NOTE(review): when the caller supplies `options`, that same object is
      # mutated in place by _decorate_run_options below — confirm callers do
      # not reuse it for subsequent non-debug runs.
      decorated_run_options = options or config_pb2.RunOptions()
      run_metadata = run_metadata or config_pb2.RunMetadata()

      self._decorate_run_options(decorated_run_options,
                                 run_start_resp.debug_urls)

      # Invoke the run() method of the wrapped Session. Catch any TensorFlow
      # runtime errors.
      tf_error = None
      try:
        retvals = self._sess.run(fetches,
                                 feed_dict=feed_dict,
                                 options=decorated_run_options,
                                 run_metadata=run_metadata)
      except errors.OpError as op_error:
        # The error object itself becomes the "return value" so on_run_end
        # (and the caller) can inspect it.
        tf_error = op_error
        retvals = op_error

      run_end_req = OnRunEndRequest(
          run_start_resp.action,
          run_metadata=run_metadata,
          client_graph_def=self._sess.graph.as_graph_def(),
          tf_error=tf_error)
    elif run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN:
      # Invoke run() method of the wrapped session.
      retvals = self._sess.run(
          fetches,
          feed_dict=feed_dict,
          options=options,
          run_metadata=run_metadata)

      # Prepare arg for the on-run-end callback.
      run_end_req = OnRunEndRequest(run_start_resp.action)
    elif run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
      # TODO(cais): Implement stepper loop.
      raise NotImplementedError(
          "OnRunStartAction INVOKE_STEPPER has not been implemented.")
    else:
      raise ValueError(
          "Invalid OnRunStartAction value: %s" % run_start_resp.action)

    # Invoke on-run-end callback and obtain response.
    run_end_resp = self.on_run_end(run_end_req)
    _check_type(run_end_resp, OnRunEndResponse)
    # Currently run_end_resp is only a placeholder. No action is taken on it.

    return retvals

  def partial_run_setup(self, fetches, feeds=None):
    """Sets up the feeds and fetches for partial runs in the session."""
    raise NotImplementedError(
        "partial_run_setup is not implemented for debug-wrapper sessions.")

  def partial_run(self, handle, fetches, feed_dict=None):
    # Partial runs are intentionally unsupported in debug-wrapper sessions.
    raise NotImplementedError(
        "partial_run is not implemented for debug-wrapper sessions.")

  def _decorate_run_options(self, run_options, debug_urls):
    """Modify a RunOptions object for debug tensor watching.

    Specifies request for outputting partition graphs. Adds
    debug_tensor_watch_opts with proper debug URLs.

    Args:
      run_options: (RunOptions) the modified RunOptions object.
      debug_urls: (list of str) debug URLs to be entered in run_options.
        debug_tensor_watch_opts.
    """
    run_options.output_partition_graphs = True
    debug_utils.watch_graph(
        run_options, self._sess.graph, debug_urls=debug_urls)

  @abc.abstractmethod
  def on_session_init(self, request):
    """Callback invoked during construction of the debug-wrapper session.

    This is a blocking callback.
    The invocation happens right before the constructor ends.

    Args:
      request: (OnSessionInitRequest) callback request carrying information
        such as the session being wrapped.

    Returns:
      An instance of OnSessionInitResponse.
    """
    pass

  @abc.abstractmethod
  def on_run_start(self, request):
    """Callback invoked on run() calls to the debug-wrapper session.

    This is a blocking callback.
    The invocation happens after the wrapper's run() call is entered,
    after an increment of run call counter.

    Args:
      request: (OnRunStartRequest) callback request object carrying information
        about the run call such as the fetches, feed dict, run options, run
        metadata, and how many run() calls to this wrapper session have
        occurred.

    Returns:
      An instance of OnRunStartResponse, carrying information to
        1) direct the wrapper session to perform a specified action (e.g., run
           with or without debug tensor watching, invoking the stepper.)
        2) debug URLs used to watch the tensors.
    """
    pass

  @abc.abstractmethod
  def on_run_end(self, request):
    """Callback invoked on run() calls to the debug-wrapper session.

    This is a blocking callback.
    The invocation happens right before the wrapper exits its run() call.

    Args:
      request: (OnRunEndRequest) callback request object carrying information
        such as the actual action performed by the session wrapper for the
        run() call.

    Returns:
      An instance of OnRunEndResponse.
    """
    pass

  # TODO(cais): Add _node_name_regex_whitelist and
  # _node_op_type_regex_whitelist.
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.utils
import frappe.share
import frappe.defaults
import frappe.desk.form.meta
from frappe.model.utils.user_settings import get_user_settings
from frappe.permissions import get_doc_permissions
from frappe import _
@frappe.whitelist()
def getdoc(doctype, name, user=None):
	"""
	Loads a doclist for a given document. This method is called directly from the client.
	Requires "doctype", "name" as form variables.
	Will also call the "onload" method on the document.

	`user` is accepted for API compatibility but is not used here.
	"""
	if not (doctype and name):
		raise Exception('doctype and name required!')

	# NOTE: the previous `if not name: name = doctype` fallback was unreachable
	# (a falsy name already raised above) and has been removed.

	if not frappe.db.exists(doctype, name):
		return []

	try:
		doc = frappe.get_doc(doctype, name)
		run_onload(doc)

		if not doc.has_permission("read"):
			frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype + ' ' + name))
			raise frappe.PermissionError(("read", doctype, name))

		doc.apply_fieldlevel_read_permissions()

		# add file list
		doc.add_viewed()
		get_docinfo(doc)

	except Exception:
		frappe.errprint(frappe.utils.get_traceback())
		raise

	# names starting with "_" are internal documents; don't track them
	if doc and not name.startswith('_'):
		frappe.get_user().update_recent(doctype, name)

	doc.add_seen()
	frappe.response.docs.append(doc)
@frappe.whitelist()
def getdoctype(doctype, with_parent=False, cached_timestamp=None):
	"""Load the meta bundle for a doctype (and optionally its parent doctype).

	Returns "use_cache" when the caller's cached copy is still current.
	"""
	meta_docs = []
	parent_dt = None

	# when called from the report builder, load the parent doctype's bundle
	if with_parent:
		parent_dt = frappe.model.meta.get_parent_dt(doctype)
		if parent_dt:
			meta_docs = get_meta_bundle(parent_dt)
			frappe.response['parent_dt'] = parent_dt

	if not meta_docs:
		meta_docs = get_meta_bundle(doctype)

	frappe.response['user_settings'] = get_user_settings(parent_dt or doctype)

	# client already has the latest version cached
	if cached_timestamp and meta_docs[0].modified == cached_timestamp:
		return "use_cache"

	frappe.response.docs.extend(meta_docs)
def get_meta_bundle(doctype):
	"""Return the meta of `doctype` followed by the metas of all its child
	(Table-field) doctypes."""
	main_meta = frappe.desk.form.meta.get_meta(doctype)
	bundle = [main_meta]
	bundle.extend(
		frappe.desk.form.meta.get_meta(df.options, not frappe.conf.developer_mode)
		for df in main_meta.fields if df.fieldtype == "Table")
	return bundle
@frappe.whitelist()
def get_docinfo(doc=None, doctype=None, name=None):
	"""Assemble sidebar info for a document (attachments, communications,
	comments, versions, assignments, permissions, shares, rating, views)
	into frappe.response["docinfo"]."""
	if not doc:
		doc = frappe.get_doc(doctype, name)
		if not doc.has_permission("read"):
			raise frappe.PermissionError

	docinfo = {}
	docinfo["attachments"] = get_attachments(doc.doctype, doc.name)
	docinfo["communications"] = _get_communications(doc.doctype, doc.name)
	docinfo['total_comments'] = len(json.loads(doc.get('_comments') or '[]'))
	docinfo['versions'] = get_versions(doc)
	docinfo["assignments"] = get_assignments(doc.doctype, doc.name)
	docinfo["permissions"] = get_doc_permissions(doc)
	docinfo["shared"] = frappe.share.get_users(doc.doctype, doc.name)
	docinfo["rating"] = get_feedback_rating(doc.doctype, doc.name)
	docinfo["views"] = get_view_logs(doc.doctype, doc.name)
	frappe.response["docinfo"] = docinfo
def get_attachments(dt, dn):
	"""List File records attached to document `dn` of doctype `dt`."""
	attachment_filters = {"attached_to_name": dn, "attached_to_doctype": dt}
	attachment_fields = ["name", "file_name", "file_url", "is_private"]
	return frappe.get_all("File", fields=attachment_fields, filters=attachment_filters)
def get_versions(doc):
	"""Return the 10 most recent Version records for `doc`, newest first."""
	version_filters = {"ref_doctype": doc.doctype, "docname": doc.name}
	return frappe.get_all('Version', filters=version_filters,
		fields=['name', 'owner', 'creation', 'data'],
		limit=10, order_by='creation desc')
@frappe.whitelist()
def get_communications(doctype, name, start=0, limit=20):
	"""Permission-checked public wrapper around _get_communications."""
	if not frappe.get_doc(doctype, name).has_permission("read"):
		raise frappe.PermissionError
	return _get_communications(doctype, name, start, limit)
def _get_communications(doctype, name, start=0, limit=20):
	"""Fetch communications for a document; attach file lists to emails and
	render markdown for plain comments."""
	communications = get_communication_data(doctype, name, start, limit)
	for comm in communications:
		if comm.communication_type == "Communication":
			# attach the list of files linked to this communication
			files = frappe.get_all("File",
				fields=["file_url", "is_private"],
				filters={"attached_to_doctype": "Communication",
					"attached_to_name": comm.name})
			comm.attachments = json.dumps(files)
		elif comm.communication_type == "Comment" and comm.comment_type == "Comment":
			comm.content = frappe.utils.markdown(comm.content)
	return communications
def get_communication_data(doctype, name, start=0, limit=20, after=None, fields=None,
	group_by=None, as_dict=True):
	'''Returns list of communications for a given document.

	Args:
		doctype, name: the document whose timeline is being fetched.
		start, limit: pagination (OFFSET / LIMIT).
		after: optional creation-date lower bound.
		fields: optional SQL field list override.
		group_by: optional GROUP BY clause fragment.
		as_dict: passed through to frappe.db.sql.
	'''
	if not fields:
		fields = '''`name`, `communication_type`,`communication_medium`, `comment_type`,
			`communication_date`, `content`, `sender`, `sender_full_name`,
			`creation`, `subject`, `delivery_status`, `_liked_by`,
			`timeline_doctype`, `timeline_name`, `reference_doctype`, `reference_name`,
			`link_doctype`, `link_name`, `read_by_recipient`, `rating` '''

	conditions = '''communication_type in ('Communication', 'Comment', 'Feedback')
		and (
			(reference_doctype=%(doctype)s and reference_name=%(name)s)
			or (
				(timeline_doctype=%(doctype)s and timeline_name=%(name)s)
				and (
					communication_type='Communication'
					or (
						communication_type='Comment'
						and comment_type in ('Created', 'Updated', 'Submitted', 'Cancelled', 'Deleted')
					)))
		)'''

	values = {
		"doctype": doctype,
		"name": name,
		"start": frappe.utils.cint(start),
		"limit": limit,
	}

	if after:
		# find after a particular date
		# FIX: bind `after` as a query parameter instead of str.format()-ing it
		# into the SQL, which was an SQL-injection vector and also produced an
		# unquoted date literal.
		conditions += ' and creation > %(after)s'
		values["after"] = after

	if doctype == 'User':
		# exclude a user's own outbound communications from their timeline
		conditions += " and not (reference_doctype='User' and communication_type='Communication')"

	communications = frappe.db.sql("""select {fields}
		from `tabCommunication`
		where {conditions} {group_by}
		order by creation desc LIMIT %(limit)s OFFSET %(start)s""".format(
			fields=fields, conditions=conditions, group_by=group_by or ""),
		values, as_dict=as_dict)

	return communications
def get_assignments(dt, dn):
	"""Return up to 5 most recently modified open ToDos referencing the document."""
	return frappe.db.sql("""select `name`, owner, description from `tabToDo`
		where reference_type=%(doctype)s and reference_name=%(name)s and status='Open'
		order by modified desc limit 5""",
		{"doctype": dt, "name": dn},
		as_dict=True)
@frappe.whitelist()
def get_badge_info(doctypes, filters):
	"""Count matching documents per doctype, excluding cancelled (docstatus 2)."""
	filters = json.loads(filters)
	doctypes = json.loads(doctypes)
	filters["docstatus"] = ["!=", 2]
	return {dt: frappe.db.get_value(dt, filters, "count(*)")
		for dt in doctypes}
def run_onload(doc):
	"""Attach a fresh __onload dict to the document and fire its onload hook."""
	doc.set("__onload", frappe._dict())
	doc.run_method("onload")
def get_feedback_rating(doctype, docname):
	"""Return the most recent Feedback rating for the document, or 0 if none."""
	rows = frappe.get_all("Communication",
		filters={
			"reference_doctype": doctype,
			"reference_name": docname,
			"communication_type": "Feedback"
		},
		fields=["rating"], order_by="creation desc", as_list=True)
	return rows[0][0] if rows else 0
def get_view_logs(doctype, docname):
	"""Return the latest View log entries for the document, newest first.

	Only doctypes whose meta enables `track_views` have logs; others get [].
	"""
	logs = []
	# FIX: fetch the meta once instead of calling frappe.get_meta() twice;
	# getattr with a default preserves the old hasattr-and-truthy semantics.
	meta = frappe.get_meta(doctype)
	if getattr(meta, 'track_views', False):
		view_logs = frappe.get_all("View log", filters={
			"reference_doctype": doctype,
			"reference_name": docname,
		}, fields=["name", "creation"], order_by="creation desc")
		if view_logs:
			logs = view_logs
	return logs
| |
from collections import defaultdict
from itertools import chain
import logging
from networkx import NetworkXError
from cle.backends.cgc import CGC
from .errors import IdentifierException
from .functions import Functions
from .runner import Runner
from .. import Analysis
from ... import options
from ...errors import AngrError, SimSegfaultError, SimEngineError, SimMemoryError, SimError
l = logging.getLogger(name=__name__)
NUM_TESTS = 5
class FuncInfo(object):
    """Container for the facts recovered about one function's stack layout.

    Every field starts as None ("unknown") and is filled in by the
    Identifier analysis.
    """

    _FIELDS = ("stack_vars", "stack_var_accesses", "frame_size",
               "pushed_regs", "stack_args", "stack_arg_accesses",
               "buffers", "var_args", "bp_based", "bp_sp_diff",
               "accesses_ret", "preamble_sp_change")

    def __init__(self):
        for field in FuncInfo._FIELDS:
            setattr(self, field, None)
class Identifier(Analysis):
_special_case_funcs = ["free"]
    def __init__(self, cfg=None, require_predecessors=True, only_find=None):
        """Set up the identifier and pre-analyze stack layouts of all functions.

        Args:
            cfg: an existing CFG to reuse; if None a CFGFast is computed.
            require_predecessors: skip functions with no callers in the callgraph.
            only_find: optional set of function names to restrict matching to.
        """
        # self.project = project
        if not isinstance(self.project.loader.main_object, CGC):
            l.critical("The identifier currently works only on CGC binaries. Results may be completely unexpected.")

        if cfg is not None:
            self._cfg = cfg
        else:
            self._cfg = self.project.analyses.CFGFast(resolve_indirect_jumps=True)
        self._runner = Runner(self.project, self._cfg)

        # only find if in this set
        self.only_find = only_find

        # reg list: all default symbolic registers except the stack and
        # instruction pointers (bp is kept so bp-based frames can be modeled)
        a = self.project.arch
        self._sp_reg = a.register_names[a.sp_offset]
        self._bp_reg = a.register_names[a.bp_offset]
        self._ip_reg = a.register_names[a.ip_offset]
        self._reg_list = a.default_symbolic_registers
        self._reg_list = [r for r in self._reg_list if r not in (self._sp_reg, self._ip_reg)]

        self.matches = dict()

        self.callsites = None
        self.inv_callsites = None
        self.func_info = dict()
        self.block_to_func = dict()

        self.map_callsites()

        # bail out early on very large binaries (analysis cost cap)
        if self._too_large():
            l.warning("Too large")
            return

        # base symbolic state shared by analyses; make_symbolic_state is
        # presumably defined elsewhere in this class — not visible here
        self.base_symbolic_state = self.make_symbolic_state(self.project, self._reg_list)
        self.base_symbolic_state.options.discard(options.SUPPORT_FLOATING_POINT)
        self.base_symbolic_state.regs.bp = self.base_symbolic_state.solver.BVS("sreg_" + "ebp" + "-", self.project.arch.bits)

        # pre-compute stack-variable info for every analyzable function
        for f in self._cfg.functions.values():
            if f.is_syscall:
                continue
            if self.project.is_hooked(f.addr):
                continue

            # skip if no predecessors
            try:
                if require_predecessors and len(list(self._cfg.functions.callgraph.predecessors(f.addr))) == 0:
                    continue
            except NetworkXError:
                if require_predecessors:
                    continue

            # find the actual vars; analysis failures are logged and the
            # function is simply left without func_info
            try:
                func_info = self.find_stack_vars_x86(f)
                self.func_info[f] = func_info
            except (SimEngineError, SimMemoryError) as e:
                l.debug("angr translation error: %s", e)
            except IdentifierException as e:
                l.debug("Identifier error: %s", e)
            except SimError as e:
                l.debug("Simulation error: %s", e)
def _too_large(self):
if len(self._cfg.functions) > 400:
return True
return False
    def run(self, only_find=None):
        """Identify library functions; yields (addr, name) for each match.

        Three passes: normal signature/test matching, the special-case
        functions (e.g. free), then a malloc/free fixup that drops matches
        which can reach another function already matched with the same name.
        """
        if only_find is not None:
            self.only_find = only_find

        if self._too_large():
            l.warning("Too large")
            return

        # pass 1: generic matching via identify_func
        for f in self._cfg.functions.values():
            if f.is_syscall:
                continue
            match = self.identify_func(f)
            if match is not None:
                match_func = match
                match_name = match_func.get_name()
                if f.name is not None:
                    l.debug("Found match for function %s at %#x, %s", f.name, f.addr, match_name)
                else:
                    l.debug("Found match for function %#x, %s", f.addr, match_name)
                self.matches[f] = match_name, match_func
                # malloc/free are withheld until the fixup pass below
                if match_name != "malloc" and match_name != "free":
                    yield f.addr, match_name
            else:
                if f.name is not None:
                    l.debug("No match for function %s at %#x", f.name, f.addr)
                else:
                    l.debug("No match for function %#x", f.addr)

        # Special case functions (pass 2)
        for name in Identifier._special_case_funcs:
            func = Functions[name]()
            for f in self._cfg.functions.values():
                if f in self.matches:
                    continue
                if f not in self.func_info:
                    continue
                if self.func_info[f] is None:
                    continue
                if len(self.func_info[f].stack_args) != func.num_args():
                    continue
                if self._non_normal_args(self.func_info[f].stack_args):
                    continue
                try:
                    result = func.try_match(f, self, self._runner)
                except IdentifierException as e:
                    l.warning('Encountered IdentifierException trying to analyze %#x, reason: %s',
                              f.addr, e)
                    continue
                except SimSegfaultError:
                    continue
                except SimError as e:
                    l.warning("SimError %s", e)
                    continue
                except AngrError as e:
                    l.warning("AngrError %s", e)
                    continue

                if result:
                    self.matches[f] = func.get_name(), func
                    yield f.addr, func.get_name()

        # fixup malloc/free (pass 3): a real malloc/free should not be able to
        # call another function matched with the same name; drop those that can
        to_remove = []
        for f, (match_name, match_func) in self.matches.items():
            if match_name == "malloc" or match_name == "free":
                if not self.can_call_same_name(f.addr, match_name):
                    yield f.addr, match_func.get_name()
                else:
                    to_remove.append(f)
        for f in to_remove:
            del self.matches[f]
def can_call_same_name(self, addr, name):
if addr not in self._cfg.functions.callgraph.nodes():
return False
seen = set()
to_process = [addr]
while to_process:
curr = to_process.pop()
if curr in seen:
continue
seen.add(curr)
succ = list(self._cfg.functions.callgraph.successors(curr))
for s in succ:
if s in self._cfg.functions:
f = self._cfg.functions[s]
if f in self.matches:
if self.matches[f][0] == name:
return True
to_process.extend(succ)
return False
def get_func_info(self, func):
if isinstance(func, int):
func = self._cfg.functions[func]
if func not in self.func_info:
return None
return self.func_info[func]
@staticmethod
def constrain_all_zero(before_state, state, regs):
for r in regs:
state.add_constraints(before_state.registers.load(r) == 0)
    def identify_func(self, function):
        """Try to match `function` against the known library-function models.

        Returns the matching function model instance, or None if nothing
        matched. Candidates are filtered by argument count / var-args /
        callgraph shape before the expensive input-output tests run.
        """
        l.debug("function at %#x", function.addr)
        if function.is_syscall:
            return None

        func_info = self.get_func_info(function)
        if func_info is None:
            l.debug("func_info is none")
            return None

        l.debug("num args %d", len(func_info.stack_args))

        try:
            calls_other_funcs = len(list(self._cfg.functions.callgraph.successors(function.addr))) > 0
        except NetworkXError:
            calls_other_funcs = False

        for name, f in Functions.items():
            # check if we should be finding it
            if self.only_find is not None and name not in self.only_find:
                continue
            # special-case funcs are handled separately in run()
            if name in Identifier._special_case_funcs:
                continue
            # generate an object of the class
            f = f()
            # cheap filters first: argument count, var-args, callgraph shape
            if f.num_args() != len(func_info.stack_args) or f.var_args() != func_info.var_args:
                continue
            if calls_other_funcs and not f.can_call_other_funcs():
                continue
            # expensive dynamic tests last
            l.debug("testing: %s", name)
            if not self.check_tests(function, f):
                continue
            # match!
            return f

        # fallback heuristic: a tiny 2-arg var-args function is assumed to be
        # fdprintf even though no test confirmed it
        if len(func_info.stack_args) == 2 and func_info.var_args and len(function.graph.nodes()) < 5:
            match = Functions["fdprintf"]()
            l.warning("%#x assuming fd printf for var_args func with 2 args although we don't really know", function.addr)
            return match

        return None
def check_tests(self, cfg_func, match_func):
try:
if not match_func.pre_test(cfg_func, self._runner):
return False
for _ in range(NUM_TESTS):
test_data = match_func.gen_input_output_pair()
if test_data is not None and not self._runner.test(cfg_func, test_data):
return False
return True
except SimSegfaultError:
return False
except SimError as e:
l.warning("SimError %s", e)
return False
except AngrError as e:
l.warning("AngrError %s", e)
return False
def map_callsites(self):
callsites = dict()
for f in self._cfg.functions.values():
for callsite in f.get_call_sites():
if f.get_call_target(callsite) is None:
continue
callsites[callsite] = f.get_call_target(callsite)
self.callsites = callsites
# create inverse callsite map
self.inv_callsites = defaultdict(set)
for c, f in self.callsites.items():
self.inv_callsites[f].add(c)
# create map of blocks to the function they reside in
self.block_to_func = dict()
for f in self._cfg.functions.values():
for b in f.graph.nodes():
self.block_to_func[b.addr] = f
def do_trace(self, addr_trace, reverse_accesses, func_info): #pylint: disable=unused-argument
    """Symbolically execute along a fixed list of block addresses.

    Prepares a symbolic state for the function containing addr_trace[0]
    (symbolic stack slots, bp fixed up when the frame is bp-based), then
    forces the simulation manager to follow the given addresses one block
    at a time.  Calls encountered on the way are short-circuited: eax gets
    a fresh symbol and execution resumes at the popped return address.

    :param addr_trace: block addresses to follow, starting block first
    :param reverse_accesses: unused (kept for interface compatibility)
    :param func_info: NOTE(review): immediately shadowed below - the passed
                      value is never used
    :return: the successor state after one final step past the trace
    :raises IdentifierException: if the trace cannot be followed
    """
    # get to the callsite
    s = self.make_symbolic_state(self.project, self._reg_list, stack_length=200)
    s.options.discard(options.AVOID_MULTIVALUED_WRITES)
    s.options.discard(options.AVOID_MULTIVALUED_READS)
    s.options.add(options.UNDER_CONSTRAINED_SYMEXEC)
    s.options.discard(options.LAZY_SOLVES)
    func_info = self.func_info[self.block_to_func[addr_trace[0]]]
    # pre-fill the frame (plus some slack slots) with named symbols
    for i in range(func_info.frame_size//self.project.arch.bytes+5):
        s.stack_push(s.solver.BVS("var_" + hex(i), self.project.arch.bits))
    if func_info.bp_based:
        s.regs.bp = s.regs.sp + func_info.bp_sp_diff
    s.regs.ip = addr_trace[0]
    addr_trace = addr_trace[1:]
    simgr = self.project.factory.simulation_manager(s, save_unconstrained=True)
    while len(addr_trace) > 0:
        simgr.stashes['unconstrained'] = []
        simgr.step()
        stepped = False
        for ss in simgr.active:
            # todo could write symbolic data to pointers passed to functions
            if ss.history.jumpkind == "Ijk_Call":
                # fake the call: symbolic return value, resume at return addr
                ss.regs.eax = ss.solver.BVS("unconstrained_ret_%#x" % ss.addr, ss.arch.bits)
                ss.regs.ip = ss.stack_pop()
                ss.history.jumpkind = "Ijk_Ret"
            if ss.addr == addr_trace[0]:
                # keep only the state that stays on the requested trace
                simgr.stashes['active'] = [ss]
                stepped = True
                break
        if not stepped:
            # an unconstrained successor (e.g. symbolic target) can still
            # be steered onto the trace by overwriting its ip
            if len(simgr.unconstrained) > 0:
                s = simgr.unconstrained[0]
                if s.history.jumpkind == "Ijk_Call":
                    s.regs.eax = s.solver.BVS("unconstrained_ret", s.arch.bits)
                    s.regs.ip = s.stack_pop()
                    s.history.jumpkind = "Ijk_Ret"
                s.regs.ip = addr_trace[0]
                simgr.stashes['active'] = [s]
                stepped = True
        if not stepped:
            raise IdentifierException("could not get call args")
        addr_trace = addr_trace[1:]
    # step one last time to the call
    simgr.step()
    if len(simgr.active) == 0:
        raise IdentifierException("Didn't succeed call")
    return simgr.active[0]
def get_call_args(self, func, callsite):
    """Recover the stack arguments passed to func at a given call site.

    Walks backwards from the callsite through single-predecessor blocks,
    re-executes that path symbolically (do_trace), then loads the argument
    slots relative to sp at the call.

    :param func: the callee (Function object or address)
    :param callsite: address of the calling block
    :return: [] if the callee has no stack args; None if no path could be
             traced; otherwise a tuple (args, args_as_stack_vars) where
             args_as_stack_vars holds, per argument, the caller-frame offset
             if the argument is a concrete stack address, else None.
             NOTE(review): the empty case is a list while the normal case is
             a tuple - callers must cope with both.
    """
    if isinstance(func, int):
        func = self._cfg.functions[func]
    func_info = self.func_info[func]
    if len(func_info.stack_args) == 0:
        return []
    # get the accesses of calling func
    calling_func = self.block_to_func[callsite]
    reverse_accesses = dict()
    calling_func_info = self.func_info[calling_func]
    stack_var_accesses = calling_func_info.stack_var_accesses
    for stack_var, v in stack_var_accesses.items():
        for addr, ty in v:
            reverse_accesses[addr] = (stack_var, ty)
    # we need to step back as far as possible
    start = calling_func.get_node(callsite)
    addr_trace = []
    while len(list(calling_func.transition_graph.predecessors(start))) == 1:
        # stop at a call, could continue farther if no stack addr passed etc
        prev_block = list(calling_func.transition_graph.predecessors(start))[0]
        addr_trace = [start.addr] + addr_trace
        start = prev_block
    addr_trace = [start.addr] + addr_trace
    succ_state = None
    # retry with progressively shorter traces until one succeeds
    while len(addr_trace):
        try:
            succ_state = self.do_trace(addr_trace, reverse_accesses, calling_func_info)
            break
        except IdentifierException:
            addr_trace = addr_trace[1:]
        if len(addr_trace) == 0:
            return None
    arch_bytes = self.project.arch.bytes
    args = []
    # arguments sit above the pushed return address (hence + arch_bytes)
    for arg in func_info.stack_args:
        arg_addr = succ_state.regs.sp + arg + arch_bytes
        args.append(succ_state.memory.load(arg_addr, arch_bytes, endness=self.project.arch.memory_endness))
    args_as_stack_vars = []
    for a in args:
        if not a.symbolic:
            # concrete value: maybe a pointer into the caller's own frame
            sp_off = succ_state.solver.eval(a-succ_state.regs.sp-arch_bytes)
            if calling_func_info.bp_based:
                bp_off = sp_off - calling_func_info.bp_sp_diff
            else:
                bp_off = sp_off - (calling_func_info.frame_size + self.project.arch.bytes) + self.project.arch.bytes
            # only accept plausible frame offsets
            if abs(bp_off) < 0x1000:
                args_as_stack_vars.append(bp_off)
            else:
                args_as_stack_vars.append(None)
        else:
            args_as_stack_vars.append(None)
    return args, args_as_stack_vars
@staticmethod
def get_reg_name(arch, reg_offset):
"""
:param arch: the architecture
:param reg_offset: Tries to find the name of a register given the offset in the registers.
:return: The register name
"""
# todo does this make sense
if reg_offset is None:
return None
original_offset = reg_offset
while reg_offset >= 0 and reg_offset >= original_offset - (arch.bytes):
if reg_offset in arch.register_names:
return arch.register_names[reg_offset]
else:
reg_offset -= 1
return None
@staticmethod
def _make_regs_symbolic(input_state, reg_list, project):
    """Return a copy of input_state whose reg_list registers hold fresh symbols.

    sp and bp are restored from the input state afterwards so stack
    reasoning keeps working.

    :return: the symbolic state
    """
    out = input_state.copy()
    # overwrite every requested register with a named symbol
    for name in reg_list:
        sym = out.solver.BVS("sreg_" + name + "-", project.arch.bits, explicit_name=True)
        out.registers.store(name, sym)
    # restore sp and bp
    out.regs.sp = input_state.regs.sp
    out.regs.bp = input_state.regs.bp
    return out
def _prefilter_floats(self, func):  #pylint: disable=no-self-use
    """Reject functions whose first block carries floating-point constants.

    Uses func._get_block() rather than project.factory.block() because the
    former respects the recorded basic-block size; at a section boundary
    VEX may otherwise fail to disassemble past the end and raise an
    AngrTranslationError.

    :raises IdentifierException: if a float-typed constant is present
    """
    irsb = func._get_block(func.addr).vex
    for const in irsb.all_constants:
        if const.type.startswith("Ity_F"):
            raise IdentifierException("floating const")
def find_stack_vars_x86(self, func):
    """Recover the stack layout of an x86 function.

    Symbolically executes the preamble to measure the frame size, detect
    bp-based frames and the registers pushed on entry; then single-steps
    every remaining instruction with symbolic sp/bp to collect stack
    variables, stack arguments and buffer candidates.

    :param func: a Function object or its address
    :return: a populated FuncInfo
    :raises IdentifierException: for functions this heuristic cannot handle
    """
    # could also figure out if args are buffers etc
    # doesn't handle dynamically allocated stack, etc
    if isinstance(func, int):
        func = self._cfg.functions[func]
    self._prefilter_floats(func)
    if func.name is not None:
        l.debug("finding stack vars %s", func.name)
    else:
        l.debug("finding stack vars %#x", func.addr)
    if len(func.block_addrs_set) > 500:
        raise IdentifierException("too many blocks")
    if func.startpoint is None:
        raise IdentifierException("Startpoint is None")
    initial_state = self.base_symbolic_state.copy()
    # remember each register's symbolic value by hash so pushes can be
    # matched back to the register they saved
    reg_dict = dict()
    for r in self._reg_list + [self._bp_reg]:
        reg_dict[hash(initial_state.registers.load(r))] = r
    initial_state.regs.ip = func.startpoint.addr
    # find index where stack value is constant
    succs = self.project.factory.successors(initial_state)
    succ = succs.all_successors[0]
    if succ.history.jumpkind == "Ijk_Call":
        goal_sp = succ.solver.eval(succ.regs.sp + self.project.arch.bytes)
        # could be that this is wrong since we could be pushing args...
        # so let's do a hacky check for pushes after a sub
        bl = self.project.factory.block(func.startpoint.addr)
        # find the sub sp
        for i, insn in enumerate(bl.capstone.insns):
            if str(insn.mnemonic) == "sub" and str(insn.op_str).startswith("esp"):
                succ = self.project.factory.successors(initial_state, num_inst=i+1).all_successors[0]
                goal_sp = succ.solver.eval(succ.regs.sp)
    elif succ.history.jumpkind == "Ijk_Ret":
        # here we need to know the min sp val
        min_sp = initial_state.solver.eval(initial_state.regs.sp)
        for i in range(self.project.factory.block(func.startpoint.addr).instructions):
            succ = self.project.factory.successors(initial_state, num_inst=i).all_successors[0]
            test_sp = succ.solver.eval(succ.regs.sp)
            if test_sp < min_sp:
                min_sp = test_sp
            elif test_sp > min_sp:
                break
        goal_sp = min_sp
    else:
        goal_sp = succ.solver.eval(succ.regs.sp)
    # find the end of the preamble
    num_preamble_inst = None
    succ = None
    for i in range(0, self.project.factory.block(func.startpoint.addr).instructions):
        if i == 0:
            succ = initial_state
        if i != 0:
            succ = self.project.factory.successors(initial_state, num_inst=i).all_successors[0]
        test_sp = succ.solver.eval(succ.regs.sp)
        if test_sp == goal_sp:
            num_preamble_inst = i
            break
    # hacky check for mov ebp esp
    # happens when this is after the pushes...
    if num_preamble_inst == 0:
        end_addr = func.startpoint.addr
    else:
        end_addr = func.startpoint.addr + self.project.factory.block(func.startpoint.addr, num_inst=num_preamble_inst).size
    if self._sets_ebp_from_esp(initial_state, end_addr):
        num_preamble_inst += 1
        succ = self.project.factory.successors(initial_state, num_inst=num_preamble_inst).all_successors[0]
    min_sp = goal_sp
    initial_sp = initial_state.solver.eval(initial_state.regs.sp)
    frame_size = initial_sp - min_sp - self.project.arch.bytes
    if num_preamble_inst is None or succ is None:
        raise IdentifierException("preamble checks failed for %#x" % func.startpoint.addr)
    # bp-based frame <=> bp sits at a single fixed distance from entry sp
    bp_based = bool(len(succ.solver.eval_upto((initial_state.regs.sp - succ.regs.bp), 2)) == 1)
    preamble_sp_change = succ.regs.sp - initial_state.regs.sp
    if preamble_sp_change.symbolic:
        raise IdentifierException("preamble sp change")
    preamble_sp_change = initial_state.solver.eval(preamble_sp_change)
    main_state = self._make_regs_symbolic(succ, self._reg_list, self.project)
    if bp_based:
        main_state = self._make_regs_symbolic(main_state, [self._bp_reg], self.project)
    # recover which registers the preamble pushed (writes into the frame
    # whose data hashes back to an entry register value)
    pushed_regs = []
    for a in succ.history.recent_actions:
        if a.type == "mem" and a.action == "write":
            addr = succ.solver.eval(a.addr.ast)
            if min_sp <= addr <= initial_sp:
                if hash(a.data.ast) in reg_dict:
                    pushed_regs.append(reg_dict[hash(a.data.ast)])
    pushed_regs = pushed_regs[::-1]
    # found the preamble
    # find the ends of the function
    ends = set()
    all_end_addrs = set()
    if num_preamble_inst == 0:
        end_preamble = func.startpoint.addr
        preamble_addrs = set()
    else:
        preamble_block = self.project.factory.block(func.startpoint.addr, num_inst=num_preamble_inst)
        preamble_addrs = set(preamble_block.instruction_addrs)
        end_preamble = func.startpoint.addr + preamble_block.vex.size
    for block in func.endpoints:
        addr = block.addr
        if addr in preamble_addrs:
            addr = end_preamble
        irsb = self.project.factory.block(addr).vex
        if irsb.jumpkind == "Ijk_Ret":
            # the epilogue starts at the first sp write in a returning block
            cur_addr = None
            found_end = False
            for stmt in irsb.statements:
                if stmt.tag == 'Ist_Imark':
                    cur_addr = stmt.addr
                    if found_end:
                        all_end_addrs.add(cur_addr)
                elif not found_end and stmt.tag == 'Ist_Put':
                    if stmt.offset == self.project.arch.sp_offset:
                        found_end = True
                        ends.add(cur_addr)
                        all_end_addrs.add(cur_addr)
    bp_sp_diff = None
    if bp_based:
        bp_sp_diff = main_state.solver.eval(main_state.regs.bp - main_state.regs.sp)
    all_addrs = set()
    for bl_addr in func.block_addrs:
        all_addrs.update(set(self._cfg.get_any_node(bl_addr).instruction_addrs))
    # replace sp (and bp, if frame is bp-based) with named symbols so any
    # derived address is recognizable via its variable set
    sp = main_state.solver.BVS("sym_sp", self.project.arch.bits, explicit_name=True)
    main_state.regs.sp = sp
    bp = None
    if bp_based:
        bp = main_state.solver.BVS("sym_bp", self.project.arch.bits, explicit_name=True)
        main_state.regs.bp = bp
    stack_vars = set()
    stack_var_accesses = defaultdict(set)
    buffers = set()
    possible_stack_vars = []
    # single-step every body instruction that can touch sp/bp
    for addr in all_addrs - all_end_addrs - preamble_addrs:
        bl = self.project.factory.block(addr, num_inst=1)
        if self._is_bt(bl):
            continue
        if self._is_jump_or_call(bl):
            continue
        if self._no_sp_or_bp(bl):
            continue
        main_state.ip = addr
        try:
            succ = self.project.factory.successors(main_state, num_inst=1).all_successors[0]
        except SimError:
            continue
        written_regs = set()
        # we can get stack variables via memory actions
        for a in succ.history.recent_actions:
            if a.type == "mem":
                if "sym_sp" in a.addr.ast.variables or (bp_based and "sym_bp" in a.addr.ast.variables):
                    possible_stack_vars.append((addr, a.addr.ast, a.action))
            if a.type == "reg" and a.action == "write":
                # stack variables can also be if a stack addr is loaded into a register, eg lea
                reg_name = self.get_reg_name(self.project.arch, a.offset)
                # ignore bp if bp_based
                if reg_name == self._bp_reg and bp_based:
                    continue
                # ignore weird regs
                if reg_name not in self._reg_list:
                    continue
                # check if it was a stack var
                if "sym_sp" in a.data.ast.variables or (bp_based and "sym_bp" in a.data.ast.variables):
                    possible_stack_vars.append((addr, a.data.ast, "load"))
                written_regs.add(reg_name)
    # turn each collected sp/bp-relative ast into a frame offset
    for addr, ast, action in possible_stack_vars:
        if "sym_sp" in ast.variables:
            # constrain all to be zero so we detect the base address of buffers
            simplified = succ.solver.simplify(ast - sp)
            if succ.solver.symbolic(simplified):
                self.constrain_all_zero(main_state, succ, self._reg_list)
                is_buffer = True
            else:
                is_buffer = False
            sp_off = succ.solver.eval(simplified)
            if sp_off > 2 ** (self.project.arch.bits - 1):
                sp_off = 2 ** self.project.arch.bits - sp_off
            # get the offsets
            if bp_based:
                bp_off = sp_off - bp_sp_diff
            else:
                bp_off = sp_off - (initial_sp-min_sp) + self.project.arch.bytes
            stack_var_accesses[bp_off].add((addr, action))
            stack_vars.add(bp_off)
            if is_buffer:
                buffers.add(bp_off)
        else:
            simplified = succ.solver.simplify(ast - bp)
            if succ.solver.symbolic(simplified):
                self.constrain_all_zero(main_state, succ, self._reg_list)
                is_buffer = True
            else:
                is_buffer = False
            bp_off = succ.solver.eval(simplified)
            if bp_off > 2 ** (self.project.arch.bits - 1):
                bp_off = -(2 ** self.project.arch.bits - bp_off)
            stack_var_accesses[bp_off].add((addr, action))
            stack_vars.add(bp_off)
            if is_buffer:
                buffers.add(bp_off)
    # positive offsets live above the saved bp/return address -> arguments
    stack_args = list()
    stack_arg_accesses = defaultdict(set)
    for v in stack_vars:
        if v > 0:
            stack_args.append(v - self.project.arch.bytes * 2)
            stack_arg_accesses[v - self.project.arch.bytes * 2] = stack_var_accesses[v]
            del stack_var_accesses[v]
    stack_args = sorted(stack_args)
    stack_vars = sorted(stack_vars)
    # a 'load' of the last argument's address suggests va_start-style usage
    if len(stack_args) > 0 and any(a[1] == "load" for a in stack_arg_accesses[stack_args[-1]]):
        # print "DETECTED VAR_ARGS"
        var_args = True
        del stack_arg_accesses[stack_args[-1]]
        stack_args = stack_args[:-1]
    else:
        var_args = False
    for v in stack_vars:
        if any(a[1] == "load" for a in stack_var_accesses[v]):
            buffers.add(v)
    if any(v > 0x10000 or v < -0x10000 for v in stack_vars):
        raise IdentifierException("stack seems seems incorrect")
    # return it all in a function info object
    func_info = FuncInfo()
    func_info.preamble_sp_change = preamble_sp_change
    func_info.stack_vars = stack_vars
    func_info.stack_var_accesses = stack_var_accesses
    func_info.frame_size = frame_size
    func_info.pushed_regs = pushed_regs
    func_info.stack_args = stack_args
    func_info.stack_arg_accesses = stack_arg_accesses
    func_info.buffers = buffers
    func_info.var_args = var_args
    func_info.bp_based = bp_based
    if func_info.bp_based:
        func_info.bp_sp_diff = bp_sp_diff
    self._filter_stack_args(func_info)
    return func_info
def _sets_ebp_from_esp(self, state, addr):
    """Detect whether the block at addr derives ebp from esp (e.g. mov ebp, esp).

    Executes the block with a fresh symbolic sp and checks whether the
    resulting bp ends up at a fixed distance from the old sp.  Used to
    extend the detected preamble past a bp setup that follows the pushes.
    """
    state = state.copy()
    state.regs.ip = addr
    state.regs.sp = state.solver.BVS("sym_sp", 32, explicit_name=True)
    succ = self.project.factory.successors(state).all_successors[0]
    diff = state.regs.sp - succ.regs.bp
    if not diff.symbolic:
        # sp and bp symbols cancelled: bp is sp plus a constant
        return True
    if len(diff.variables) > 1 or any("ebp" in v for v in diff.variables):
        # bp still depends on its old (symbolic) value -> not derived from esp
        return False
    if len(succ.solver.eval_upto((state.regs.sp - succ.regs.bp), 2)) == 1:
        # the difference is single-valued under the constraints
        return True
    return False
@staticmethod
def _is_bt(bl):
# vex does really weird stuff with bit test instructions
if bl.bytes.startswith(b"\x0f\xa3"):
return True
return False
@staticmethod
def _is_jump_or_call(bl):
if bl.vex.jumpkind != "Ijk_Boring":
return True
if len(bl.vex.constant_jump_targets) != 1:
return True
if next(iter(bl.vex.constant_jump_targets)) != bl.addr + bl.size:
return True
return False
def _no_sp_or_bp(self, bl):
    """True iff no statement in bl reads (Iex_Get) or writes (Ist_Put) esp/ebp."""
    for stmt in bl.vex.statements:
        # inspect the statement itself plus every sub-expression
        for item in chain([stmt], stmt.expressions):
            tag = item.tag
            if tag != "Iex_Get" and tag != "Ist_Put":
                continue
            if self.get_reg_name(self.project.arch, item.offset) in ("ebp", "esp"):
                return False
    return True
@staticmethod
def _filter_stack_args(func_info):
if -4 in func_info.stack_args:
func_info.accesses_ret = True
func_info.stack_args = [x for x in func_info.stack_args if x != -4]
del func_info.stack_arg_accesses[-4]
else:
func_info.accesses_ret = False
if any(arg < 0 for arg in func_info.stack_args):
raise IdentifierException("negative arg")
@staticmethod
def _non_normal_args(stack_args):
for i, arg in enumerate(stack_args):
if arg != i*4:
return True
return False
@staticmethod
def make_initial_state(project, stack_length):
    """
    :return: an initial state with a symbolic stack and good options for rop
    """
    state = project.factory.blank_state(
        add_options={options.AVOID_MULTIVALUED_READS, options.AVOID_MULTIVALUED_WRITES,
                     options.NO_SYMBOLIC_JUMP_RESOLUTION, options.CGC_NO_SYMBOLIC_RECEIVE_LENGTH,
                     options.NO_SYMBOLIC_SYSCALL_RESOLUTION, options.TRACK_ACTION_HISTORY},
        remove_options=options.resilience | options.simplification)
    state.options.discard(options.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY)
    # full action tracking is needed to recover stack accesses later
    state.options.update({options.TRACK_REGISTER_ACTIONS, options.TRACK_MEMORY_ACTIONS,
                          options.TRACK_JMP_ACTIONS, options.TRACK_CONSTRAINT_ACTIONS})
    # lay a fully symbolic stack under sp
    stack = state.solver.BVS("symbolic_stack", project.arch.bits * stack_length)
    state.memory.store(state.regs.sp, stack)
    if state.arch.bp_offset != state.arch.sp_offset:
        state.regs.bp = state.regs.sp + 20 * state.arch.bytes
    state.solver._solver.timeout = 500  # only solve for half a second at most
    return state
@staticmethod
def make_symbolic_state(project, reg_list, stack_length=80):
    """
    converts an input state into a state with symbolic registers
    :return: the symbolic state
    """
    base = Identifier.make_initial_state(project, stack_length)
    state = base.copy()
    # give every listed register a fresh unconstrained symbol
    for name in reg_list:
        state.registers.store(name, state.solver.BVS("sreg_" + name + "-", project.arch.bits))
    # keep the concrete stack/frame pointers from the base state
    state.regs.sp = base.regs.sp
    state.regs.bp = base.regs.bp
    return state
# Register this analysis with angr so it is reachable as project.analyses.Identifier.
from angr.analyses import AnalysesHub
AnalysesHub.register_default('Identifier', Identifier)
| |
import contextlib
import json
import pathlib
from threading import Timer
from typing import Optional
import uuid
from bottle import Bottle, HTTPError, request, template, static_file
from ..apps import BaseApp
from ..geoip import GeoIPOO
from ..nodes import Manager as NodesManager, Node
from ..onionoo import getOnionoo
from ..plugin import SessionPlugin
from ..proxy import Proxy
from ..session import SessionManager, Session, make_short_id
from ..system import BaseSystem
from ..utils import AttributedDict
from ..version import VersionManager
# from tob.scheduler import Scheduler
class Dashboard(BaseApp):
def __init__(self
             , sessions: SessionManager
             , nodes: NodesManager
             , proxy: Proxy
             , version: VersionManager
             , config: AttributedDict
             , system: BaseSystem
             , geoip: GeoIPOO):
    """Assemble the dashboard app: helpers, page layout and the routing table.

    :param sessions: session manager used by every request handler
    :param nodes: manager of the Tor nodes this box monitors
    :param proxy: forwarding proxy (handed to BaseApp)
    :param version: version information provider (handed to BaseApp)
    :param config: application configuration; config['cwd'] must point to
                   the template/resource directory
    :param system: host system abstraction (shown in the 'host' section)
    :param geoip: GeoIP2 lookup interface used by the map display
    """
    super().__init__(sessions=sessions, nodes=nodes, proxy=proxy, version=version,
                     config=config)
    self.system = system
    self.cwd = pathlib.Path(self.config['cwd'])
    # #####
    # # GeoIP2 interface
    self.geoip2 = geoip
    # Will be overwritten if we operate a ControlCenter
    self.default_page = "index.html"
    #####
    # TOR manpage Index Information
    from ..manpage import ManPage
    self.manpage = ManPage(str(self.cwd / 'tor' / 'tor.1.ndx'))
    #####
    # Page Construction
    #
    # '!' as first character creates a named div
    # '-' as entry adds a <hr>
    # The sections of the index page
    self.sections = ['!header', 'header',
                     '!content', 'host',
                     'config',
                     'hiddenservice', 'local',
                     'network', 'network_bandwidth', 'network_weights', '-',
                     'accounting', 'monitor',
                     # 'family',
                     'control', 'messages',
                     'license']
    #####
    # The routing table
    # NOTE: from here on the local name 'config' shadows the constructor
    # parameter; it now carries per-route keyword arguments.
    config = {
        'no_session_redirect': self.redirect.path('/')
    }
    self.app.route('/',
                   method='GET',
                   callback=self.get_start,
                   **config)
    self.app.route('/<session_id>/',
                   method='GET',
                   callback=self.get_restart,
                   **config)
    self.app.route('/<session_id>/failed.html',
                   method='GET',
                   callback=self.get_failed,
                   **config)
    config['valid_status'] = ['ok', 'auto']
    self.app.route('/<login_id>/login.html',
                   method='GET',
                   callback=self.perform_login,
                   **config)
    self.app.route('/<session_id>/logout.html',
                   method='GET',
                   callback=self.get_logout,
                   **config)
    config['valid_status'] = ['ok', 'auto', 'prepared']
    self.app.route('/<session>/index.html',
                   method='GET',
                   callback=self.get_index,
                   **config)
    config = {
        'valid_status': ['ok', 'auto']
    }
    self.app.route('/<session>/data.html',
                   method='POST',
                   callback=self.post_data,
                   **config)
    config = {}
    self.app.route('/<session>/manpage.html',
                   method='GET',
                   callback=self.get_manpage,
                   **config)
    # config = {
    #     'valid_status': ['cc_ok']
    # }
    #
    # self.app.route('/<session>/details',
    #                method='GET',
    #                callback=self.get_details,
    #                **config)
    # def debug_request():
    #     self.log.debug(request.environ['PATH_INFO'])
    #
    # # Log connection requests...
    # self.app.add_hook('before_request', debug_request)
    # Plugin for session management
    self.app.install(SessionPlugin(self.sessions))
def get_start(self):
    """Default landing page: create a 'login' session and connect it to the
    default node.

    :raises HTTPError: 404 if no session could be created
    """
    new_session = self.sessions.create_session(request, 'login')
    if new_session is None:
        raise HTTPError(404)
    # This allows to login via a command line provided password
    # (to address the HashedControlPassword authmethod).
    cli_password = self.nodes['theonionbox'].config.password
    if cli_password is not None:
        new_session['password'] = cli_password
        new_session['logout:show'] = False
    return self.connect_session_to_node(new_session, 'theonionbox',
                                        proceed_to_page=self.default_page)
def connect_session_to_node(self, session: Session, node_id: str, proceed_to_page: Optional[str] = None):
    """Attach session to node_id; fall back to the login or error page.

    On success the session is flagged 'auto' and the client is redirected
    to proceed_to_page (default: self.default_page).
    """
    from stem.connection import MissingPassword, IncorrectPassword
    target_page = self.default_page if proceed_to_page is None else proceed_to_page
    try:
        super().connect_session_to_node(session, node_id)
    except (MissingPassword, IncorrectPassword):
        # Tor demands credentials we don't have -> ask the user
        return self.create_login_page(session, self.nodes[node_id], target_page)
    except HTTPError:
        raise
    except Exception as exc:
        return self.create_error_page(session, exc)
    session['status'] = 'auto'  # no login necessary; therefore no logout possible!
    self.redirect(f'/{session.id}/{target_page}')
def create_error_page(self, session: Session, display_error: BaseException):
    """Render the page shown when connecting to Tor failed.

    :param session: the session, switched into status 'error'
    :param display_error: the exception whose message is displayed
    :return: the rendered index.html limited to the error sections
    """
    self.log.info(f'Session {make_short_id(session.id)}: Error Page @ "{display_error}"')
    # The sections of the error page
    error_sections = ['!header', 'header', '!content', 'error', 'license']
    # We failed to connect to Tor and have to admit this now!
    session['status'] = 'error'
    session['stylesheets'] = ['bootstrap.css', 'latolatin/latolatinfonts.css', 'box.css']
    session['scripts'] = ['jquery.js', 'bootstrap.js', 'box.js']
    section_config = {}
    section_config['header'] = {
        'logout': False,
        'title': 'The Onion Box',
        'subtitle': "Version: {}<br>Your address: {}".format(self.config.stamped_version, request.get('REMOTE_ADDR'))
    }
    params = {
        'session': session
        , 'tor': None
        , 'session_id': session.id
        , 'icon': self.icon
        , 'box_stamp': self.config.stamped_version
        , 'virtual_basepath': self.config.box.base_path
        , 'sections': error_sections
        , 'section_config': section_config
        , 'error_msg': display_error
        , 'box.js_login': True  # flag to manipulate the creation process of 'box.js'
        , 'template_lookup': [str(self.cwd)]  # search path for the template engine...
        # , 'tools': template_tools.__name__
    }
    # prepare the includes
    session['box.js'] = template('scripts/box.js', **params)
    session['box.css'] = template('css/box.css', **params)
    # session['fonts.css'] = template('css/latolatinfonts.css', **params)
    # deliver the error page
    return template('pages/index.html', **params)
def create_login_page(self, session: Session, node: Node, proceed_to_page: str):
    """Render the login page asking for the node's control credentials.

    :param session: the session performing the login
    :param node: the node that rejected our connection attempt
    :param proceed_to_page: page to load after successful authentication
    :return: the rendered index.html limited to the login sections
    """
    self.log.debug('Session {} o-A-o {} Node'.format(session.id, node.id))
    # The sections of the login page
    login_sections = ['!header', 'header', '!content', 'login', 'license']
    # Standard login Page delivery
    # digest auth if a password is configured, basic auth (cookie) otherwise
    session['auth'] = 'digest' if node.controller.password is not None else 'basic'
    session['stylesheets'] = ['bootstrap.css', 'latolatin/latolatinfonts.css', 'box.css']
    session['scripts'] = ['jquery.js', 'bootstrap.js', 'auth.js', 'box.js']
    section_config = {}
    section_config['header'] = {
        'logout': False,
        'title': 'The Onion Box',
        'subtitle': "Version: {}<br>Your address: {}".format(self.config.stamped_version, request.get('REMOTE_ADDR'))
    }
    section_config['login'] = {
        # 'timeout': self.config.box.ttl * 1000  # js!
        'timeout': 20 * 1000  # js!
    }
    params = {
        'session': session
        , 'tor': node.controller
        , 'session_id': session.id
        , 'icon': self.icon
        , 'box_stamp': self.config.stamped_version
        , 'virtual_basepath': self.config.box.base_path
        , 'sections': login_sections
        , 'section_config': section_config
        , 'proceed_to': proceed_to_page
        , 'box.js_login': True  # flag to manipulate the creation process of 'box.js'
        , 'template_lookup': [str(self.cwd)]
    }
    # prepare the includes
    session['box.js'] = template('scripts/box.js', **params)
    session['box.css'] = template('css/box.css', **params)
    if 'auth' in session:
        if session['auth'] == 'basic':
            session['auth.js'] = template('scripts/authrequest_basic.js'
                                          , virtual_basepath=self.config.box.base_path
                                          , proceed_to = proceed_to_page
                                          , session_id=session.id
                                          , template_lookup=[str(self.cwd)]
                                          )
        else:  # e.g. if login['auth'] == 'digest'
            session['auth.js'] = template('scripts/authrequest_digest.js'
                                          , virtual_basepath=self.config.box.base_path
                                          , proceed_to = proceed_to_page
                                          , session_id=session.id
                                          , template_lookup=[str(self.cwd)]
                                          )
            # digest auth needs client-side md5 hashing
            session['scripts'].append('md5.js')
    # deliver the login page
    return template('pages/index.html', **params)
def get_restart(self, session_id):
    """Recycle a 'login' session and retry connecting to its cached node.

    When monitoring a controlled node, this is the default landing page in
    case of login error or logout procedure.  The CC uses this as well to
    launch the detail page delivery.

    NOTE(review): the fall-through after each self.redirect(...) call relies
    on the redirect helper raising - confirm against its implementation.
    """
    session = self.sessions.get_session_without_validation(session_id)
    if session is None:
        self.redirect('/')
    status = session.get('status')
    if status != 'login':
        self.redirect('/')
    # session shall already be a newly created one, status 'login'
    # carrying in 'cached_node' the id of the node formerly connected to (or intending to log into)
    node_id = session.get('node')
    cached_id = session.get('cached_node')
    # 'node_id is not None' indicates that the session already went through a 'connect_session_...' cycle
    # This happens when the user reloads the page after a failed attempt to login
    # => Restart with a fresh session
    # session might be expired! In that case, we create a new session and restart
    if node_id is not None or session.expired:
        self.sessions.delete_session(session)
        new_session = self.sessions.create_session(request, 'login')
        new_session['cached_node'] = node_id
        self.redirect(f'/{new_session.id}/')
    if cached_id is None:
        self.sessions.delete_session(session)
        self.redirect('/')
    return self.connect_session_to_node(session, cached_id, proceed_to_page="index.html")
#####
# The Authentication System
def perform_login(self, login_id):
    """Run the HTTP authentication handshake for a pending login session.

    The client arrives here (driven by auth.js) with a session in status
    'login'.  authenticate() raises HTTPError(401) to perform the browser's
    authentication rounds; on success the login session is discarded and a
    fresh 'prepared' session id is returned, which get_index() promotes to
    'ok' if the client follows up quickly enough.

    :param login_id: id of the session attempting to log in
    :return: the id of the newly prepared session
    :raises HTTPError: 401 during the handshake, 404 on abuse or timeout
    """
    from ..authenticate import authenticate
    self.log.debug(f'Login Request: {make_short_id(login_id)}@{request.remote_addr} / {request.remote_route}')
    session = self.sessions.get_session(login_id, request)
    if session is None:
        self.redirect('/')
    if session['status'] != 'login':
        self.sessions.delete_session(session)
        self.redirect('/')
    node = None
    try:
        node = self.nodes.get(session['node'])
    except Exception:
        # was a bare 'except:' - narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed here
        self.sessions.delete_session(session)
        self.redirect('/')
    # allow at most one authentication round-trip, within 1.5 seconds
    if 'login' in session:
        session['login'] += 1
        if session['login'] > 1 or (self.time() - session['login_time']) > 1.5:
            self.sessions.delete_session(session)
            raise HTTPError(404)
    else:
        session['login'] = 0
        session['login_time'] = self.time()
    header = request.environ.get('HTTP_AUTHORIZATION', '')
    # this raises HTTPError(401) to perform the authentication procedure
    pwd = authenticate(session, node, header)
    # at this stage we have a successful login
    # and switch to standard session management
    self.sessions.delete_session(session)
    authed_session = self.sessions.create_session(request, 'prepared')
    if authed_session is not None:
        authed_session['node'] = node.id
        authed_session['prep_time'] = self.time()
        authed_session['password'] = pwd
        return authed_session.id
    raise HTTPError(404)
def get_failed(self, session_id):
    """After a failed login: recycle the session and restart at '/<id>/'.

    Unknown session ids are logged and sent back to '/'.
    """
    failed_session = self.sessions.get_session_without_validation(session_id)
    if failed_session is not None:
        remembered_node = failed_session.get('node')
        self.log.notice(f'{failed_session.id_short()}@{request.remote_addr}: Login failed.')
        self.sessions.delete_session(failed_session)
        replacement = self.sessions.create_session(request, 'login')
        replacement['cached_node'] = remembered_node
        self.redirect('/' + replacement.id + '/')
    self.log.warning(f"Unknown client @ 'failed.html': {make_short_id(session_id)}@{request.remote_addr}")
    self.redirect('/')
# This is the standard page!
def get_index(self, session):
    """Render the main dashboard page for a connected session.

    Promotes a 'prepared' session (fresh login) to 'ok' if the client comes
    back within 2 seconds, assembles the page sections based on the node's
    capabilities (hidden services, onionoo data, accounting, ...) and
    renders pages/index.html with everything the client-side scripts need.

    :param session: the validated Session (injected by SessionPlugin)
    :return: the rendered dashboard HTML
    """
    status = session['status']
    if status == 'prepared':
        delay = self.time() - session['prep_time']
        if delay > 2.0:  # seconds
            session['status'] = 'toolate'  # ;)
            self.log.warning(f'{session.id_short()}: Login to Session delay expired. Session canceled.')
        else:
            session['status'] = 'ok'
            # we have a successfull connection! Celebrate this!
            self.log.notice(f'{session.id_short()}: Session established.')
    if session['status'] not in ['ok', 'auto']:
        self.sessions.delete_session(session)
        self.redirect('/')
    try:
        node = self.nodes.get(session['node'])
    except Exception:
        self.sessions.delete_session(session)
        self.redirect('/')
    tor = node.controller
    fingerprint = tor.fingerprint
    if self.verify_fingerprint(fingerprint) is True:
        # refresh the onionoo data for this relay in the background
        oo = getOnionoo()
        oo.trigger(fingerprint)
    # reset the time flags!
    # NOTE(review): assumes Session.__delitem__ tolerates missing keys - confirm
    del session['cpu']
    del session['accounting']
    del session['monitor']
    del session['network']
    del session['network_bw']
    del session['network_weights']
    del session['family']
    # setup the MessageHandler for this session
    # node.torLogMgr.add_client(session.id)
    # prepare the preserved events for hardcoded transfer
    from ..log import sanitize_for_html
    # p_ev = node.torLogMgr.get_events(session.id, encode=sanitize_for_html)
    p_ev = []
    accounting_stats = {}
    try:
        accounting_stats = tor.get_accounting_stats()
        accounting_switch = True
    except:
        # best effort: accounting is simply not shown if Tor cannot report it
        accounting_switch = False
    #####
    # Page Construction
    #
    # '!' as first character creates a named div
    # '-' as entry adds a <hr>
    # !header
    # header
    # !content
    # host
    # config
    # hiddenservice
    # local
    # network
    # network_bandwidth
    # network_weights
    # -
    # accounting
    # monitor
    # family
    # control
    # messages
    # license
    sections = ['!header', 'header',
                '!content']
    # sections += ['controlcenter']
    if tor.is_localhost():
        sections += ['host']
    sections += ['config']
    hsc = tor.get_hidden_service_conf(None)
    if hsc is not None and len(hsc) > 0:
        sections += ['hiddenservice']
    sections += ['local']
    params = {}
    sections_oo = []
    # onionoo-based sections appear only if data is actually available
    # with contextlib.suppress(AttributeError):
    if node.onionoo is not None and node.onionoo.details.has_data():
        sections_oo += ['network']
        params['oo_details'] = node.onionoo.details
        if node.onionoo.bandwidth.has_data():
            sections_oo += ['network_bandwidth']
            params['oo_bw'] = node.onionoo.bandwidth
        if node.onionoo.weights.has_data():
            sections_oo += ['network_weights']
            params['oo_weights'] = node.onionoo.weights
    if len(sections_oo) > 0:
        sections_oo += ['-']  # <hr>
        sections += sections_oo
    if accounting_switch is True:
        sections += ['accounting']
    sections += ['monitor']
    sections += ['transport']
    # this is for testing purposes only
    # sections += ['nodes', 'transport']
    # if fingerprint:
    #     sections.append('family')
    sections += ['messages']
    sections += ['license']
    session['sections'] = sections
    session['stylesheets'] = ['bootstrap.css', 'latolatin/latolatinfonts.css', 'glide.core.css', 'glide.theme.css']
    session['scripts'] = ['jquery.js', 'bootstrap.js', 'smoothie.js', 'chart.js', 'scrollMonitor.js', 'glide.js']
    # to ensure, that 'box.xxx' is the latest element...
    session['stylesheets'].append('box.css')
    session['scripts'].append('box.js')
    import socket
    if tor.is_localhost() is True:
        at_location = socket.gethostname()
    else:
        try:
            at_location = tor.get_info('address')
        except:
            # best effort: fall back to a generic label
            at_location = 'Remote Location'
    if session['logout:show'] is not None:
        # 20200102: Currently only used if password for default node provided via command line.
        show_logout = session['logout:show']
    else:
        show_logout = session['status'] != 'auto' or session['password'] is not None
    section_config = {}
    section_config['header'] = {
        'logout': show_logout,
        'title': tor.nickname,
        'subtitle': f"Tor {tor.version_short} @ {at_location}<br>{fingerprint}",
        'powered': f"monitored by <b>The Onion Box</b> v{self.config.stamped_version}"
    }
    def get_lines(info):
        # one line per entry reported by Tor
        info_lines = info.split('\n')
        return len(info_lines)
    transport = {
        'or': get_lines(tor.get_info('orconn-status')),
        'stream': get_lines(tor.get_info('stream-status')),
        'circ': get_lines(tor.get_info('circuit-status')),
    }
    # This is the initial session token
    session['token'] = uuid.uuid4().hex
    # Marker for the map
    icon_marker = "iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAA" \
                  "DsMAAA7DAcdvqGQAAAAYdEVYdFNvZnR3YXJlAHBhaW50Lm5ldCA0LjAuOWwzfk4AAAC9SURBVDhPpZK9DcJADIVTUFBcmTXo" \
                  "KTNAxqFIiRR6RonEAhQMQUmRMSiO90U2csSBAhSfdLbf8/25yjk/OWxOSXRiEKPBmlyK2mhqxE3kN1BrZkYSQXARragN1mdB" \
                  "7S62k1ELjuc7HcXKuzrkRG+aq1iT5Py+04sporrvvCPg8gRtSRxBY9qBgJcjqEviCBrTjn8Zfz6qPw4XX/o4HUH8jr5kANX2" \
                  "pkGbPBkHgK6fBmCantjx+5EL5oVDnqsHL/DYhRMxwWIAAAAASUVORK5CYII="
    # Preparation for the Configuration display
    # 20200104: This used to be part of config.html - yet moved here due to (relative) import issues
    from ..configcollector import ConfigCollector
    cfgcoll = ConfigCollector(tor)
    configs_used = cfgcoll.collect_configs_used()
    # params initialized before for onionoo data
    params.update({
        'session': session
        # , 'read_bytes': node.bwdata['download']
        # , 'written_bytes': node.bwdata['upload']
        , 'tor': tor
        , 'host': self.system
        , 'session_id': session.id
        , 'preserved_events': p_ev
        , 'server_time': self.time()
        , 'accounting_on': accounting_switch
        , 'accounting_stats': accounting_stats
        , 'icon': self.icon
        , 'marker': icon_marker
        , 'box_stamp': self.config.stamped_version
        , 'box_debug': False  # ToDo: Adjust to config.debug!!
        , 'boxVersion': self.version
        , 'virtual_basepath': self.config.box.base_path
        , 'sections': sections
        , 'manpage': self.manpage
        # , 'oo_show': onionoo_show
        # , 'oo_details': node.onionoo.details()
        # , 'oo_bw': node.onionoo.bandwidth()
        # , 'oo_weights': node.onionoo.weights()
        , 'section_config': section_config
        # , 'oo_factory': node.onionoo
        , 'geoip': self.geoip2
        , 'family_fp': fingerprint
        # , 'controlled_nodes': box_cc
        , 'transport_status': transport
        , 'token': session['token']
        , 'cwd': str(self.cwd)
        , 'configs_used': configs_used
        , 'template_lookup': [str(self.cwd)]
    })
    # Test
    # from bottle import SimpleTemplate
    # tpl = SimpleTemplate(name='scripts/box.js')
    # tpl.prepare(syntax='/* */ // {{ }}')
    # bjs = tpl.render(**params)
    # re-ping the session - to prevent accidential timeout
    self.sessions.get_session(session.id, request)
    # prepare the includes
    session['box.js'] = template('scripts/box.js', **params)
    session['box.css'] = template('css/box.css', **params)
    # session['fonts.css'] = template('css/latolatinfonts.css', **params)
    # create the dashboard
    index = template('pages/index.html', **params)
    # re-ping the session - to prevent accidential timeout
    self.sessions.get_session(session.id, request)
    # deliver the dashboard
    return index
def post_data(self, session):
    # Handle the client's periodic data-poll POST: verify (and rotate) the
    # session token, then assemble a JSON payload with fresh data for every
    # dashboard section registered in session['sections'].
    # Returns the JSON-encoded payload string; raises HTTPError(404) on a
    # bad token or unknown node, HTTPError(503) on an unexpected status.
    session_id = session.id
    node = None
    tor = None
    # Session token verification
    token = request.forms.get('token', None)
    if token is None or token != session['token']:
        self.sessions.delete_session(session)
        self.log.warning("Token mismatch at session {}. Session closed.".format(session.id_short()))
        raise HTTPError(404)
    if session['status'] in ['ok', 'auto']:
        try:
            node = self.nodes[session['node']]
            tor = node.controller
        # NOTE(review): bare except hides the actual failure cause; consider
        # narrowing to KeyError / AttributeError.
        except:
            raise HTTPError(404)
        fp = tor.fingerprint
    elif session['status'] in ['search']:
        fp = session['search']
    else:
        raise HTTPError(503)
    # create new session token
    session['token'] = uuid.uuid4().hex
    its_now = int(self.time()) * 1000 # JS!
    return_data_dict = {'tick': its_now}
    box_sections = session['sections']
    # host
    if 'host' in box_sections:
        since = session['cpu'] if 'cpu' in session else None
        cpu_list = self.system.get_performance_data(after=since)
        # this little hack ensures, that we deliver data on the
        # first *two* calls after launch!
        session['cpu'] = its_now if 'cpu' in session else 0
        if len(cpu_list) > 0:
            return_data_dict['gen'] = cpu_list
    # accounting
    if 'accounting' in box_sections:
        from stem import ControllerError
        accs = None
        try:
            accs = tor.get_accounting_stats()
            ret = accs.retrieved
            # to compensate for 'Object of type datetime is not JSON serializable' error
            accs = accs._replace(interval_end=accs.interval_end.timestamp())
            # our JS script needs the field names...
            accs = accs._asdict()
        except ControllerError:
            ret = its_now
        acc = {'enabled': accs is not None}
        if accs is not None:
            acc['stats'] = accs
        session['accounting'] = ret # currently not used!
        return_data_dict['acc'] = acc
    # messages
    if 'messages' in box_sections:
        runlevel = request.forms.get('runlevel', None)
        if runlevel:
            # The client posts a JSON dict of {runlevel_name: bool} switches.
            rl_dict = json.JSONDecoder().decode(runlevel)
            # boxLog.debug('Levels from the client @ {}: {}'.format(session_id, rl_dict))
            for key in rl_dict:
                changed = node.logs.switch(session_id, key, rl_dict[key])
                # auto cancel 'DEBUG' mode after 30 seconds
                if key == 'DEBUG' and rl_dict[key] and changed:
                    Timer(30,
                          node.logs.switch,
                          kwargs={'session_id': session_id,
                                  'level': key,
                                  'status': False}).start()
        from ..log import sanitize_for_html
        log_list = node.logs.get_events(session_id, encode=sanitize_for_html)
        if log_list and len(log_list) > 0:
            return_data_dict['msg'] = log_list
        log_status = node.logs.get_status(session_id)
        if log_status is not None:
            return_data_dict['msg_status'] = log_status
    # get the onionoo data
    # onionoo_details = onionoo.details(fp)
    # onionoo_bw = onionoo.bandwidth(fp)
    # onionoo_weights = onionoo.weights(fp)
    # operations monitoring
    if 'monitor' in box_sections:
        from ..livedata import intervals
        return_data_dict['mon'] = {}
        last_ts = None
        if 'monitor' in session:
            last_ts = session['monitor']
            if last_ts == 0:
                last_ts = None
        for interval in intervals:
            try:
                retval = node.bandwidth.get_data(interval=interval, since_timestamp=last_ts)
                if len(retval) > 0:
                    return_data_dict['mon'][interval] = retval
            # NOTE(review): per-interval failures are silently dropped; the
            # client simply gets no data for that interval.
            except Exception as e:
                pass
        # if ('network_bw' not in session) or (session['network_bw'] == 0):
        #     res = {}
        #     obwr = node.onionoo.bandwidth.read()
        #     obww = node.onionoo.bandwidth.write()
        #     # obwr = None
        #     # obww = None
        #     if obwr is not None:
        #         res['read'] = obwr
        #     if obww is not None:
        #         res['write'] = obww
        #     if len(res) > 0:
        #         return_data_dict['mon']['oo_bw'] = res
        # this little hack ensures, that we deliver data on the
        # first *two* calls after launch!
        session['monitor'] = its_now if 'monitor' in session else 0
    # operations monitoring
    # if 'controlcenter' in box_sections:
    #
    #     return_data_dict['cc'] = {}
    #     last_ts = None
    #     if 'controlcenter' in session:
    #         last_ts = session['controlcenter']
    #         if last_ts == 0:
    #             last_ts = None
    #
    #     # try:
    #     #     retval = node.livedata.get_data(since_timestamp=last_ts)
    #     #     if len(retval) > 0:
    #     #         return_data_dict['cc']['1s'] = retval
    #     # except Exception as e:
    #     #     print(e)
    #     #     pass
    #
    #     retval = node.livedata.get_data(since_timestamp=last_ts)
    #     if len(retval) > 0:
    #         return_data_dict['cc']['1s'] = retval
    #
    #     # this little hack ensures, that we deliver data on the
    #     # first *two* calls after launch!
    #     session['controlcenter'] = its_now if 'cc' in session else 0
    if 'network' in box_sections:
        # Once there was code here.
        # It's no more ;) !
        pass
    if 'network_bandwidth' in box_sections:
        if ('network_bw' not in session) or (session['network_bw'] == 0):
            return_data_dict['oo_bw'] = {'read': node.onionoo.bandwidth.read(),
                                         'write': node.onionoo.bandwidth.write()}
            # this little hack ensures, that we deliver data on the
            # first *two* calls after launch!
            session['network_bw'] = node.onionoo.bandwidth.published() if 'network_bw' in session else 0
        elif session['network_bw'] != node.onionoo.bandwidth.published():
            del session['network_bw']
            # if there's new data act as if we've not had any before!
            # we'll therefore deliver the new data with the next run!
    if 'network_weights' in box_sections:
        if ('network_weights' not in session) or (session['network_weights'] == 0):
            details = {'cw': node.onionoo.details('consensus_weight'),
                       'cwf': node.onionoo.details('consensus_weight_fraction'),
                       'gp': node.onionoo.details('guard_probability'),
                       'mp': node.onionoo.details('middle_probability'),
                       'ep': node.onionoo.details('exit_probability')}
            return_data_dict['oo_weights'] = {'cw': node.onionoo.weights.consensus_weight(),
                                              'cwf': node.onionoo.weights.consensus_weight_fraction(),
                                              'ep': node.onionoo.weights.exit_probability(),
                                              'gp': node.onionoo.weights.guard_probability(),
                                              'mp': node.onionoo.weights.middle_probability(),
                                              'data': details
                                              }
            # print(return_data_dict['oo_weights'])
            # this little hack ensures, that we deliver data on the
            # first *two* calls after launch!
            session['network_weights'] = node.onionoo.weights.published() if 'network_weights' in session else 0
        elif session['network_weights'] != node.onionoo.weights.published():
            del session['network_weights']
            # if there's new data act as if we've not had any before!
            # we'll therefore deliver the new data with the next run!
    if 'family_xx' in box_sections:
        # get the family entries from the onionoo details of the node
        fp = tor.fingerprint
        # NOTE(review): 'onionoo' here is a module-/global-level name (other
        # sections use node.onionoo) — confirm it is defined in this scope.
        family_details = onionoo.details(fp)
        if family_details is not None:
            # there are several different categories of families
            fams = ['effective_family', 'alleged_family', 'indirect_family']
            family_data = {} # the read / write data for one node; key = fingerprint of node (pre 5.0: [1:])
            family_nodes = [] # list of fingerprints of the nodes (pre 5.0: [1:])
            if 'family' not in session:
                session['family'] = {}
            session_family = session['family']
            # iterate through the categories
            for fam in fams:
                # get the nodes per category
                fam_det = family_details(fam)
                if fam_det is not None:
                    # iterate through the nodes
                    for fp in fam_det:
                        # onionoo protocol v5.0 adaptation
                        # NOTE(review): 'fp[0] is "$"' relies on CPython's
                        # 1-char string caching; '==' would be correct here.
                        node_fp = fp[1:] if fp[0] is '$' else fp
                        node_key = 'family:{}'.format(node_fp)
                        node_bw = onionoo.bandwidth(node_fp)
                        if node_bw is not None:
                            if (node_key not in session_family) or (session_family[node_key] == 0):
                                family_nodes.append(node_fp)
                                family_data[node_fp] = {'read': node_bw.read(), 'write': node_bw.write()}
                                # this little hack ensures, that we deliver data on the
                                # first *two* calls after launch!
                                session_family[node_key] = node_bw.published() if node_key in session_family else 0
                            elif session_family[node_key] != node_bw.published():
                                del session_family[node_key]
                                # if there's new data act as if we've not had any before!
                                # we'll therefore deliver the new data with the next run!
            # if we found some family entries
            if len(family_nodes) > 0:
                # prepare the data
                family_data['keys'] = family_nodes
                return_data_dict['oo_family'] = family_data
    if 'transport' in box_sections:
        return_data_dict['transport'] = { 'or': node.connections.count,
                                          'circ': node.circuits.count,
                                          'stream': node.streams.count}
    #provide new session token
    return_data_dict['token'] = session['token']
    # Now json everything... and return it!
    return json.JSONEncoder().encode(return_data_dict)
def get_logout(self, session_id):
    # Explicit user logout: tear down the session and bounce the client to a
    # fresh 'login' session, remembering the node so the login page can
    # preselect it.
    session = self.sessions.get_session_without_validation(session_id)
    if session is not None:
        node_id = session.get('node')
        self.log.notice(f'{session.id_short()}@{request.remote_addr}: Active LogOut!')
        self.sessions.delete_session(session)
        new_session = self.sessions.create_session(request, 'login')
        new_session['cached_node'] = node_id
        self.redirect(f'/{new_session.id}/')
        # NOTE(review): the warning below is only reached if self.redirect()
        # returns instead of raising (bottle-style redirect raises
        # HTTPResponse) — confirm the intended control flow.
    self.log.warning(f'LogOut requested from unknown client: {make_short_id(session_id)}@{request.remote_addr}')
    self.redirect('/')
def get_manpage(self, session):
    """Serve the bundled Tor manual page as a static HTML file."""
    manpage_root = str(self.cwd / 'tor')
    return static_file('tor.1.html', root=manpage_root, mimetype='text/html')
| |
from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.http import Http404, QueryDict
from django.contrib import auth
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from django.contrib.auth.views import password_reset_confirm as django_password_reset_confirm
from django.utils.http import urlsafe_base64_decode, is_safe_url
from django.views.generic import ListView
from froide.foirequest.models import FoiRequest, FoiEvent
from froide.helper.auth import login_user
from froide.helper.utils import render_403
from .forms import (UserLoginForm, PasswordResetForm, NewUserForm,
UserEmailConfirmationForm, UserChangeForm, UserDeleteForm, TermsForm)
from .models import AccountManager
from .utils import cancel_user
def confirm(request, user_id, secret, request_id=None):
    """Activate a freshly registered account via its emailed one-time link."""
    if request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are logged in and cannot use a confirmation link.'))
        return redirect('account-show')
    user = get_object_or_404(auth.get_user_model(), pk=int(user_id))
    if user.is_active:
        # Account already confirmed earlier — the link must not work twice.
        return redirect('account-login')
    manager = AccountManager(user)
    if not manager.confirm_account(secret, request_id):
        messages.add_message(request, messages.ERROR,
                _('You can only use the confirmation link once, please login with your password.'))
        return redirect('account-login')
    messages.add_message(request, messages.WARNING,
            _('Your email address is now confirmed and you are logged in. You should change your password now by filling out the form below.'))
    login_user(request, user)
    if request_id is not None:
        # Confirming may also release a request that was waiting on signup.
        foirequest = FoiRequest.confirmed_request(user, request_id)
        if foirequest:
            messages.add_message(request, messages.SUCCESS,
                    _('Your request "%s" has now been sent') % foirequest.title)
    next_url = request.GET.get('next', request.session.get('next'))
    if next_url:
        request.session.pop('next', None)
        return redirect(next_url)
    return redirect(reverse('account-settings') + "?new#change-password-now")
def go(request, user_id, secret, url):
    # Autologin link handler: logs the user in via a one-time secret and
    # forwards to *url*.
    if request.user.is_authenticated:
        if request.user.id != int(user_id):
            messages.add_message(request, messages.INFO,
                    _('You are logged in with a different user account. Please logout first before using this link.'))
        # NOTE(review): when already logged in as the same user we fall
        # straight through to the redirect below without checking the secret.
    else:
        user = get_object_or_404(auth.get_user_model(), pk=int(user_id))
        if not user.is_active:
            messages.add_message(request, messages.ERROR,
                    _('Your account is not active.'))
            raise Http404
        account_manager = AccountManager(user)
        if account_manager.check_autologin_secret(secret):
            login_user(request, user)
    # NOTE(review): the redirect happens even when the secret did not check
    # out — a bad secret silently forwards without login; confirm intended.
    return redirect(url)
class BaseRequestListView(ListView):
    """Shared base for the per-user request listings (requires login)."""
    paginate_by = 20

    def get(self, request, *args, **kwargs):
        # Anonymous visitors are sent to the login page instead of a 403.
        if not request.user.is_authenticated:
            return redirect('account-login')
        return super(BaseRequestListView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(BaseRequestListView, self).get_context_data(**kwargs)
        # Reproduce the current query string minus 'page' so pagination
        # links can append their own page number.
        query_without_page = QueryDict(
            self.request.GET.urlencode().encode('utf-8'), mutable=True)
        query_without_page.pop('page', None)
        context['getvars'] = query_without_page.urlencode()
        context['menu'] = self.menu_item
        return context
class MyRequestsView(BaseRequestListView):
    """List the requests owned by the logged-in user."""
    template_name = 'account/show_requests.html'
    menu_item = 'requests'

    def get_queryset(self):
        self.query = self.request.GET.get('q', None)
        return FoiRequest.objects.get_dashboard_requests(
            self.request.user, query=self.query)

    def get_context_data(self, **kwargs):
        context = super(MyRequestsView, self).get_context_data(**kwargs)
        # '?new' marks a freshly confirmed account for the template.
        if 'new' in self.request.GET:
            self.request.user.is_new = True
        return context
class FollowingRequestsView(BaseRequestListView):
    """List the requests the logged-in user is following."""
    template_name = 'account/show_following.html'
    menu_item = 'following'

    def get_queryset(self):
        self.query = self.request.GET.get('q', None)
        filters = {'title__icontains': self.query} if self.query else {}
        return FoiRequest.objects.filter(
            foirequestfollower__user=self.request.user, **filters)
def profile(request, slug):
    """Public profile page for a user; hidden for users flagged as private."""
    user = get_object_or_404(auth.get_user_model(), username=slug)
    if user.private:
        raise Http404
    published_requests = FoiRequest.published.filter(
        user=user).order_by('-first_message')
    recent_events = FoiEvent.objects.filter(public=True, user=user)[:20]
    context = {
        'profile_user': user,
        'requests': published_requests,
        'events': recent_events
    }
    return render(request, 'account/profile.html', context)
@require_POST
def logout(request):
    """Log the user out (POST-only) and confirm it on the login page."""
    auth.logout(request)
    messages.add_message(request, messages.INFO,
            _('You have been logged out.'))
    return redirect("account-login")
def login(request, base="base.html", context=None,
        template='account/login.html', status=200):
    # Render the combined login page (login + signup + password-reset forms)
    # and process login POSTs. `status` becomes 400 on a failed POST; a
    # successful POST always redirects.
    simple = False
    initial = None
    if not context:
        context = {}
    if "reset_form" not in context:
        context['reset_form'] = PasswordResetForm()
    if "signup_form" not in context:
        context['signup_form'] = NewUserForm()
    if request.GET.get("simple") is not None:
        # '?simple' renders the page without the full site chrome.
        base = "simple_base.html"
        simple = True
        if request.GET.get('email'):
            # Prefill the email field when handed over from elsewhere.
            initial = {'email': request.GET.get('email')}
    else:
        if request.user.is_authenticated:
            return redirect('account-show')
    if request.method == "POST" and status == 200:
        status = 400 # if ok, we are going to redirect anyways
        next = request.POST.get('next')
        form = UserLoginForm(request.POST)
        if form.is_valid():
            user = auth.authenticate(username=form.cleaned_data['email'],
                    password=form.cleaned_data['password'])
            if user is not None:
                if user.is_active:
                    auth.login(request, user)
                    messages.add_message(request, messages.INFO,
                            _('You are now logged in.'))
                    if simple:
                        return redirect(reverse('account-login') + "?simple")
                    else:
                        if next:
                            return redirect(next)
                        return redirect('account-show')
                else:
                    messages.add_message(request, messages.ERROR,
                            _('Please activate your mail address before logging in.'))
            else:
                messages.add_message(request, messages.ERROR,
                        _('E-mail and password do not match.'))
    else:
        form = UserLoginForm(initial=initial)
    context.update({
        "form": form,
        "custom_base": base,
        "simple": simple,
        'next': request.GET.get('next')
    })
    return render(request, template, context, status=status)
@require_POST
def signup(request):
    """Create a new account from the signup form and send a confirmation mail.

    POST only. On success redirects to ``next`` (or the login page); on
    validation errors re-renders the login page with the bound signup form
    and HTTP 400. (Fixed: removed a duplicated ``next = request.POST.get``
    assignment.)
    """
    next = request.POST.get('next')
    next_url = next if next else reverse('account-login')
    if request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are currently logged in, you cannot signup.'))
        return redirect(next_url)
    form = UserLoginForm()
    signup_form = NewUserForm(request.POST)
    if signup_form.is_valid():
        user, password = AccountManager.create_user(**signup_form.cleaned_data)
        signup_form.save(user)
        AccountManager(user).send_confirmation_mail(password=password)
        messages.add_message(request, messages.SUCCESS,
                _('Please check your emails for a mail from us with a confirmation link.'))
        if next:
            # Remember the target so the confirmation link can resume there.
            request.session['next'] = next
        return redirect(next_url)
    return render(request, 'account/login.html', {
        "form": form,
        "signup_form": signup_form,
        "custom_base": "base.html",
        "simple": False
    }, status=400)
@require_POST
def change_password(request):
    """Handle the password-change form posted from the settings page."""
    if not request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are not currently logged in, you cannot change your password.'))
        return render_403(request)
    form = request.user.get_password_change_form(request.POST)
    if not form.is_valid():
        # Re-render settings with the bound form so errors are displayed.
        return account_settings(request,
                context={"password_change_form": form}, status=400)
    form.save()
    messages.add_message(request, messages.SUCCESS,
            _('Your password has been changed.'))
    return redirect('account-show')
@require_POST
def send_reset_password_link(request):
    """Mail a password-reset link to the address given in the reset form."""
    next = request.POST.get('next')
    next_url = next if next else reverse('account-login')
    if request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are currently logged in, you cannot get a password reset link.'))
        return redirect(next_url)
    form = auth.forms.PasswordResetForm(request.POST)
    if not form.is_valid():
        return login(request, context={"reset_form": form}, status=400)
    if next:
        # Remember the target so the reset-confirm view can resume there.
        request.session['next'] = next
    form.save(use_https=True, email_template_name="account/password_reset_email.txt")
    messages.add_message(request, messages.SUCCESS,
            _("Check your mail, we sent you a password reset link."
              " If you don't receive an email, check if you entered your"
              " email correctly or if you really have an account "))
    return redirect(next_url)
def password_reset_confirm(request, uidb64=None, token=None):
    # TODO: Fix this code
    # - don't sniff response
    # - make redirect
    # Wraps Django's password_reset_confirm view; success is detected by
    # sniffing for the 302 redirect, after which the user is logged in and a
    # stored session 'next' URL (if safe) overrides the redirect target.
    response = django_password_reset_confirm(request, uidb64=uidb64, token=token,
            template_name='account/password_reset_confirm.html',
            post_reset_redirect=reverse('account-show'))
    if response.status_code == 302:
        uid = urlsafe_base64_decode(uidb64)
        user = auth.get_user_model().objects.get(pk=uid)
        login_user(request, user)
        messages.add_message(request, messages.SUCCESS,
                _('Your password has been set and you are now logged in.'))
        if 'next' in request.session and is_safe_url(
                url=request.session['next'],
                host=request.get_host()):
            response['Location'] = request.session['next']
            del request.session['next']
    return response
def account_settings(request, context=None, status=200):
    # Render the account settings page; also used by the POST handlers to
    # re-render with a bound (invalid) form via *context* and *status*.
    if not request.user.is_authenticated:
        return redirect('account-login')
    if not context:
        context = {}
    if 'new' in request.GET:
        # '?new' marks a freshly confirmed account for the template.
        request.user.is_new = True
    # Fill in any forms the caller did not supply.
    if 'user_delete_form' not in context:
        context['user_delete_form'] = UserDeleteForm(request.user)
    if 'change_form' not in context:
        context['change_form'] = UserChangeForm(request.user)
    return render(request, 'account/settings.html', context, status=status)
@require_POST
def change_user(request):
    """Update the user's profile details from the settings page.

    If the email address changed, a confirmation mail is sent to the new
    address instead of switching it immediately.
    """
    if not request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are not currently logged in, you cannot change your address.'))
        return render_403(request)
    form = UserChangeForm(request.user, request.POST)
    if form.is_valid():
        if request.user.email != form.cleaned_data['email']:
            AccountManager(request.user).send_email_change_mail(
                form.cleaned_data['email']
            )
            messages.add_message(request, messages.SUCCESS,
                    _('We sent a confirmation email to your new address. Please click the link in there.'))
        form.save()
        messages.add_message(request, messages.SUCCESS,
                _('Your profile information has been changed.'))
        return redirect('account-settings')
    # Fixed typo in the msgid: "You profile" -> "Your profile" (translation
    # catalogs referencing the old msgid need updating).
    messages.add_message(request, messages.ERROR,
            _('Please correct the errors below. Your profile information was not changed.'))
    return account_settings(request,
            context={"change_form": form}, status=400)
def change_email(request):
    """Confirm an email change via the link mailed to the new address."""
    if not request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are not currently logged in, you cannot change your email address.'))
        return render_403(request)
    confirmation_form = UserEmailConfirmationForm(request.user, request.GET)
    if confirmation_form.is_valid():
        confirmation_form.save()
        level, text = messages.SUCCESS, _('Your email address has been changed.')
    else:
        level, text = messages.ERROR, _('The email confirmation link was invalid or expired.')
    messages.add_message(request, level, text)
    return redirect('account-settings')
@require_POST
def delete_account(request):
    """Irreversibly anonymise the account after password + phrase check."""
    if not request.user.is_authenticated:
        messages.add_message(request, messages.ERROR,
                _('You are not currently logged in, you cannot delete your account.'))
        return render_403(request)
    delete_form = UserDeleteForm(request.user, request.POST)
    if not delete_form.is_valid():
        messages.add_message(request, messages.ERROR,
                _('Password or confirmation phrase were wrong. Account was not deleted.'))
        return account_settings(
            request,
            context={'user_delete_form': delete_form},
            status=400
        )
    # Removing all personal data from account
    cancel_user(request.user)
    auth.logout(request)
    messages.add_message(request, messages.INFO,
            _('Your account has been deleted and you have been logged out.'))
    return redirect('account-login')
def new_terms(request, next=None):
    """Ask a logged-in user to accept updated terms before continuing."""
    if next is None:
        next = request.GET.get('next', '/')
    if not is_safe_url(url=next, host=request.get_host()):
        # Never redirect off-site.
        next = '/'
    if not request.user.is_authenticated or request.user.terms:
        # Nothing to accept — pass straight through.
        return redirect(next)
    form = TermsForm()
    if request.POST:
        form = TermsForm(request.POST)
        if form.is_valid():
            form.save(request.user)
            messages.add_message(request, messages.SUCCESS,
                    _('Thank you for accepting our new terms!'))
            return redirect(next)
        messages.add_message(request, messages.ERROR,
                _('You need to accept our new terms to continue.'))
    return render(request, 'account/new_terms.html', {
        'terms_form': form,
        'next': next
    })
def csrf_failure(request, reason=''):
    """403 handler for CSRF failures — usually means cookies are disabled."""
    explanation = _("You probably do not have cookies enabled, but you need cookies to use this site! Cookies are only ever sent securely. The technical reason is: %(reason)s") % {"reason": reason}
    return render_403(request, message=explanation)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, sqlite3, datetime, urllib, gzip, requests
from time import sleep
from flask import Flask, render_template, g, request, redirect, url_for, send_from_directory, session, flash, jsonify, make_response, Markup, Response
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user, wraps
from itsdangerous import URLSafeTimedSerializer # for safe session cookies
from collections import defaultdict as dd
from collections import OrderedDict as od
from hashlib import md5
from werkzeug import secure_filename
from lxml import etree
from packaging.version import Version
## profiler
#from werkzeug.contrib.profiler import ProfilerMiddleware
from common_login import *
from common_sql import *
from omw_sql import *
from wn_syntax import *
from math import log
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# NOTE(review): the secret key is hard-coded in source; it should be loaded
# from the environment or a config file kept out of version control.
app.secret_key = "!$flhgSgngNO%$#SOET!$!"
# "Remember me" login cookies expire after 30 minutes.
app.config["REMEMBER_COOKIE_DURATION"] = datetime.timedelta(minutes=30)
## profiler
#app.config['PROFILE'] = True
#app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
#app.run(debug = True)
################################################################################
# LOGIN
################################################################################
# login_manager is presumably provided by the star import from common_login
# above — confirm.
login_manager.init_app(app)
@app.route("/login", methods=["GET", "POST"])
def login():
    """ This login function checks if the username & password
        match the admin.db; if the authentication is successful,
        it passes the id of the user into login_user() """
    if request.method == "POST" and \
       "username" in request.form and \
       "password" in request.form:
        username = request.form["username"]
        password = request.form["password"]
        user = User.get(username)
        # If we found a user based on username then compare that the submitted
        # password matches the password in the database. The password is stored
        # as a salted hash, so you must hash the password before comparing it.
        if user and hash_pass(password) == user.password:
            login_user(user, remember=True)
            # FIXME! Get this to work properly...
            # return redirect(request.args.get("next") or url_for("index"))
            return redirect(url_for("index"))
        else:
            flash(u"Invalid username, please try again.")
    return render_template("login.html")
@app.route("/logout")
@login_required(role=0, group='open')
def logout():
    # End the flask-login session and return to the landing page.
    logout_user()
    return redirect(url_for("index"))
################################################################################
################################################################################
# SET UP CONNECTION WITH DATABASES
################################################################################
@app.before_request
def before_request():
    # Open per-request connections to the admin and OMW databases and store
    # them on flask.g for use by the view functions.
    g.admin = connect_admin()
    g.omw = connect_omw()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connections opened in before_request.

    Bug fix: the old code guarded on ``hasattr(g, 'db')``, an attribute that
    is never set (before_request sets ``g.admin`` and ``g.omw``), so the
    connections were never closed. Check the attributes actually used.
    """
    if hasattr(g, 'admin'):
        g.admin.close()
    if hasattr(g, 'omw'):
        g.omw.close()
################################################################################
################################################################################
# AJAX REQUESTS
################################################################################
@app.route('/_thumb_up_id')
def thumb_up_id():
    # AJAX: record a +1 rating on an ILI id for the current user and return
    # the refreshed up/down vote summary as an HTML snippet.
    user = fetch_id_from_userid(current_user.id)
    ili_id = request.args.get('ili_id', None)
    rate = 1
    r = rate_ili_id(ili_id, rate, user)
    counts, up_who, down_who = f_rate_summary([ili_id])
    html = """ <span style="color:green" title="{}">+{}</span><br>
               <span style="color:red" title="{}">-{}</span>
           """.format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
                      down_who[int(ili_id)], counts[int(ili_id)]['down'])
    return jsonify(result=html)
@app.route('/_thumb_down_id')
def thumb_down_id():
    # AJAX: record a -1 rating on an ILI id for the current user and return
    # the refreshed up/down vote summary as an HTML snippet.
    user = fetch_id_from_userid(current_user.id)
    ili_id = request.args.get('ili_id', None)
    rate = -1
    r = rate_ili_id(ili_id, rate, user)
    counts, up_who, down_who = f_rate_summary([ili_id])
    html = """ <span style="color:green" title="{}">+{}</span><br>
               <span style="color:red" title="{}">-{}</span>
           """.format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
                      down_who[int(ili_id)], counts[int(ili_id)]['down'])
    return jsonify(result=html)
@app.route('/_comment_id')
def comment_id():
    """AJAX: attach a user comment to an ILI id; returns the insert result."""
    user = fetch_id_from_userid(current_user.id)
    ili_id = request.args.get('ili_id', None)
    raw_comment = request.args.get('comment', None)
    # Escape before storing so the comment is safe to render later.
    safe_comment = str(Markup.escape(raw_comment))
    dbinsert = comment_ili_id(ili_id, safe_comment, user)
    return jsonify(result=dbinsert)
@app.route('/_detailed_id')
def detailed_id():
    # AJAX: render the full rating and comment history for one ILI id as a
    # table-cell HTML fragment.
    ili_id = request.args.get('ili_id', None)
    rate_hist = fetch_rate_id([ili_id])
    comm_hist = fetch_comment_id([ili_id])
    users = fetch_allusers()
    r_html = ""
    for r, u, t in rate_hist[int(ili_id)]:
        r_html += '{} ({}): {} <br>'.format(users[u]['userID'], t, r)
    c_html = ""
    for c, u, t in comm_hist[int(ili_id)]:
        c_html += '{} ({}): {} <br>'.format(users[u]['userID'], t, c)
    html = """
    <td colspan="9">
      <div style="width: 49%; float:left;">
        <h6>Ratings</h6>
        {}</div>
      <div style="width: 49%; float:right;">
        <h6>Comments</h6>
        {}</div>
    </td>""".format(r_html, c_html)
    return jsonify(result=html)
@app.route('/_confirm_wn_upload')
def confirm_wn_upload_id():
    """AJAX: finalize a previously validated wordnet upload.

    Confirms the upload for the current user and refreshes the label cache.
    Returns the confirmation result as JSON.
    """
    user = fetch_id_from_userid(current_user.id)
    fn = request.args.get('fn', None)
    upload = confirmUpload(fn, user)
    # updateLabels() is called for its side effect (refreshing cached
    # labels); its return value was previously bound to an unused local.
    updateLabels()
    return jsonify(result=upload)
@app.route('/_add_new_project')
def add_new_project():
    """AJAX: create a new project entry from the escaped project code."""
    user = fetch_id_from_userid(current_user.id)
    proj = request.args.get('proj_code', None)
    proj = str(Markup.escape(proj))
    if not (user and proj):
        return jsonify(result=False)
    return jsonify(result=insert_new_project(proj, user))
@app.route("/_load_lang_selector",methods=["GET"])
def omw_lang_selector():
    # AJAX: build the two language <select> widgets, preselecting the
    # languages remembered in the 'selected_lang'/'selected_lang2' cookies.
    selected_lang = request.cookies.get('selected_lang')
    selected_lang2 = request.cookies.get('selected_lang2')
    lang_id, lang_code = fetch_langs()
    html = '<select name="lang" style="font-size: 85%; width: 9em" required>'
    for lid in lang_id.keys():
        if selected_lang == str(lid):
            html += """<option value="{}" selected>{}</option>
                    """.format(lid, lang_id[lid][1])
        else:
            html += """<option value="{}">{}</option>
                    """.format(lid, lang_id[lid][1])
    html += '</select>'
    html += '<select name="lang2" style="font-size: 85%; width: 9em" required>'
    for lid in lang_id.keys():
        if selected_lang2 == str(lid):
            html += """<option value="{}" selected>{}</option>
                    """.format(lid, lang_id[lid][1])
        else:
            html += """<option value="{}">{}</option>
                    """.format(lid, lang_id[lid][1])
    html += '</select>'
    return jsonify(result=html)
@app.route('/_add_new_language')
def add_new_language():
    """AJAX: register a new language (BCP-47 code, ISO code, display name)."""
    user = fetch_id_from_userid(current_user.id)
    # Escape each value before it is stored.
    bcp = str(Markup.escape(request.args.get('bcp', None)))
    iso = str(Markup.escape(request.args.get('iso', None)))
    name = str(Markup.escape(request.args.get('name', None)))
    if not (bcp and name):
        return jsonify(result=False)
    return jsonify(result=insert_new_language(bcp, iso, name, user))
@app.route('/_load_proj_details')
def load_proj_details():
    # AJAX: render the source list plus metadata for one project as HTML.
    proj_id = request.args.get('proj', 0)
    if proj_id:
        proj_id = int(proj_id)
    else:
        proj_id = None
    projs = fetch_proj()
    srcs = fetch_src()
    srcs_meta = fetch_src_meta()
    html = str()
    if proj_id:
        i = 0
        for src_id in srcs.keys():
            # a source belongs to the project if its project code matches
            if srcs[src_id][0] == projs[proj_id]:
                i += 1
                html += "<br><p><b>Source {}: {}-{}</b></p>".format(i,
                        projs[proj_id],srcs[src_id][1])
                for attr, val in srcs_meta[src_id].items():
                    html += "<p style='margin-left: 40px'>"
                    html += attr + ": " + val
                    html += "</p>"
    return jsonify(result=html)
@app.route('/_load_min_omw_concept/<ss>')
@app.route('/_load_min_omw_concept_ili/<ili_id>')
def min_omw_concepts(ss=None, ili_id=None):
    # AJAX: render the compact concept card, addressed either by synset id
    # (ss) or by ILI id.
    if ili_id:
        ss_ids = f_ss_id_by_ili_id(ili_id)
    else:
        ss_ids = [ss]
    pos = fetch_pos()
    langs_id, langs_code = fetch_langs()
    # NOTE(review): 'ss' is rebound here from the route argument to the
    # synset payload returned by fetch_ss_basic().
    ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
    ssrels = fetch_ssrel()
    return jsonify(result=render_template('min_omw_concept.html',
                                          pos = pos,
                                          langs = langs_id,
                                          senses=senses,
                                          ss=ss,
                                          links=links,
                                          ssrels=ssrels,
                                          defs=defs,
                                          exes=exes))
@app.route('/_load_min_omw_sense/<sID>')
def min_omw_sense(sID=None):
    """AJAX: render the compact sense card for one sense id."""
    if sID:
        s_id=int(sID)
        langs_id, langs_code = fetch_langs()
        pos = fetch_pos()
        sense = fetch_sense(s_id)
        forms=fetch_forms(sense[3])
        selected_lang = int(request.cookies.get('selected_lang'))
        labels= fetch_labels(selected_lang,[sense[4]])
        src_meta= fetch_src_meta()
        src_sid=fetch_src_for_s_id([s_id])
        sdefs = fetch_defs_by_sense([s_id])
        sdef = ''
        if selected_lang in sdefs[s_id]:
            sdef = sdefs[s_id][selected_lang] ## requested language
        else:
            # Bug fix: fall back to *some* language's definition for this
            # sense. The old code indexed the outer dict (keyed by sense id)
            # with a language id, raising KeyError whenever the requested
            # language had no definition.
            sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## a language
        # return jsonify(result=render_template('omw_sense.html',
        return jsonify(result=render_template('min_omw_sense.html',
                                              s_id = s_id,
                                              sdef=sdef,
                                              sense = sense,
                                              forms=forms,
                                              langs = langs_id,
                                              pos = pos,
                                              labels = labels,
                                              src_sid = src_sid,
                                              src_meta = src_meta))
# l=lambda:dd(l)
# vr = l() # wn-lmf validation report
# @app.route('/_report_val1')
# def report_val1():
# filename = request.args.get('fn', None)
# if filename:
# vr1 = val1_DTD(current_user, filename)
# vr.update(vr1)
# if vr1['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
@app.route('/_report_val2', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report_val2():
    """Validate an uploaded wordnet file and return the report as JSON."""
    filename = request.args.get('fn', None)
    vr, filename, wn, wn_dtls = validateFile(current_user.id, filename)
    report_html = render_template('validation-report.html',
                                  vr=vr, wn=wn, wn_dtls=wn_dtls,
                                  filename=filename)
    return jsonify(result=report_html)
# validateFile()
# filename = request.args.get('fn', None)
# if filename:
# vr = val1_DTD(current_user, filename)
# if vr['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
# return jsonify(result="TEST_VAL2")
################################################################################
################################################################################
# VIEWS
################################################################################
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the site landing page."""
    return render_template('index.html')
@app.route('/ili', methods=['GET', 'POST'])
def ili_welcome(name=None):
    """Serve the ILI (Interlingual Index) welcome page."""
    return render_template('ili_welcome.html')
@app.route('/omw', methods=['GET', 'POST'])
def omw_welcome(name=None):
    """OMW landing page: list wordnet sources sorted by language/project.

    With ?projects=current (the default) only the newest version of each
    project is shown; any other value lists every version.
    """
    projects = request.args.get('projects','current')
    #print(projects)
    lang_id, lang_code = fetch_langs()
    src_meta=fetch_src_meta()
    ### sort by language, project version (Newest first)
    # three stable sorts: last key applied wins, so order is
    # language -> project id -> version (descending)
    src_sort=od()
    keys=list(src_meta.keys())
    keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
    keys.sort(key=lambda x: src_meta[x]['id']) #id
    keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
    for k in keys:
        if projects=='current': # only get the latest version
            # skip any source that is not the newest version of its project
            if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
                                              if src_meta[i]['id'] == src_meta[k]['id']),
                                             key=lambda x: Version(x)):
                continue
        src_sort[k] = src_meta[k]
    return render_template('omw_welcome.html',
                           src_meta=src_sort,
                           lang_id=lang_id,
                           lang_code=lang_code,
                           licenses=licenses)
@app.route('/wordnet', methods=['GET', 'POST'])
def wordnet_license(name=None):
    """Serve the static wordnet license page."""
    return render_template('wordnet_license.html')
@app.route('/omw_wns', methods=['GET', 'POST'])
def omw_wns(name=None):
    """List wordnet sources together with their per-source statistics.

    Same selection/sorting rules as omw_welcome(): ?projects=current
    (default) keeps only the newest version of each project.
    """
    projects = request.args.get('projects','current')
    src_meta=fetch_src_meta()
    stats = []
    lang_id, lang_code = fetch_langs()
    ### sort by language name (1), id, version (FIXME -- reverse version)
    # three stable sorts: last key applied wins, so order is
    # language -> project id -> version (descending)
    src_sort=od()
    keys=list(src_meta.keys())
    keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
    keys.sort(key=lambda x: src_meta[x]['id']) #id
    keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
    for k in keys:
        if projects=='current': # only get the latest version
            # skip any source that is not the newest version of its project
            if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
                                              if src_meta[i]['id'] == src_meta[k]['id']),
                                             key=lambda x: Version(x)):
                continue
        stats.append((src_meta[k], fetch_src_id_stats(k)))
    return render_template('omw_wns.html',
                           stats=stats,
                           lang_id=lang_id,
                           lang_code=lang_code,
                           licenses=licenses)
@app.route("/useradmin",methods=["GET"])
@login_required(role=99, group='admin')
def useradmin():
users = fetch_allusers()
return render_template("useradmin.html", users=users)
@app.route("/langadmin",methods=["GET"])
@login_required(role=99, group='admin')
def langadmin():
lang_id, lang_code = fetch_langs()
return render_template("langadmin.html", langs=lang_id)
@app.route("/projectadmin",methods=["GET"])
@login_required(role=99, group='admin')
def projectadmin():
projs = fetch_proj()
return render_template("projectadmin.html", projs=projs)
@app.route('/allconcepts', methods=['GET', 'POST'])
def allconcepts():
    """List every ILI concept with its rating summary."""
    ili, ili_defs = fetch_ili()
    rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
    return render_template('concept-list.html', ili=ili,
                           rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/temporary', methods=['GET', 'POST'])
def temporary():
    """List ILI concepts with status 2 (temporary)."""
    ili = fetch_ili_status(2)
    rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
    return render_template('concept-list.html', ili=ili,
                           rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/deprecated', methods=['GET', 'POST'])
def deprecated():
    """List ILI concepts with status 0 (deprecated)."""
    ili = fetch_ili_status(0)
    rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
    return render_template('concept-list.html', ili=ili,
                           rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/concepts/<c>', methods=['GET', 'POST'])
def concepts_ili(c=None):
    """Show the concept list for a comma-separated set of ILI ids."""
    ili_ids = c.split(',')
    ili, ili_defs = fetch_ili(ili_ids)
    rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
    return render_template('concept-list.html', ili=ili,
                           rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/search', methods=['GET', 'POST'])
@app.route('/ili/search/<q>', methods=['GET', 'POST'])
def search_ili(q=None):
    """Search ILI definitions with a GLOB pattern and list the matches."""
    # URL parameter wins; otherwise take the POSTed form field.
    query = q if q else request.form['query']
    src_id = fetch_src()
    kind_id = fetch_kind()
    status_id = fetch_status()
    rows = query_omw("""SELECT * FROM ili WHERE def GLOB ?
                 """, [query])
    ili = {
        c['id']: (kind_id[c['kind_id']], c['def'],
                  src_id[c['origin_src_id']], c['src_key'],
                  status_id[c['status_id']], c['superseded_by_id'],
                  c['t'])
        for c in rows
    }
    rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
    return render_template('concept-list.html', ili=ili,
                           rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/upload', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def upload():
    """Serve the wordnet upload form (login required)."""
    return render_template('upload.html')
@app.route('/metadata', methods=['GET', 'POST'])
def metadata():
    """Serve the static metadata documentation page."""
    return render_template('metadata.html')
@app.route('/join', methods=['GET', 'POST'])
def join():
    """Serve the static 'how to join' page."""
    return render_template('join.html')
@app.route('/omw/uploads/<filename>')
def download_file(filename):
    """Send an uploaded file back to the client as an attachment."""
    return send_from_directory(app.config['UPLOAD_FOLDER'],
                               filename, as_attachment=True)
@app.route('/ili/validation-report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def validationReport():
    """Validate the current user's uploaded file and render the report."""
    vr, filename, wn, wn_dtls = validateFile(current_user.id)
    return render_template('validation-report.html',
                           vr=vr, wn=wn, wn_dtls=wn_dtls,
                           filename=filename)
@app.route('/ili/report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report():
    """Handle a file upload for the current user and show whether it passed."""
    passed, filename = uploadFile(current_user.id)
    return render_template('report.html',
                           passed=passed,
                           filename=filename)
    # return render_template('report.html')
@app.route('/omw/search', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>,<lang2>/<q>', methods=['GET', 'POST'])
def search_omw(lang=None, lang2=None, q=None):
    """Search lemmas in a pair of languages and show the matching synsets.

    BUGFIX: the GET route supplies <lang2>, but the signature had no
    `lang2` parameter, so `lang_id2 = lang2` raised NameError. Added
    `lang2=None`; the POST (form) path is unchanged and Flask passes the
    route values by keyword, so this is backward-compatible.
    """
    if lang and q:
        lang_id = lang
        lang_id2 = lang2
        query = q
    else:
        lang_id = request.form['lang']
        lang_id2 = request.form['lang2']
        query = request.form['query']
    query = query.strip()
    sense = dd(list)
    lang_sense = dd(lambda: dd(list))
    # GO FROM FORM TO SENSE
    # The first character is matched case-insensitively via a GLOB
    # character class, e.g. "dog" -> "[Dd]og".
    # NOTE(review): an empty query still raises IndexError on query[0]
    # — confirm whether the form can submit an empty string.
    for s in query_omw("""
        SELECT s.id as s_id, ss_id, wid, fid, lang_id, pos_id, lemma
        FROM (SELECT w_id as wid, form.id as fid, lang_id, pos_id, lemma
              FROM (SELECT id, lang_id, pos_id, lemma
                    FROM f WHERE lemma GLOB ? AND lang_id in (?,?)) as form
              JOIN wf_link ON form.id = wf_link.f_id) word
        JOIN s ON wid=w_id
        """, ['['+query[0].upper() + query[0].lower()+']'+query[1:],
              lang_id,
              lang_id2]):
        sense[s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
                            s['lang_id'], s['pos_id'], s['lemma']]
        lang_sense[s['lang_id']][s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
                                                s['pos_id'], s['lemma']]
    pos = fetch_pos()
    lang_dct, lang_code = fetch_langs()
    ss, senses, defs, exes, links = fetch_ss_basic(sense.keys())
    labels = fetch_labels(lang_id, set(senses.keys()))
    resp = make_response(render_template('omw_results.html',
                                         langsel = int(lang_id),
                                         langsel2 = int(lang_id2),
                                         pos = pos,
                                         lang_dct = lang_dct,
                                         sense=sense,
                                         senses=senses,
                                         ss=ss,
                                         links=links,
                                         defs=defs,
                                         exes=exes,
                                         labels=labels))
    # remember the language pair for subsequent page views
    resp.set_cookie('selected_lang', lang_id)
    resp.set_cookie('selected_lang2', lang_id2)
    return resp
@app.route('/omw/core', methods=['GET', 'POST'])
def omw_core(): ### FIXME add lang as a paramater?
    """Serve the static page describing the core concept set."""
    return render_template('omw_core.html')
@app.route('/omw/concepts/<ssID>', methods=['GET', 'POST'])
@app.route('/omw/concepts/ili/<iliID>', methods=['GET', 'POST'])
def concepts_omw(ssID=None, iliID=None):
    """Concept (synset) detail page, addressed by synset id or by ILI id."""
    if iliID:
        ss_ids = f_ss_id_by_ili_id(iliID)
        ili, ilidefs = fetch_ili([iliID])
    else:
        ss_ids = [ssID]
        # NOTE: ili_defs (underscore) is assigned here but never read;
        # later branches use the name "ilidefs".
        ili, ili_defs = dict(), dict()
    pos = fetch_pos()
    langs_id, langs_code = fetch_langs()
    ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
    # when called with a synset id, recover its ILI concept (if any)
    if (not iliID) and int(ssID) in ss:
        iliID = ss[int(ssID)][0]
        ili, ilidefs = fetch_ili([iliID])
    sss = list(ss.keys())
    # also label every synset reachable through a relation link
    for s in links:
        for l in links[s]:
            sss.extend(links[s][l])
    selected_lang = request.cookies.get('selected_lang')
    labels = fetch_labels(selected_lang, set(sss))
    ssrels = fetch_ssrel()
    ss_srcs=fetch_src_for_ss_id(ss_ids)
    src_meta=fetch_src_meta()
    core_ss, core_ili = fetch_core()
    # collect every sense id shown so their links can be fetched in one call
    s_ids = []
    for x in senses:
        for y in senses[x]:
            for (s_id, lemma, freq) in senses[x][y]:
                s_ids.append(s_id)
    slinks = fetch_sense_links(s_ids)
    return render_template('omw_concept.html',
                           ssID=ssID,
                           iliID=iliID,
                           pos = pos,
                           langs = langs_id,
                           senses=senses,
                           slinks=slinks,
                           ss=ss,
                           links=links,
                           ssrels=ssrels,
                           defs=defs,
                           exes=exes,
                           ili=ili,
                           selected_lang = selected_lang,
                           selected_lang2 = request.cookies.get('selected_lang2'),
                           labels=labels,
                           ss_srcs=ss_srcs,
                           src_meta=src_meta,
                           core=core_ss)
@app.route('/omw/senses/<sID>', methods=['GET', 'POST'])
def omw_sense(sID=None):
    """Display a single sense (and its variants).

    Falls through (returns None) when no sense id is given, matching the
    original behavior.
    """
    if sID:
        langs_id, langs_code = fetch_langs()
        pos = fetch_pos()
        s_id=int(sID)
        sense = fetch_sense(s_id)
        slinks = fetch_sense_links([s_id])
        forms=fetch_forms(sense[3])
        selected_lang = int(request.cookies.get('selected_lang'))
        labels= fetch_labels(selected_lang,[sense[4]])
        src_meta= fetch_src_meta()
        src_sid=fetch_src_for_s_id([s_id])
        srel = fetch_srel()
        ## get the canonical form for each linked sense
        slabel=fetch_sense_labels([x for v in slinks[int(s_id)].values() for x in v])
        sdefs = fetch_defs_by_sense([s_id])
        if selected_lang in sdefs[s_id]:
            sdef = sdefs[s_id][selected_lang]  ## requested language
        else:
            # BUGFIX: was sdefs[min(sdefs[s_id].keys())], which looked the
            # language id up in the outer s_id-keyed dict (KeyError / wrong
            # value). Fall back to any language of *this* sense instead.
            sdef = sdefs[s_id][min(sdefs[s_id].keys())]  ## a language
        return render_template('omw_sense.html',
                               s_id = sID,
                               sdef = sdef,
                               sense = sense,
                               slinks = slinks[s_id],
                               srel = srel,
                               forms=forms,
                               langs = langs_id,
                               pos = pos,
                               labels = labels,
                               slabel = slabel,
                               src_sid = src_sid,
                               src_meta = src_meta)
# URIs FOR ORIGINAL CONCEPT KEYS, BY INDIVIDUAL SOURCES
@app.route('/omw/src/<src>/<originalkey>', methods=['GET', 'POST'])
def src_omw(src=None, originalkey=None):
    """Resolve a source-specific concept key ("proj-ver"/key) and show it."""
    try:
        # src is expected to look like "proj-ver"; str.index raises
        # ValueError when there is no '-'.
        proj = src[:src.index('-')]
        ver = src[src.index('-')+1:]
        src_id = f_src_id_by_proj_ver(proj, ver)
    except Exception:
        # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; a malformed src still yields src_id=None
        src_id = None
    if src_id:
        ss = fetch_ss_id_by_src_orginalkey(src_id, originalkey)
    else:
        ss = None
    return concepts_omw(ss)
## show wn statistics
##
##
@app.route('/omw/src/<src>', methods=['GET', 'POST'])
def omw_wn(src=None):
    """Show the statistics page for one wordnet source ("proj-ver")."""
    if src:
        try:
            # src is expected to look like "proj-ver"
            proj = src[:src.index('-')]
            ver = src[src.index('-')+1:]
            src_id = f_src_id_by_proj_ver(proj, ver)
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            src_id = None
    # NOTE(review): when src is falsy, src_id is unbound and when the lookup
    # fails srcs_meta[None] raises — both were already 500s in the original.
    srcs_meta = fetch_src_meta()
    src_info = srcs_meta[src_id]
    return render_template('omw_wn.html',
                           wn = src,
                           src_id=src_id,
                           src_info=src_info,
                           ssrel_stats=fetch_ssrel_stats(src_id),
                           pos_stats= fetch_src_id_pos_stats(src_id),
                           src_stats=fetch_src_id_stats(src_id),
                           licenses=licenses)
@app.route('/omw/src-latex/<src>', methods=['GET', 'POST'])
def omw_wn_latex(src=None):
    """LaTeX-formatted statistics for one wordnet source ("proj-ver")."""
    if src:
        try:
            # src is expected to look like "proj-ver"
            proj = src[:src.index('-')]
            ver = src[src.index('-')+1:]
            src_id = f_src_id_by_proj_ver(proj, ver)
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            src_id = None
    srcs_meta = fetch_src_meta()
    src_info = srcs_meta[src_id]
    return render_template('omw_wn_latex.html',
                           wn = src,
                           src_id=src_id,
                           src_info=src_info,
                           ssrel_stats=fetch_ssrel_stats(src_id),
                           pos_stats= fetch_src_id_pos_stats(src_id),
                           src_stats=fetch_src_id_stats(src_id))
@app.route('/cili.tsv')
def generate_cili_tsv():
    """Dump the omw_id / ili_id / projects mapping as a TSV download.

    PERF: the original grew a string with ``tsv += ...`` inside the loop,
    which is quadratic over the whole synset table; build a list and join
    once instead. The emitted bytes are identical.
    """
    srcs = fetch_src()
    # ss_id -> [ili_id]
    ss = dict()
    r = query_omw_direct("SELECT id, ili_id from ss")
    for (ss_id, ili_id) in r:
        ss[ss_id] = [ili_id]
    # ss_id -> ["proj-ver:key", ...]
    src = dd(list)
    r = query_omw_direct("SELECT ss_id, src_id, src_key from ss_src")
    for (ss_id, src_id, src_key) in r:
        src[ss_id].append("{}-{}:{}".format(srcs[src_id][0],
                                            srcs[src_id][1],
                                            src_key))
    lines = ["""# omw_id	ili_id	projects\n"""]
    for ss_id in ss:
        ili = 'i' + str(ss[ss_id][0]) if ss[ss_id][0] else 'None'
        lines.append("{}\t{}\t{}\n".format(ss_id, ili, ";".join(src[ss_id])))
    return Response("".join(lines), mimetype='text/tab-separated-values')
@app.context_processor
def utility_processor():
    """Expose template helpers; scale_freq maps a frequency to a CSS %."""
    def scale_freq(f, maxfreq=1000):
        # 100% baseline; grow logarithmically with frequency above zero.
        if f <= 0:
            return 100
        return 100 + 100 * log(f)/log(maxfreq)
    return dict(scale_freq=scale_freq)
# def style_sense(freq, conf, lang):
# """show confidence as opacity, show freq as size
# opacity is the square of the confidence
# freq is scaled as a % of maxfreq for that language
# TODO: highlight a word if searched for?"""
# style = ''
# if conf and conf < 1.0: ## should not be more than 1.0
# style += 'opacity: {f};'.format(conf*conf) ## degrade quicker
# if freq:
# ### should I be using a log here?
# maxfreq=1000 #(should do per lang)
# style += 'font-size: {f}%;'.format(100*(1+ log(freq)/log(maxfreq)))
# if style:
# style = "style='{}'".format(style)
## show proj statistics
#for proj in fetch_proj/
# Run the Flask development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', threaded=True)
# ---- (concatenation artifact removed; the following module is sqlalchemy's orm/loading.py) ----
# orm/loading.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
from . import attributes
from . import exc as orm_exc
from . import path_registry
from . import strategy_options
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .util import _none_set
from .util import state_str
from .. import exc as sa_exc
from .. import future
from .. import util
from ..engine import result_tuple
from ..engine.result import ChunkedIteratorResult
from ..engine.result import FrozenResult
from ..engine.result import SimpleResultMetaData
from ..sql import util as sql_util
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..sql.selectable import SelectState
# Monotonically increasing counter used to tag each load operation (runid).
_new_runid = util.counter()
def instances(cursor, context):
    """Return a :class:`.Result` given an ORM query context.

    :param cursor: a :class:`.CursorResult`, generated by a statement
     which came from :class:`.ORMCompileState`

    :param context: a :class:`.QueryContext` object

    :return: a :class:`.Result` object representing ORM results

    .. versionchanged:: 1.4 The instances() function now uses
       :class:`.Result` objects and has an all new interface.

    """
    # each load operation gets a fresh runid; row processors use it to tell
    # whether a state was populated during *this* load
    context.runid = _new_runid()
    context.post_load_paths = {}
    compile_state = context.compile_state
    filtered = compile_state._has_mapper_entities
    # a single ORM entity (and tuples not forced) means rows are scalars
    single_entity = (
        not context.load_options._only_return_tuples
        and len(compile_state._entities) == 1
        and compile_state._entities[0].supports_single_entity
    )
    try:
        (process, labels, extra) = list(
            zip(
                *[
                    query_entity.row_processor(context, cursor)
                    for query_entity in context.compile_state._entities
                ]
            )
        )
        if context.yield_per and (
            context.loaders_require_buffering
            or context.loaders_require_uniquing
        ):
            raise sa_exc.InvalidRequestError(
                "Can't use yield_per with eager loaders that require uniquing "
                "or row buffering, e.g. joinedload() against collections "
                "or subqueryload(). Consider the selectinload() strategy "
                "for better flexibility in loading objects."
            )
    except Exception:
        # close the cursor before re-raising so the connection is not leaked
        with util.safe_reraise():
            cursor.close()

    def _no_unique(entry):
        # placeholder unique filter: unique() is illegal under yield_per
        raise sa_exc.InvalidRequestError(
            "Can't use the ORM yield_per feature in conjunction with unique()"
        )

    def _not_hashable(datatype):
        # placeholder unique filter for column types with non-hashable values
        def go(obj):
            raise sa_exc.InvalidRequestError(
                "Can't apply uniqueness to row tuple containing value of "
                "type %r; this datatype produces non-hashable values"
                % datatype
            )

        return go

    if context.load_options._legacy_uniquing:
        unique_filters = [
            _no_unique
            if context.yield_per
            else id
            if (
                ent.use_id_for_hash
                or ent._non_hashable_value
                or ent._null_column_type
            )
            else None
            for ent in context.compile_state._entities
        ]
    else:
        unique_filters = [
            _no_unique
            if context.yield_per
            else _not_hashable(ent.column.type)
            if (not ent.use_id_for_hash and ent._non_hashable_value)
            else id
            if ent.use_id_for_hash
            else None
            for ent in context.compile_state._entities
        ]

    row_metadata = SimpleResultMetaData(
        labels, extra, _unique_filters=unique_filters
    )

    def chunks(size):
        # generator driving the ChunkedIteratorResult: fetch `size` raw rows
        # at a time (or everything when size is falsy), run the per-entity
        # processors, fire post-load, then yield the processed chunk
        while True:
            yield_per = size
            context.partials = {}
            if yield_per:
                fetch = cursor.fetchmany(yield_per)
                if not fetch:
                    break
            else:
                fetch = cursor._raw_all_rows()
            if single_entity:
                proc = process[0]
                rows = [proc(row) for row in fetch]
            else:
                rows = [
                    tuple([proc(row) for proc in process]) for row in fetch
                ]
            for path, post_load in context.post_load_paths.items():
                post_load.invoke(context, path)
            yield rows
            if not yield_per:
                break

    if context.execution_options.get("prebuffer_rows", False):
        # this is a bit of a hack at the moment.
        # I would rather have some option in the result to pre-buffer
        # internally.
        _prebuffered = list(chunks(None))

        def chunks(size):
            return iter(_prebuffered)

    result = ChunkedIteratorResult(
        row_metadata,
        chunks,
        source_supports_scalars=single_entity,
        raw=cursor,
        dynamic_yield_per=cursor.context._is_server_side,
    )

    # filtered and single_entity are used to indicate to legacy Query that the
    # query has ORM entities, so legacy deduping and scalars should be called
    # on the result.
    result._attributes = result._attributes.union(
        dict(filtered=filtered, is_single_entity=single_entity)
    )

    # multi_row_eager_loaders OTOH is specific to joinedload.
    if context.compile_state.multi_row_eager_loaders:

        def require_unique(obj):
            raise sa_exc.InvalidRequestError(
                "The unique() method must be invoked on this Result, "
                "as it contains results that include joined eager loads "
                "against collections"
            )

        result._unique_filter_state = (None, require_unique)

    if context.yield_per:
        result.yield_per(context.yield_per)
    return result
@util.preload_module("sqlalchemy.orm.context")
def merge_frozen_result(session, statement, frozen_result, load=True):
"""Merge a :class:`_engine.FrozenResult` back into a :class:`_orm.Session`,
returning a new :class:`_engine.Result` object with :term:`persistent`
objects.
See the section :ref:`do_orm_execute_re_executing` for an example.
.. seealso::
:ref:`do_orm_execute_re_executing`
:meth:`_engine.Result.freeze`
:class:`_engine.FrozenResult`
"""
querycontext = util.preloaded.orm_context
if load:
# flush current contents if we expect to load data
session._autoflush()
ctx = querycontext.ORMSelectCompileState._create_entities_collection(
statement, legacy=False
)
autoflush = session.autoflush
try:
session.autoflush = False
mapped_entities = [
i
for i, e in enumerate(ctx._entities)
if isinstance(e, querycontext._MapperEntity)
]
keys = [ent._label_name for ent in ctx._entities]
keyed_tuple = result_tuple(
keys, [ent._extra_entities for ent in ctx._entities]
)
result = []
for newrow in frozen_result.rewrite_rows():
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
return frozen_result.with_new_rows(result)
finally:
session.autoflush = autoflush
@util.deprecated(
    "2.0",
    "The :func:`_orm.merge_result` method is superseded by the "
    ":func:`_orm.merge_frozen_result` function.",
)
@util.preload_module("sqlalchemy.orm.context")
def merge_result(query, iterator, load=True):
    """Merge a result into this :class:`.Query` object's Session.

    Legacy (1.x) counterpart of :func:`merge_frozen_result`; accepts either
    a plain iterator of rows/instances or a :class:`.FrozenResult`.
    """
    querycontext = util.preloaded.orm_context

    session = query.session
    if load:
        # flush current contents if we expect to load data
        session._autoflush()

    # TODO: need test coverage and documentation for the FrozenResult
    # use case.
    if isinstance(iterator, FrozenResult):
        frozen_result = iterator
        iterator = iter(frozen_result.data)
    else:
        frozen_result = None

    ctx = querycontext.ORMSelectCompileState._create_entities_collection(
        query, legacy=True
    )

    # merging may trigger loads of its own; suspend autoflush meanwhile
    autoflush = session.autoflush
    try:
        session.autoflush = False
        single_entity = not frozen_result and len(ctx._entities) == 1

        if single_entity:
            if isinstance(ctx._entities[0], querycontext._MapperEntity):
                # rows are bare instances: merge each one directly
                result = [
                    session._merge(
                        attributes.instance_state(instance),
                        attributes.instance_dict(instance),
                        load=load,
                        _recursive={},
                        _resolve_conflict_map={},
                    )
                    for instance in iterator
                ]
            else:
                result = list(iterator)
        else:
            # positions within each row that hold mapped (mergeable) objects
            mapped_entities = [
                i
                for i, e in enumerate(ctx._entities)
                if isinstance(e, querycontext._MapperEntity)
            ]
            result = []
            keys = [ent._label_name for ent in ctx._entities]
            keyed_tuple = result_tuple(
                keys, [ent._extra_entities for ent in ctx._entities]
            )
            for row in iterator:
                newrow = list(row)
                for i in mapped_entities:
                    if newrow[i] is not None:
                        newrow[i] = session._merge(
                            attributes.instance_state(newrow[i]),
                            attributes.instance_dict(newrow[i]),
                            load=load,
                            _recursive={},
                            _resolve_conflict_map={},
                        )
                result.append(keyed_tuple(newrow))

        if frozen_result:
            return frozen_result.with_data(result)
        else:
            return iter(result)
    finally:
        session.autoflush = autoflush
def get_from_identity(session, mapper, key, passive):
    """Look up the given key in the given session's identity map,
    check the object for expired state if found.

    Returns the instance, ``None`` when the key is not present (or the
    object was concurrently deleted), or one of the ``attributes.PASSIVE_*``
    symbols when the lookup cannot proceed under the ``passive`` flags.
    """
    instance = session.identity_map.get(key)
    if instance is not None:

        state = attributes.instance_state(instance)

        # the instance in the map may be of a sibling subclass; only a
        # compatible mapper counts as a hit
        if mapper.inherits and not state.mapper.isa(mapper):
            return attributes.PASSIVE_CLASS_MISMATCH

        # expired - ensure it still exists
        if state.expired:
            if not passive & attributes.SQL_OK:
                # TODO: no coverage here
                return attributes.PASSIVE_NO_RESULT
            elif not passive & attributes.RELATED_OBJECT_OK:
                # this mode is used within a flush and the instance's
                # expired state will be checked soon enough, if necessary.
                # also used by immediateloader for a mutually-dependent
                # o2m->m2m load, :ticket:`6301`
                return instance
            try:
                state._load_expired(state, passive)
            except orm_exc.ObjectDeletedError:
                session._remove_newly_deleted([state])
                return None
        return instance
    else:
        return None
def load_on_ident(
    session,
    statement,
    key,
    load_options=None,
    refresh_state=None,
    with_for_update=None,
    only_load_props=None,
    no_autoflush=False,
    bind_arguments=util.EMPTY_DICT,
    execution_options=util.EMPTY_DICT,
):
    """Load the given identity key from the database.

    Thin wrapper around :func:`load_on_pk_identity` that unpacks the
    (class, pk_tuple, token) identity key into its components.
    """
    if key is None:
        ident = identity_token = None
    else:
        ident, identity_token = key[1], key[2]
    return load_on_pk_identity(
        session,
        statement,
        ident,
        load_options=load_options,
        refresh_state=refresh_state,
        with_for_update=with_for_update,
        only_load_props=only_load_props,
        identity_token=identity_token,
        no_autoflush=no_autoflush,
        bind_arguments=bind_arguments,
        execution_options=execution_options,
    )
def load_on_pk_identity(
    session,
    statement,
    primary_key_identity,
    load_options=None,
    refresh_state=None,
    with_for_update=None,
    only_load_props=None,
    identity_token=None,
    no_autoflush=False,
    bind_arguments=util.EMPTY_DICT,
    execution_options=util.EMPTY_DICT,
):
    """Load the given primary key identity from the database.

    Builds a criteria-adapted clone of ``statement`` against the mapper's
    primary key, executes it, and returns the single object or ``None``.
    """

    query = statement
    q = query._clone()

    assert not q._is_lambda_element

    # TODO: fix these imports ....
    from .context import QueryContext, ORMCompileState

    if load_options is None:
        load_options = QueryContext.default_load_options

    if (
        statement._compile_options
        is SelectState.default_select_compile_options
    ):
        compile_options = ORMCompileState.default_compile_options
    else:
        compile_options = statement._compile_options

    if primary_key_identity is not None:
        mapper = query._propagate_attrs["plugin_subject"]

        (_get_clause, _get_params) = mapper._get_clause

        # None present in ident - turn those comparisons
        # into "IS NULL"
        if None in primary_key_identity:
            nones = set(
                [
                    _get_params[col].key
                    for col, value in zip(
                        mapper.primary_key, primary_key_identity
                    )
                    if value is None
                ]
            )

            _get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)

            if len(nones) == len(primary_key_identity):
                util.warn(
                    "fully NULL primary key identity cannot load any "
                    "object.  This condition may raise an error in a future "
                    "release."
                )

        q._where_criteria = (
            sql_util._deep_annotate(_get_clause, {"_orm_adapt": True}),
        )

        # bind parameter name -> primary key value, aligned by column order
        params = dict(
            [
                (_get_params[primary_key].key, id_val)
                for id_val, primary_key in zip(
                    primary_key_identity, mapper.primary_key
                )
            ]
        )
    else:
        params = None

    # FOR UPDATE forces a round trip even for a cached row (version_check)
    if with_for_update is not None:
        version_check = True
        q._for_update_arg = with_for_update
    elif query._for_update_arg is not None:
        version_check = True
        q._for_update_arg = query._for_update_arg
    else:
        version_check = False

    if refresh_state and refresh_state.load_options:
        # a refresh reuses the loader options the state was loaded with
        compile_options += {"_current_path": refresh_state.load_path.parent}
        q = q.options(*refresh_state.load_options)

    new_compile_options, load_options = _set_get_options(
        compile_options,
        load_options,
        version_check=version_check,
        only_load_props=only_load_props,
        refresh_state=refresh_state,
        identity_token=identity_token,
    )
    q._compile_options = new_compile_options
    # ORDER BY is meaningless for a single-identity fetch
    q._order_by = None

    if no_autoflush:
        load_options += {"_autoflush": False}

    execution_options = util.EMPTY_DICT.merge_with(
        execution_options, {"_sa_orm_load_options": load_options}
    )
    result = (
        session.execute(
            q,
            params=params,
            execution_options=execution_options,
            bind_arguments=bind_arguments,
        )
        .unique()
        .scalars()
    )

    try:
        return result.one()
    except orm_exc.NoResultFound:
        return None
def _set_get_options(
compile_opt,
load_opt,
populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None,
identity_token=None,
):
compile_options = {}
load_options = {}
if version_check:
load_options["_version_check"] = version_check
if populate_existing:
load_options["_populate_existing"] = populate_existing
if refresh_state:
load_options["_refresh_state"] = refresh_state
compile_options["_for_refresh_state"] = True
if only_load_props:
compile_options["_only_load_props"] = frozenset(only_load_props)
if identity_token:
load_options["_refresh_identity_token"] = identity_token
if load_options:
load_opt += load_options
if compile_options:
compile_opt += compile_options
return compile_opt, load_opt
def _setup_entity_query(
    compile_state,
    mapper,
    query_entity,
    path,
    adapter,
    column_collection,
    with_polymorphic=None,
    only_load_props=None,
    polymorphic_discriminator=None,
    **kw
):
    """Add the columns/loader setup for one mapped entity to a compile state.

    Iterates the mapper's (optionally polymorphic) properties, calling each
    property's ``setup()`` to contribute columns to ``column_collection``
    and memoized row-getters to the compile state.
    """

    if with_polymorphic:
        poly_properties = mapper._iterate_polymorphic_properties(
            with_polymorphic
        )
    else:
        poly_properties = mapper._polymorphic_properties

    # column-property setups are memoized here for _instance_processor
    quick_populators = {}

    path.set(compile_state.attributes, "memoized_setups", quick_populators)

    # for the lead entities in the path, e.g. not eager loads, and
    # assuming a user-passed aliased class, e.g. not a from_self() or any
    # implicit aliasing, don't add columns to the SELECT that aren't
    # in the thing that's aliased.
    check_for_adapt = adapter and len(path) == 1 and path[-1].is_aliased_class

    for value in poly_properties:
        if only_load_props and value.key not in only_load_props:
            continue
        value.setup(
            compile_state,
            query_entity,
            path,
            adapter,
            only_load_props=only_load_props,
            column_collection=column_collection,
            memoized_populators=quick_populators,
            check_for_adapt=check_for_adapt,
            **kw
        )

    # a custom discriminator expression must itself be SELECTed
    if (
        polymorphic_discriminator is not None
        and polymorphic_discriminator is not mapper.polymorphic_on
    ):

        if adapter:
            pd = adapter.columns[polymorphic_discriminator]
        else:
            pd = polymorphic_discriminator
        column_collection.append(pd)
def _warn_for_runid_changed(state):
    """Warn that a load/refresh event handler triggered a nested refresh."""
    message = (
        "Loading context for %s has changed within a load/refresh "
        "handler, suggesting a row refresh operation took place. If this "
        "event handler is expected to be "
        "emitting row refresh operations within an existing load or refresh "
        "operation, set restore_load_context=True when establishing the "
        "listener to ensure the context remains unchanged when the event "
        "handler completes." % (state_str(state),)
    )
    util.warn(message)
def _instance_processor(
query_entity,
mapper,
context,
result,
path,
adapter,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None,
):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
identity_class = mapper._identity_class
compile_state = context.compile_state
# look for "row getter" functions that have been assigned along
# with the compile state that were cached from a previous load.
# these are operator.itemgetter() objects that each will extract a
# particular column from each row.
getter_key = ("getters", mapper)
getters = path.get(compile_state.attributes, getter_key, None)
if getters is None:
# no getters, so go through a list of attributes we are loading for,
# and the ones that are column based will have already put information
# for us in another collection "memoized_setups", which represents the
# output of the LoaderStrategy.setup_query() method. We can just as
# easily call LoaderStrategy.create_row_processor for each, but by
# getting it all at once from setup_query we save another method call
# per attribute.
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(
mapper._props[k] for k in only_load_props
)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set
)
todo = []
cached_populators = {
"new": [],
"quick": [],
"deferred": [],
"expire": [],
"delayed": [],
"existing": [],
"eager": [],
}
if refresh_state is None:
# we can also get the "primary key" tuple getter function
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
primary_key_getter = result._tuple_getter(pk_cols)
else:
primary_key_getter = None
getters = {
"cached_populators": cached_populators,
"todo": todo,
"primary_key_getter": primary_key_getter,
}
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
cached_populators["new"].append(
(prop.key, prop._deferred_column_loader)
)
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
cached_populators["expire"].append((prop.key, False))
elif col is _RAISE_FOR_STATE:
cached_populators["new"].append(
(prop.key, prop._raise_column_loader)
)
else:
getter = None
if adapter:
# this logic had been removed for all 1.4 releases
# up until 1.4.18; the adapter here is particularly
# the compound eager adapter which isn't accommodated
# in the quick_populators right now. The "fallback"
# logic below instead took over in many more cases
# until issue #6596 was identified.
# note there is still an issue where this codepath
# produces no "getter" for cases where a joined-inh
# mapping includes a labeled column property, meaning
# KeyError is caught internally and we fall back to
# _getter(col), which works anyway. The adapter
# here for joined inh without any aliasing might not
# be useful. Tests which see this include
# test.orm.inheritance.test_basic ->
# EagerTargetingTest.test_adapt_stringency
# OptimizedLoadTest.test_column_expression_joined
# PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa E501
#
adapted_col = adapter.columns[col]
if adapted_col is not None:
getter = result._getter(adapted_col, False)
if not getter:
getter = result._getter(col, False)
if getter:
cached_populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context,
query_entity,
path,
mapper,
result,
adapter,
cached_populators,
)
else:
# loader strategies like subqueryload, selectinload,
# joinedload, basically relationships, these need to interact
# with the context each time to work correctly.
todo.append(prop)
path.set(compile_state.attributes, getter_key, getters)
cached_populators = getters["cached_populators"]
populators = {key: list(value) for key, value in cached_populators.items()}
for prop in getters["todo"]:
prop.create_row_processor(
context, query_entity, path, mapper, result, adapter, populators
)
propagated_loader_options = context.propagated_loader_options
load_path = (
context.compile_state.current_path + path
if context.compile_state.current_path.path
else path
)
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
if persistent_evt:
loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
runid = context.runid
identity_token = context.identity_token
version_check = context.version_check
if version_check:
version_id_col = mapper.version_id_col
if version_id_col is not None:
if adapter:
version_id_col = adapter.columns[version_id_col]
version_id_getter = result._getter(version_id_col)
else:
version_id_getter = None
if not refresh_state and _polymorphic_from is not None:
key = ("loader", path.path)
if key in context.attributes and context.attributes[key].strategy == (
("selectinload_polymorphic", True),
):
selectin_load_via = mapper._should_selectin_load(
context.attributes[key].local_opts["entities"],
_polymorphic_from,
)
else:
selectin_load_via = mapper._should_selectin_load(
None, _polymorphic_from
)
if selectin_load_via and selectin_load_via is not _polymorphic_from:
# only_load_props goes w/ refresh_state only, and in a refresh
# we are a single row query for the exact entity; polymorphic
# loading does not apply
assert only_load_props is None
callable_ = _load_subclass_via_in(context, path, selectin_load_via)
PostLoad.callable_for_path(
context,
load_path,
selectin_load_via.mapper,
selectin_load_via,
callable_,
selectin_load_via,
)
post_load = PostLoad.for_context(context, load_path, only_load_props)
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = mapper._identity_key_from_state(
refresh_state
)
else:
refresh_identity_key = None
primary_key_getter = getters["primary_key_getter"]
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (
identity_class,
primary_key_getter(row),
identity_token,
)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and version_id_getter and not currentload:
_validate_version_id(
mapper, state, dict_, row, version_id_getter
)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
state.identity_token = identity_token
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
effective_populate_existing = populate_existing
if refresh_state is state:
effective_populate_existing = True
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or effective_populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
# be conservative about setting load_path when populate_existing
# is in effect; want to maintain options from the original
# load. see test_expire->test_refresh_maintains_deferred_options
if isnew and (
propagated_loader_options or not effective_populate_existing
):
state.load_options = propagated_loader_options
state.load_path = load_path
_populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
effective_populate_existing,
populators,
)
if isnew:
# state.runid should be equal to context.runid / runid
# here, however for event checks we are being more conservative
# and checking against existing run id
# assert state.runid == runid
existing_runid = state.runid
if loaded_instance:
if load_evt:
state.manager.dispatch.load(state, context)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
if persistent_evt:
loaded_as_persistent(context.session, state)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props
)
if state.runid != runid:
_warn_for_runid_changed(state)
if effective_populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
if post_load:
post_load.add_state(state, True)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context,
row,
state,
dict_,
isnew,
load_path,
unloaded,
populators,
)
if isnew:
if refresh_evt:
existing_runid = state.runid
state.manager.dispatch.refresh(state, context, to_load)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
state._commit(dict_, to_load)
if post_load and context.invoke_all_eagers:
post_load.add_state(state, False)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
def ensure_no_pk(row):
identitykey = (
identity_class,
primary_key_getter(row),
identity_token,
)
if not is_not_primary_key(identitykey[1]):
return identitykey
else:
return None
_instance = _decorate_polymorphic_switch(
_instance,
context,
query_entity,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
)
return _instance
def _load_subclass_via_in(context, path, entity):
    # Build a "post load" callable that emits a second SELECT ... WHERE
    # pk IN (primary keys) against the given subclass entity, so that
    # subclass-only attributes are loaded for objects first loaded at the
    # base-mapper level (the "selectin" polymorphic load strategy).
    mapper = entity.mapper
    # single-column primary key -> state.key[1] is a 1-tuple; unwrap it
    # when building the IN-list parameter below
    zero_idx = len(mapper.base_mapper.primary_key) == 1
    if entity.is_aliased_class:
        q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
    else:
        # the non-aliased form is memoized on the mapper itself
        q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
    def do_load(context, path, states, load_only, effective_entity):
        # Invoked later by PostLoad.invoke(); `states` is a list of
        # (state, overwrite) pairs collected during the primary load.
        # `load_only` is accepted for interface compatibility and unused
        # in this implementation.
        orig_query = context.query
        # sandwich the caller's loader options between the enable/disable
        # options produced by _subclass_load_via_in
        options = (enable_opt,) + orig_query._with_options + (disable_opt,)
        q2 = q.options(*options)
        # carry the current load path into the new statement's compile
        # options so nested loader strategies resolve correctly
        q2._compile_options = context.compile_state.default_compile_options
        q2._compile_options += {"_current_path": path.parent}
        if context.populate_existing:
            q2 = q2.execution_options(populate_existing=True)
        # execute for the side effect of populating the identity map;
        # the result rows themselves are not needed by the caller
        context.session.execute(
            q2,
            dict(
                primary_keys=[
                    state.key[1][0] if zero_idx else state.key[1]
                    for state, load_attrs in states
                ]
            ),
        ).unique().scalars().all()
    return do_load
def _populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
# have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path, unloaded, populators
):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, getter):
if mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
) != getter(row):
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (
state_str(state),
mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
),
getter(row),
)
)
def _decorate_polymorphic_switch(
    instance_fn,
    context,
    query_entity,
    mapper,
    result,
    path,
    polymorphic_discriminator,
    adapter,
    ensure_no_pk,
):
    # Wrap the per-row _instance() handler so each row is dispatched to a
    # handler built for the mapper indicated by the row's polymorphic
    # discriminator column.  Returns the original handler unchanged when
    # no discriminator column is in play.
    if polymorphic_discriminator is not None:
        polymorphic_on = polymorphic_discriminator
    else:
        polymorphic_on = mapper.polymorphic_on
    if polymorphic_on is None:
        # not actually a polymorphic load
        return instance_fn
    if adapter:
        polymorphic_on = adapter.columns[polymorphic_on]
    def configure_subclass_mapper(discriminator):
        # Resolve one discriminator value to a per-submapper _instance()
        # callable.  Returns None for the base mapper itself (meaning:
        # use the undecorated handler) and False for a mapper that is not
        # a subclass of the requested one.
        try:
            sub_mapper = mapper.polymorphic_map[discriminator]
        except KeyError:
            raise AssertionError(
                "No such polymorphic_identity %r is defined" % discriminator
            )
        else:
            if sub_mapper is mapper:
                return None
            elif not sub_mapper.isa(mapper):
                return False
            return _instance_processor(
                query_entity,
                sub_mapper,
                context,
                result,
                path,
                adapter,
                _polymorphic_from=mapper,
            )
    # memoizing dict: builds the handler for each discriminator value on
    # first use, then caches it
    polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
    getter = result._getter(polymorphic_on)
    def polymorphic_instance(row):
        discriminator = getter(row)
        if discriminator is not None:
            _instance = polymorphic_instances[discriminator]
            if _instance:
                return _instance(row)
            elif _instance is False:
                # discriminator refers to a mapper outside the requested
                # hierarchy; this is an error only if the row actually
                # carries a primary key
                identitykey = ensure_no_pk(row)
                if identitykey:
                    raise sa_exc.InvalidRequestError(
                        "Row with identity key %s can't be loaded into an "
                        "object; the polymorphic discriminator column '%s' "
                        "refers to %s, which is not a sub-mapper of "
                        "the requested %s"
                        % (
                            identitykey,
                            polymorphic_on,
                            mapper.polymorphic_map[discriminator],
                            mapper,
                        )
                    )
                else:
                    return None
            else:
                # _instance is None: row is of the base mapper's own type
                return instance_fn(row)
        else:
            # NULL discriminator; again only an error when the row has a
            # primary key
            identitykey = ensure_no_pk(row)
            if identitykey:
                raise sa_exc.InvalidRequestError(
                    "Row with identity key %s can't be loaded into an "
                    "object; the polymorphic discriminator column '%s' is "
                    "NULL" % (identitykey, polymorphic_on)
                )
            else:
                return None
    return polymorphic_instance
class PostLoad(object):
    """Track loaders and states for "post load" operations.

    A ``PostLoad`` collects, per load path, the loader callables to
    invoke after the primary rows have been processed, together with the
    ordered set of instance states those loaders apply to.
    """

    __slots__ = "loaders", "states", "load_keys"

    def __init__(self):
        # token -> (token, limit_to_mapper, loader_callable, arg, kw)
        self.loaders = {}
        # state -> "overwrite" flag, kept in load order
        self.states = util.OrderedDict()
        # optional restriction to a specific set of attribute keys
        self.load_keys = None

    def add_state(self, state, overwrite):
        """Record an instance state to be handled by the loaders."""
        # the states for a polymorphic load here are all shared
        # within a single PostLoad object among multiple subtypes.
        # Filtering of callables on a per-subclass basis needs to be done at
        # the invocation level
        self.states[state] = overwrite

    def invoke(self, context, path):
        """Run every registered loader against the collected states.

        Each loader receives only the states whose mapper is (a subclass
        of) the mapper it was registered for; the state collection is
        cleared afterwards.
        """
        if not self.states:
            return
        path = path_registry.PathRegistry.coerce(path)
        for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
            states = [
                (state, overwrite)
                for state, overwrite in self.states.items()
                if state.manager.mapper.isa(limit_to_mapper)
            ]
            if states:
                loader(context, path, states, self.load_keys, *arg, **kw)
        self.states.clear()

    @classmethod
    def for_context(cls, context, path, only_load_props):
        """Return the PostLoad registered for *path* (or None), applying
        the *only_load_props* restriction to it when given."""
        pl = context.post_load_paths.get(path.path)
        if pl is not None and only_load_props:
            pl.load_keys = only_load_props
        return pl

    @classmethod
    def path_exists(cls, context, path, key):
        """Return whether a loader under *key* is registered for *path*."""
        # fix: first parameter was previously named "self", which is
        # misleading for a classmethod; the binding itself is unchanged.
        return (
            path.path in context.post_load_paths
            and key in context.post_load_paths[path.path].loaders
        )

    @classmethod
    def callable_for_path(
        cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw
    ):
        """Register *loader_callable* under *token* for the given path,
        creating the PostLoad entry on first use."""
        if path.path in context.post_load_paths:
            pl = context.post_load_paths[path.path]
        else:
            pl = context.post_load_paths[path.path] = PostLoad()
        pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
def load_scalar_attributes(mapper, state, attribute_names, passive):
    """Initiate a column-based attribute refresh operation.

    Loads the given ``attribute_names`` for ``state`` from the database,
    first attempting an optimized statement for inheriting mappers, then
    falling back to a full SELECT by identity key.  Raises
    ``DetachedInstanceError`` when the state has no Session and
    ``ObjectDeletedError`` when the row for a keyed state no longer
    exists.
    """
    # assert mapper is _state_mapper(state)
    session = state.session
    if not session:
        raise orm_exc.DetachedInstanceError(
            "Instance %s is not bound to a Session; "
            "attribute refresh operation cannot proceed" % (state_str(state))
        )
    # has_key distinguishes a persistent instance from a pending one that
    # is mid-flush (see the rare codepath below)
    has_key = bool(state.key)
    # False means "no load attempted yet"; load_on_ident returns the
    # instance or None (row gone)
    result = False
    no_autoflush = (
        bool(passive & attributes.NO_AUTOFLUSH) or state.session.autocommit
    )
    # in the case of inheritance, particularly concrete and abstract
    # concrete inheritance, the class manager might have some keys
    # of attributes on the superclass that we didn't actually map.
    # These could be mapped as "concrete, don't load" or could be completely
    # excluded from the mapping and we know nothing about them. Filter them
    # here to prevent them from coming through.
    if attribute_names:
        attribute_names = attribute_names.intersection(mapper.attrs.keys())
    if mapper.inherits and not mapper.concrete:
        # because we are using Core to produce a select() that we
        # pass to the Query, we aren't calling setup() for mapped
        # attributes; in 1.0 this means deferred attrs won't get loaded
        # by default
        statement = mapper._optimized_get_statement(state, attribute_names)
        if statement is not None:
            # this was previously aliased(mapper, statement), however,
            # statement is a select() and Query's coercion now raises for this
            # since you can't "select" from a "SELECT" statement. only
            # from_statement() allows this.
            # note: using from_statement() here means there is an adaption
            # with adapt_on_names set up. the other option is to make the
            # aliased() against a subquery which affects the SQL.
            from .query import FromStatement
            # undefer("*") counteracts the deferred-attribute issue noted
            # above for the optimized statement
            stmt = FromStatement(mapper, statement).options(
                strategy_options.Load(mapper).undefer("*")
            )
            result = load_on_ident(
                session,
                stmt,
                None,
                only_load_props=attribute_names,
                refresh_state=state,
                no_autoflush=no_autoflush,
            )
    if result is False:
        # the optimized path did not run; load by identity key instead
        if has_key:
            identity_key = state.key
        else:
            # this codepath is rare - only valid when inside a flush, and the
            # object is becoming persistent but hasn't yet been assigned
            # an identity_key.
            # check here to ensure we have the attrs we need.
            pk_attrs = [
                mapper._columntoproperty[col].key for col in mapper.primary_key
            ]
            if state.expired_attributes.intersection(pk_attrs):
                raise sa_exc.InvalidRequestError(
                    "Instance %s cannot be refreshed - it's not "
                    " persistent and does not "
                    "contain a full primary key." % state_str(state)
                )
            identity_key = mapper._identity_key_from_state(state)
        # a key containing NULLs cannot be used for a refresh (unless the
        # mapper allows partial primary keys and at least one value is set)
        if (
            _none_set.issubset(identity_key) and not mapper.allow_partial_pks
        ) or _none_set.issuperset(identity_key):
            util.warn_limited(
                "Instance %s to be refreshed doesn't "
                "contain a full primary key - can't be refreshed "
                "(and shouldn't be expired, either).",
                state_str(state),
            )
            return
        result = load_on_ident(
            session,
            future.select(mapper).set_label_style(
                LABEL_STYLE_TABLENAME_PLUS_COL
            ),
            identity_key,
            refresh_state=state,
            only_load_props=attribute_names,
            no_autoflush=no_autoflush,
        )
    # if instance is pending, a refresh operation
    # may not complete (even if PK attributes are assigned)
    if has_key and result is None:
        raise orm_exc.ObjectDeletedError(state)
| |
"""
Handle data encryption and decryption while transferring the file to and
from the cloud.
"""
import base64
import errno
import gnupg
import json
import os
import tempfile
import urllib
import urllib2
from StringIO import StringIO
from lib import checksum_data, checksum_file
from lib.encryption import generate_random_password, encrypt, decrypt
METADATA_VERSION = 1
gpg = gnupg.GPG(use_agent=True)
class GPGError(Exception):
    """
    Raised when a GPG encryption or decryption operation fails.
    """

    def __init__(self, result):
        # *result* is the gnupg result object; its ``status`` attribute
        # carries the human-readable failure reason
        self.result = result

    def __str__(self):
        return self.result.status
class MetadataError(Exception):
    """
    Raised when stored metadata is missing, malformed or mismatched.
    """

    def __init__(self, key, message):
        # *key* identifies the cloud object whose metadata is at fault
        self.key = key
        self.message = message

    def __str__(self):
        return self.message
class DataError(MetadataError):
    """
    Raised when retrieved data fails an integrity check; shares the
    (key, message) interface of MetadataError.
    """
class Provider(object):
    """
    Base class for cloud provider.

    Validates the configuration and defines the storage interface;
    subclasses override the no-op operations with real implementations.
    """

    def __init__(self, config, bucket_name, encryption_method="gpg"):
        """
        Initialize cloud provider.

        :param config: configuration object exposing
            ``check(section, options)`` for verifying required settings
        :param bucket_name: name of the bucket the data is stored under
        :param encryption_method: "gpg", "symmetric" or "cryptoengine"
            (case-insensitive)
        :raises ValueError: if *encryption_method* is not recognized
        """
        self.config = config
        self.config.check("general", ["database"])
        self.config.check("gnupg", ["recipients", "signer"])
        self.bucket_name = bucket_name
        # Normalize the method once.  Previously only the *validation*
        # was case-insensitive while the original-case string was stored,
        # so a value such as "GPG" or "Symmetric" passed validation but
        # silently fell through every later case-sensitive comparison
        # (e.g. self.encryption_method == "cryptoengine").
        encryption_method = encryption_method.lower()
        if encryption_method not in [
                "gpg", "symmetric", "cryptoengine", ]:
            raise ValueError(
                "Encryption method must be either 'gpg', 'symmetric' or "
                "'cryptoengine'")
        self.encryption_method = encryption_method
        if self.encryption_method == "cryptoengine":
            self.config.check("cryptoengine", ["api_url"])

    @property
    def __name__(self):
        # provider identifier; recorded in metadata records and used to
        # scope database queries to this bucket
        return "cloud-provider-bucket:" + self.bucket_name

    def __str__(self):
        return self.__name__

    def connect(self):
        """
        Connect to cloud provider.  No-op in the base class.
        """
        pass

    def disconnect(self):
        """
        Disconnect from cloud provider.  No-op in the base class.
        """
        pass

    def store(self, key, data):
        """
        Store data to cloud provider.
        """
        pass

    def store_from_filename(self, key, filename):
        """
        Store data to cloud provider from file.
        """
        pass

    def retrieve(self, key):
        """
        Retrieve data from cloud provider. Return data as string.
        """
        return None

    def retrieve_to_filename(self, key, filename):
        """
        Retrieve data from cloud provider. Write data to file.
        """
        pass

    def delete(self, key):
        """
        Delete data from cloud provider.
        """
        pass

    def list(self):
        """
        List data in cloud provider. Return dictionary of keys with
        data.
        """
        return dict()

    def list_keys(self):
        """
        List data keys in cloud provider.
        """
        return dict()
class Cloud(object):
    """
    Basic class for cloud access.

    Coordinates four collaborators: a ``metadata_provider`` holding
    GPG-encrypted per-object metadata records, a ``provider`` holding the
    encrypted file data, a local metadata ``database`` cache, and a
    ``config`` object supplying the GnuPG recipients/signer settings.
    Data payloads are deduplicated by plaintext checksum; metadata is
    always GPG-encrypted and signed regardless of the data encryption
    method.
    """
    def __init__(self, config, metadata_provider, provider, database):
        self.config = config
        self.metadata_provider = metadata_provider
        self.provider = provider
        self.database = database
        # GnuPG key ids: everything is encrypted to all recipients and
        # signed with the configured signer key
        self.recipients = self.config.config.get(
            "gnupg", "recipients").split(",")
        self.signer = self.config.config.get("gnupg", "signer")
    def _create_metadata(self, key, filename=None, size=0, stat_info=None,
                         checksum=None, encryption_key=None,
                         encrypted_size=0, encrypted_checksum=None):
        """
        Build the metadata dictionary describing one stored object.

        Optional pieces (file name/path, stat attributes, checksums) are
        only filled in when the corresponding argument is provided.
        """
        metadata = dict(
            metadata_version=METADATA_VERSION,
            provider=self.metadata_provider.__name__, key=key, name=None,
            path=None, size=size, mode=0, uid=0, gid=0, atime=0, mtime=0,
            ctime=0, checksum=None, encryption_key=encryption_key,
            encrypted_size=encrypted_size, encrypted_checksum=None)
        if filename is not None:
            metadata["name"] = os.path.basename(filename)
            metadata["path"] = filename
        if stat_info is not None:
            # preserve POSIX permissions, ownership and timestamps so they
            # can be restored by retrieve_to_filename()
            metadata["mode"] = stat_info.st_mode
            metadata["uid"] = stat_info.st_uid
            metadata["gid"] = stat_info.st_gid
            metadata["atime"] = stat_info.st_atime
            metadata["ctime"] = stat_info.st_ctime
            metadata["mtime"] = stat_info.st_mtime
        if checksum is not None:
            metadata["checksum"] = checksum
        if encrypted_checksum is not None:
            metadata["encrypted_checksum"] = encrypted_checksum
        return metadata
    def connect(self):
        """
        Open connection to cloud.  Returns self so calls can be chained.
        """
        self.metadata_provider.connect()
        self.provider.connect()
        return self
    def disconnect(self):
        """
        Close cloud connection.
        """
        self.metadata_provider.disconnect()
        self.provider.disconnect()
    def sync(self):
        """
        Sync metadata database from cloud.

        Drops all locally cached records for this metadata provider, then
        decrypts and validates every remote metadata record before
        re-inserting it.  Raises GPGError on decryption failure and
        MetadataError for empty, unparsable or wrong-version records.
        """
        self.database.drop(provider=self.metadata_provider.__name__)
        for key, encrypted_metadata in self.metadata_provider.list().items():
            metadata = gpg.decrypt(encrypted_metadata)
            if not metadata.ok:
                raise GPGError(metadata)
            if not metadata.data:
                raise MetadataError(key, "No metadata")
            try:
                metadata = json.loads(metadata.data)
            except ValueError as e:
                raise MetadataError(key, "Invalid metadata: {0}".format(e))
            if "metadata_version" not in metadata:
                raise MetadataError(key, "No metadata version available")
            if metadata["metadata_version"] != METADATA_VERSION:
                raise MetadataError(
                    key, "Wrong metadata version: {0} != {1}".format(
                        metadata["metadata_version"], METADATA_VERSION))
            self.database.update(metadata)
    def list(self):
        """
        List metadata from database.
        """
        # note: list() below still resolves to the builtin; the method
        # name only shadows it as a class attribute
        metadata = list()
        for m in self.database.list(provider=self.metadata_provider.__name__):
            metadata.append(m)
        return metadata
    def find(self, **filter):
        """
        Find metadata in database.
        """
        return self.database.find(**filter)
    def find_one(self, **filter):
        """
        Find metadata in database.

        Callers in this class treat a falsy return value as "not found".
        """
        return self.database.find_one(**filter)
    def _cryptoengine_encrypt(self, data, encryption_key):
        """
        Encrypt data in crypto engine server.

        POSTs the base64-encoded plaintext and the key to the configured
        API's /encrypt endpoint and returns the parsed JSON response.
        """
        data = urllib.urlencode(
            {'data': base64.encodestring(data), 'key': encryption_key, })
        u = urllib2.urlopen(
            self.config.config.get("cryptoengine", "api_url") + '/encrypt',
            data)
        return json.loads(u.read())
    def _cryptoengine_decrypt(self, data, encryption_key):
        """
        Decrypt data in crypto engine server.

        POSTs the encrypted payload and key to the /decrypt endpoint; the
        "data" field of the JSON response is base64-decoded in place
        before the result is returned.
        """
        data = urllib.urlencode({'data': data, 'key': encryption_key, })
        u = urllib2.urlopen(
            self.config.config.get("cryptoengine", "api_url") + '/decrypt',
            data)
        result = json.loads(u.read())
        result["data"] = base64.decodestring(result["data"])
        return result
    def _encrypt_gpg(self, data):
        # Asymmetric GPG encryption to self.recipients, signed by
        # self.signer; no per-object key is generated, so encryption_key
        # stays None in the metadata.
        encryption_key = None
        encrypted_data = gpg.encrypt(
            data, self.recipients, sign=self.signer)
        if not encrypted_data.ok:
            raise GPGError(encrypted_data)
        encrypted_size = len(encrypted_data.data)
        encrypted_checksum = checksum_data(encrypted_data.data)
        return (encryption_key, encrypted_data.data, encrypted_size,
                encrypted_checksum)
    def _encrypt_file_gpg(self, plaintext_file, encrypted_file):
        # File-based variant of _encrypt_gpg: ciphertext is written to
        # *encrypted_file* rather than returned.
        encryption_key = None
        encrypted_data = gpg.encrypt_file(
            file(plaintext_file), self.recipients, sign=self.signer,
            output=encrypted_file)
        if not encrypted_data.ok:
            raise GPGError(encrypted_data)
        encrypted_stat_info = os.stat(encrypted_file)
        encrypted_checksum = checksum_file(encrypted_file)
        encrypted_size = encrypted_stat_info.st_size
        return (encryption_key, encrypted_size, encrypted_checksum)
    def _encrypt_symmetric(self, data):
        # Symmetric encryption with a freshly generated random password.
        # The ciphertext is base64-armored before upload; the returned
        # size and checksum refer to the base64 form.
        encryption_key = generate_random_password()
        plaintext_fp = StringIO(data)
        encrypted_fp = StringIO()
        encrypt(plaintext_fp, encrypted_fp, encryption_key)
        encrypted_fp.seek(0)
        encrypted_data = encrypted_fp.read()
        base64_data = base64.encodestring(encrypted_data)
        base64_size = len(base64_data)
        encrypted_checksum = checksum_data(base64_data)
        return (encryption_key, base64_data, base64_size,
                encrypted_checksum)
    def _encrypt_cryptoengine(self, data):
        # Delegates encryption to the crypto engine service; the response
        # is expected to carry the base64 ciphertext and its checksum
        # under "encrypted_data"/"encrypted_checksum".
        encryption_key = generate_random_password()
        result = self._cryptoengine_encrypt(data, encryption_key)
        base64_data = result["encrypted_data"]
        base64_size = len(base64_data)
        encrypted_checksum = result["encrypted_checksum"]
        return (encryption_key, base64_data, base64_size,
                encrypted_checksum)
    def _encrypt_file_symmetric(self, plaintext_file, encrypted_file):
        # File-based variant of _encrypt_symmetric: encrypt into a
        # temporary file, then base64-armor into *encrypted_file*.
        encryption_key = generate_random_password()
        plaintext_fp = file(plaintext_file)
        encrypted_fp = tempfile.TemporaryFile()
        encrypt(plaintext_fp, encrypted_fp, encryption_key)
        encrypted_fp.flush()
        encrypted_fp.seek(0)
        base64_fp = file(encrypted_file, "wb")
        base64.encode(encrypted_fp, base64_fp)
        base64_fp.close()
        encrypted_fp.close()
        plaintext_fp.close()
        encrypted_stat_info = os.stat(encrypted_file)
        encrypted_size = encrypted_stat_info.st_size
        encrypted_checksum = checksum_file(encrypted_file)
        return (encryption_key, encrypted_size, encrypted_checksum)
    def _encrypt_file_cryptoengine(self, plaintext_file, encrypted_file):
        # NOTE(review): reads the entire plaintext into memory before
        # posting it to the crypto engine -- costly for large files.
        encryption_key = generate_random_password()
        data = file(plaintext_file).read()
        result = self._cryptoengine_encrypt(data, encryption_key)
        base64_data = result["encrypted_data"]
        base64_size = len(base64_data)
        encrypted_checksum = result["encrypted_checksum"]
        encrypted_fp = file(encrypted_file, "wb")
        encrypted_fp.write(base64_data)
        encrypted_fp.close()
        return (encryption_key, base64_size, encrypted_checksum)
    def store(self, data, cloud_filename, stat_info=None):
        """
        Encrypt data and store it to cloud.

        The metadata key is derived from data+name, so the same content
        stored under two names yields two metadata records, while the
        payload itself is deduplicated by plaintext checksum: when a
        record with the same checksum already exists, the encrypted data
        is not re-uploaded and its encryption parameters are reused.
        """
        key = checksum_data(data + cloud_filename)
        checksum = checksum_data(data)
        size = len(data)
        # Do we have the data already stored into cloud?
        old_metadata = self.database.find_one(
            provider=self.metadata_provider.__name__, checksum=checksum)
        if old_metadata:
            encrypted_data = None
            encryption_key = old_metadata["encryption_key"]
            encrypted_checksum = old_metadata["encrypted_checksum"]
            encrypted_size = old_metadata["encrypted_size"]
        else:
            # Create encrypted data.
            if self.provider.encryption_method == "symmetric":
                (encryption_key, encrypted_data, encrypted_size,
                 encrypted_checksum) = self._encrypt_symmetric(data)
            elif self.provider.encryption_method == "cryptoengine":
                (encryption_key, encrypted_data, encrypted_size,
                 encrypted_checksum) = self._encrypt_cryptoengine(data)
            else:
                (encryption_key, encrypted_data, encrypted_size,
                 encrypted_checksum) = self._encrypt_gpg(data)
        # Create encrypted metadata.
        metadata = self._create_metadata(
            key, filename=cloud_filename, size=size, stat_info=stat_info,
            checksum=checksum, encryption_key=encryption_key,
            encrypted_size=encrypted_size,
            encrypted_checksum=encrypted_checksum)
        encrypted_metadata = gpg.encrypt(
            json.dumps(metadata), self.recipients, sign=self.signer)
        if not encrypted_metadata.ok:
            raise GPGError(encrypted_metadata)
        # Store metadata and data to cloud and update database.
        self.metadata_provider.store(key, encrypted_metadata.data)
        if not old_metadata:
            self.provider.store(checksum, encrypted_data)
        self.database.update(metadata)
        return metadata
    def store_from_filename(self, filename, cloud_filename=None):
        """
        Encrypt file data and store it to cloud.

        File-based counterpart of store(): encrypts via a temporary file
        and uploads with store_from_filename(), with the same checksum
        based deduplication of the payload.
        """
        if cloud_filename is None:
            cloud_filename = filename
        stat_info = os.stat(filename)
        key = checksum_file(filename, extra_data=cloud_filename)
        checksum = checksum_file(filename)
        size = stat_info.st_size
        # Do we have the data already stored into cloud?
        old_metadata = self.database.find_one(
            provider=self.metadata_provider.__name__, checksum=checksum)
        if old_metadata:
            encrypted_file = None
            encryption_key = old_metadata["encryption_key"]
            encrypted_checksum = old_metadata["encrypted_checksum"]
            encrypted_size = old_metadata["encrypted_size"]
        else:
            # Create encrypted data file.
            encrypted_file = tempfile.NamedTemporaryFile()
            if self.provider.encryption_method == "symmetric":
                (encryption_key, encrypted_size, encrypted_checksum) =\
                    self._encrypt_file_symmetric(filename, encrypted_file.name)
            elif self.provider.encryption_method == "cryptoengine":
                (encryption_key, encrypted_size, encrypted_checksum) =\
                    self._encrypt_file_cryptoengine(
                        filename, encrypted_file.name)
            else:
                (encryption_key, encrypted_size, encrypted_checksum) =\
                    self._encrypt_file_gpg(filename, encrypted_file.name)
        # Create encrypted metadata.
        metadata = self._create_metadata(
            key, filename=cloud_filename, size=size, stat_info=stat_info,
            checksum=checksum, encryption_key=encryption_key,
            encrypted_size=encrypted_size,
            encrypted_checksum=encrypted_checksum)
        encrypted_metadata = gpg.encrypt(
            json.dumps(metadata), self.recipients, sign=self.signer)
        if not encrypted_metadata.ok:
            raise GPGError(encrypted_metadata)
        # Store metadata and data to cloud and update database.
        self.metadata_provider.store(key, encrypted_metadata.data)
        if not old_metadata:
            self.provider.store_from_filename(checksum, encrypted_file.name)
        self.database.update(metadata)
        return metadata
    def _decrypt_gpg(self, encrypted_data):
        # Returns (plaintext, plaintext checksum); raises GPGError on
        # failure.
        data = gpg.decrypt(encrypted_data)
        if not data.ok:
            raise GPGError(data)
        checksum = checksum_data(data.data)
        return data.data, checksum
    def _decrypt_file_gpg(self, encrypted_file, plaintext_file):
        # Decrypts into *plaintext_file* and returns its checksum.
        data = gpg.decrypt_file(
            file(encrypted_file), output=plaintext_file)
        if not data.ok:
            raise GPGError(data)
        checksum = checksum_file(plaintext_file)
        return checksum
    def _decrypt_symmetric(self, encrypted_data, encryption_key):
        # Inverse of _encrypt_symmetric: un-armor the base64 payload,
        # then decrypt with the stored per-object key.  decrypt() yields
        # the plaintext checksum as its second tuple element.
        encrypted_fp = StringIO(base64.decodestring(encrypted_data))
        plaintext_fp = StringIO()
        _, checksum = decrypt(encrypted_fp, plaintext_fp, encryption_key)
        plaintext_fp.seek(0)
        data = plaintext_fp.read()
        return data, checksum
    def _decrypt_cryptoengine(self, encrypted_data, encryption_key):
        # Delegates decryption to the crypto engine service; the response
        # carries plaintext and its checksum.
        result = self._cryptoengine_decrypt(encrypted_data, encryption_key)
        data = result["data"]
        checksum = result["checksum"]
        return data, checksum
    def _decrypt_file_symmetric(self, encrypted_file, plaintext_file,
                                encryption_key):
        # File-based variant of _decrypt_symmetric, via a temporary file
        # for the un-armored ciphertext.
        base64_fp = file(encrypted_file)
        encrypted_fp = tempfile.TemporaryFile()
        base64.decode(base64_fp, encrypted_fp)
        encrypted_fp.flush()
        encrypted_fp.seek(0)
        plaintext_fp = file(plaintext_file, "wb")
        _, checksum = decrypt(encrypted_fp, plaintext_fp, encryption_key)
        plaintext_fp.close()
        encrypted_fp.close()
        base64_fp.close()
        return checksum
    def _decrypt_file_cryptoengine(self, encrypted_file, plaintext_file,
                                   encryption_key):
        # NOTE(review): reads the full encrypted payload into memory
        # before posting it to the crypto engine.
        encrypted_data = file(encrypted_file).read()
        result = self._cryptoengine_decrypt(encrypted_data, encryption_key)
        data = result["data"]
        checksum = result["checksum"]
        plaintext_fp = file(plaintext_file, "wb")
        plaintext_fp.write(data)
        plaintext_fp.close()
        return checksum
    def retrieve(self, metadata):
        """
        Retrieve data from cloud and decrypt it.

        Verifies the encrypted payload's checksum before decryption and
        the plaintext checksum after; raises DataError on mismatch.
        """
        # Get data from cloud.
        encrypted_data = self.provider.retrieve(metadata["checksum"])
        encrypted_checksum = checksum_data(encrypted_data)
        if encrypted_checksum != metadata['encrypted_checksum']:
            raise DataError(
                metadata["checksum"],
                "Wrong encrypted data checksum: {0} != {1}".format(
                    encrypted_checksum, metadata["encrypted_checksum"]))
        # Decrypt data.
        if self.provider.encryption_method == "symmetric":
            data, checksum = self._decrypt_symmetric(
                encrypted_data, metadata["encryption_key"])
        elif self.provider.encryption_method == "cryptoengine":
            data, checksum = self._decrypt_cryptoengine(
                encrypted_data, metadata["encryption_key"])
        else:
            data, checksum = self._decrypt_gpg(encrypted_data)
        if checksum != metadata['checksum']:
            raise DataError(
                metadata["checksum"],
                "Wrong data checksum: {0} != {1}".format(
                    checksum, metadata["checksum"]))
        return data
    def retrieve_to_filename(self, metadata, filename=None):
        """
        Retrieve data from cloud and decrypt it.

        File-based counterpart of retrieve(); defaults to the original
        path recorded in the metadata, creating missing directories, and
        restores the stored permission bits and access/modification
        times on the result.
        """
        if filename is None:
            filename = metadata["path"]
        directory_name = os.path.dirname(filename)
        if directory_name:
            try:
                os.makedirs(directory_name)
            except OSError as e:
                # the directory may already exist; anything else is fatal
                if e.errno != errno.EEXIST:
                    raise
        # Get data from cloud and store it to a temporary file.
        encrypted_file = tempfile.NamedTemporaryFile()
        self.provider.retrieve_to_filename(
            metadata["checksum"], encrypted_file.name)
        encrypted_checksum = checksum_file(encrypted_file.name)
        if encrypted_checksum != metadata['encrypted_checksum']:
            raise DataError(
                metadata["checksum"],
                "Wrong encrypted data checksum: {0} != {1}".format(
                    encrypted_checksum, metadata["encrypted_checksum"]))
        # Decrypt the data in temporary file and store it to given filename.
        if self.provider.encryption_method == "symmetric":
            checksum = self._decrypt_file_symmetric(
                encrypted_file.name, filename, metadata["encryption_key"])
        elif self.provider.encryption_method == "cryptoengine":
            checksum = self._decrypt_file_cryptoengine(
                encrypted_file.name, filename, metadata["encryption_key"])
        else:
            checksum = self._decrypt_file_gpg(
                encrypted_file.name, filename)
        if checksum != metadata['checksum']:
            raise DataError(
                metadata["checksum"],
                "Wrong data checksum: {0} != {1}".format(
                    checksum, metadata["checksum"]))
        # Set file attributes.
        os.chmod(filename, metadata["mode"])
        os.utime(filename, (metadata["atime"], metadata["mtime"]))
        encrypted_file.close()
    def delete(self, metadata):
        """
        Delete data from cloud.

        Removes the metadata record first; the encrypted payload is only
        deleted once no other metadata record references its checksum.
        """
        self.metadata_provider.delete(metadata["key"])
        self.database.delete(metadata["key"])
        if not self.database.find_one(
                provider=self.metadata_provider.__name__,
                checksum=metadata["checksum"]):
            # Metadata is removed, remove the data.
            self.provider.delete(metadata["checksum"])
| |
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron.common import constants as l3_const
from neutron.common import exceptions
from neutron import context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_dvr_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants as plugin_const
from neutron.tests.unit.db import test_db_base_plugin_v2
_uuid = uuidutils.generate_uuid
class FakeL3Plugin(common_db_mixin.CommonDbMixin,
                   l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                   l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                   agents_db.AgentDbMixin):
    """Minimal L3 plugin assembled from the DVR, agent-scheduler and
    agent DB mixins, used to exercise mixin behavior in tests."""
class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    """Unit tests for the DVR (distributed virtual router) L3 DB mixin.

    The mixin under test (``self.mixin``) is exercised in isolation from
    the real L3 service plugin; collaborators are replaced with mocks
    where the test only cares about call dispatch.
    """

    def setUp(self):
        core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        super(L3DvrTestCase, self).setUp(plugin=core_plugin)
        self.core_plugin = manager.NeutronManager.get_plugin()
        self.ctx = context.get_admin_context()
        self.mixin = FakeL3Plugin()

    def _create_router(self, router):
        # Create the router DB object inside a subtransaction, as the
        # plugin itself would.
        with self.ctx.session.begin(subtransactions=True):
            return self.mixin._create_router_db(self.ctx, router, 'foo_tenant')

    def _test__create_router_db(self, expected=False, distributed=None):
        router = {'name': 'foo_router', 'admin_state_up': True}
        if distributed is not None:
            router['distributed'] = distributed
        result = self._create_router(router)
        self.assertEqual(expected, result.extra_attributes['distributed'])

    def test_create_router_db_default(self):
        self._test__create_router_db(expected=False)

    def test_create_router_db_centralized(self):
        self._test__create_router_db(expected=False, distributed=False)

    def test_create_router_db_distributed(self):
        self._test__create_router_db(expected=True, distributed=True)

    def test__validate_router_migration_on_router_update(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        # Updates that do not touch 'distributed' are not a migration.
        self.assertIsNone(self.mixin._validate_router_migration(
            self.ctx, router_db, {'name': 'foo_router_2'}))

    def test__validate_router_migration_raise_error(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router_db = self._create_router(router)
        # Downgrading distributed -> centralized is rejected.
        self.assertRaises(exceptions.BadRequest,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, {'distributed': False})

    def test_upgrade_active_router_to_distributed_validation_failure(self):
        # Migration is only allowed on administratively-down routers.
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        update = {'distributed': True}
        self.assertRaises(exceptions.BadRequest,
                          self.mixin._validate_router_migration,
                          self.ctx, router_db, update)

    def test_update_router_db_centralized_to_distributed(self):
        router = {'name': 'foo_router', 'admin_state_up': True}
        agent = {'id': _uuid()}
        distributed = {'distributed': True}
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertFalse(router_db.extra_attributes.distributed)
        self.mixin._get_router = mock.Mock(return_value=router_db)
        self.mixin._validate_router_migration = mock.Mock()
        self.mixin._update_distributed_attr = mock.Mock()
        self.mixin.list_l3_agents_hosting_router = mock.Mock(
            return_value={'agents': [agent]})
        self.mixin._unbind_router = mock.Mock()
        router_db = self.mixin._update_router_db(
            self.ctx, router_id, distributed)
        # Assert that the DB value has changed
        self.assertTrue(router_db.extra_attributes.distributed)
        self.assertEqual(1,
                         self.mixin._update_distributed_attr.call_count)

    def _test_get_device_owner(self, is_distributed=False,
                               expected=l3_const.DEVICE_OWNER_ROUTER_INTF,
                               pass_router_id=True):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': is_distributed
        }
        router_db = self._create_router(router)
        router_pass = router_db['id'] if pass_router_id else router_db
        with mock.patch.object(self.mixin, '_get_router') as f:
            f.return_value = router_db
            result = self.mixin._get_device_owner(self.ctx, router_pass)
            self.assertEqual(expected, result)

    def test_get_device_owner_by_router_id(self):
        self._test_get_device_owner()

    def test__get_device_owner_centralized(self):
        self._test_get_device_owner(pass_router_id=False)

    def test__get_device_owner_distributed(self):
        self._test_get_device_owner(
            is_distributed=True,
            expected=l3_const.DEVICE_OWNER_DVR_INTERFACE,
            pass_router_id=False)

    def _test__is_distributed_router(self, router, expected):
        result = l3_dvr_db.is_distributed_router(router)
        self.assertEqual(expected, result)

    def test__is_distributed_router_by_db_object(self):
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        # FIX: this test previously called _get_device_owner without
        # asserting anything.  A freshly created router defaults to
        # centralized, so the legacy interface owner must be resolved.
        self.assertEqual(
            l3_const.DEVICE_OWNER_ROUTER_INTF,
            self.mixin._get_device_owner(mock.ANY, router_db))

    def test__is_distributed_router_default(self):
        router = {'id': 'foo_router_id'}
        self._test__is_distributed_router(router, False)

    def test__is_distributed_router_centralized(self):
        router = {'id': 'foo_router_id', 'distributed': False}
        self._test__is_distributed_router(router, False)

    def test__is_distributed_router_distributed(self):
        router = {'id': 'foo_router_id', 'distributed': True}
        self._test__is_distributed_router(router, True)

    def test__get_agent_gw_ports_exist_for_network(self):
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = []
            self.mixin._get_agent_gw_ports_exist_for_network(
                self.ctx, 'network_id', 'host', 'agent_id')
            plugin.get_ports.assert_called_with(self.ctx, {
                'network_id': ['network_id'],
                'device_id': ['agent_id'],
                'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})

    def _test_prepare_direct_delete_dvr_internal_ports(self, port):
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_port.return_value = port
            self.mixin._router_exists = mock.Mock(return_value=True)
            self.assertRaises(exceptions.ServicePortInUse,
                              self.mixin.prevent_l3_port_deletion,
                              self.ctx,
                              port['id'])

    def test_prevent_delete_floatingip_agent_gateway_port(self):
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': l3_const.DEVICE_OWNER_AGENT_GW
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)

    def test_prevent_delete_csnat_port(self):
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)

    def test__create_gw_port_with_no_gateway(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True,
        }
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertTrue(router_db.extra_attributes.distributed)
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_create_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_create_snat_intf_ports_if_not_exists') as cs:
            self.mixin._create_gw_port(
                self.ctx, router_id, router_db, mock.ANY,
                mock.ANY)
            # Without a gateway no SNAT interface ports must be created.
            self.assertFalse(cs.call_count)

    def test_build_routers_list_with_gw_port_mismatch(self):
        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
        gw_ports = {}
        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
        self.assertIsNone(routers[0].get('gw_port'))

    def setup_port_has_ipv6_address(self, port):
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_port_has_ipv6_address') as pv6:
            pv6.return_value = True
            result = self.mixin._port_has_ipv6_address(port)
            return result, pv6

    def test__port_has_ipv6_address_for_dvr_snat_port(self):
        port = {
            'id': 'my_port_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
        }
        result, pv6 = self.setup_port_has_ipv6_address(port)
        self.assertFalse(result)
        self.assertFalse(pv6.called)

    def test__port_has_ipv6_address_for_non_snat_ports(self):
        port = {
            'id': 'my_port_id',
            'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
        }
        result, pv6 = self.setup_port_has_ipv6_address(port)
        self.assertTrue(result)
        self.assertTrue(pv6.called)

    def _helper_delete_floatingip_agent_gateway_port(self, port_host):
        ports = [{
            'id': 'my_port_id',
            portbindings.HOST_ID: 'foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        },
            {
            'id': 'my_new_port_id',
            portbindings.HOST_ID: 'my_foo_host',
            'network_id': 'ext_network_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }]
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = ports
            self.mixin.delete_floatingip_agent_gateway_port(
                self.ctx, port_host, 'ext_network_id')
            plugin.get_ports.assert_called_with(self.ctx, filters={
                'network_id': ['ext_network_id'],
                'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]})
            # With a host only that host's port goes; otherwise every
            # matching agent gateway port is removed.
            if port_host:
                plugin.ipam.delete_port.assert_called_once_with(
                    self.ctx, 'my_port_id')
            else:
                plugin.ipam.delete_port.assert_called_with(
                    self.ctx, 'my_new_port_id')

    def test_delete_floatingip_agent_gateway_port_without_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(None)

    def test_delete_floatingip_agent_gateway_port_with_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(
            'foo_host')

    def _setup_delete_current_gw_port_deletes_fip_agent_gw_port(
            self, port=None, gw_port=True):
        router = mock.MagicMock()
        router.extra_attributes.distributed = True
        if gw_port:
            gw_port_db = {
                'id': 'my_gw_id',
                'network_id': 'ext_net_id',
                'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
            }
            router.gw_port = gw_port_db
        else:
            router.gw_port = None
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_delete_current_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_get_router') as grtr,\
                mock.patch.object(
                    self.mixin,
                    'delete_csnat_router_interface_ports') as del_csnat_port,\
                mock.patch.object(
                    self.mixin,
                    'delete_floatingip_agent_gateway_port') as del_agent_gw_port,\
                mock.patch.object(
                    self.mixin.l3_rpc_notifier,
                    'delete_fipnamespace_for_ext_net') as del_fip:
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin.get_ports.return_value = port
            grtr.return_value = router
            self.mixin._delete_current_gw_port(
                self.ctx, router['id'], router, 'ext_network_id')
        return router, plugin, del_csnat_port, del_agent_gw_port, del_fip

    def test_delete_current_gw_port_deletes_fip_agent_gw_port_and_fipnamespace(
            self):
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port())
        self.assertTrue(d_csnat_port.called)
        self.assertTrue(d_agent_gw_port.called)
        d_csnat_port.assert_called_once_with(
            mock.ANY, rtr)
        d_agent_gw_port.assert_called_once_with(mock.ANY, None, 'ext_net_id')
        del_fip.assert_called_once_with(mock.ANY, 'ext_net_id')

    def test_delete_current_gw_port_never_calls_delete_fip_agent_gw_port(self):
        # Remaining gateway ports on the external net keep the agent
        # gateway ports and FIP namespace alive.
        port = [{
            'id': 'my_port_id',
            'network_id': 'ext_net_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        },
            {
            'id': 'my_new_port_id',
            'network_id': 'ext_net_id',
            'device_owner': l3_const.DEVICE_OWNER_ROUTER_GW
        }]
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
                port=port))
        self.assertTrue(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)
        d_csnat_port.assert_called_once_with(
            mock.ANY, rtr)

    def test_delete_current_gw_port_never_calls_delete_fipnamespace(self):
        rtr, plugin, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_fip_agent_gw_port(
                gw_port=False))
        self.assertFalse(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)

    def _floatingip_on_port_test_setup(self, hostid):
        router = {'id': 'foo_router_id', 'distributed': True}
        floatingip = {
            'id': _uuid(),
            'port_id': _uuid(),
            'router_id': 'foo_router_id',
            'host': hostid
        }
        if not hostid:
            hostid = 'not_my_host_id'
        routers = {
            'foo_router_id': router
        }
        fipagent = {
            'id': _uuid()
        }
        # NOTE: mock.patch is not needed here since self.mixin is created
        # fresh for each test. It doesn't work with some methods since the
        # mixin is tested in isolation (e.g. _get_agent_by_type_and_host).
        self.mixin._get_dvr_service_port_hostid = mock.Mock(
            return_value=hostid)
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        self.mixin._get_fip_sync_interfaces = mock.Mock(
            return_value='fip_interface')
        agent = mock.Mock()
        agent.id = fipagent['id']
        self.mixin._process_floating_ips_dvr(self.ctx, routers, [floatingip],
                                             hostid, agent)
        return (router, floatingip)

    def test_floatingip_on_port_not_host(self):
        router, fip = self._floatingip_on_port_test_setup(None)
        self.assertNotIn(l3_const.FLOATINGIP_KEY, router)
        self.assertNotIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)

    def test_floatingip_on_port_with_host(self):
        router, fip = self._floatingip_on_port_test_setup(_uuid())
        self.assertTrue(self.mixin._get_fip_sync_interfaces.called)
        self.assertIn(l3_const.FLOATINGIP_KEY, router)
        self.assertIn(l3_const.FLOATINGIP_AGENT_INTF_KEY, router)
        self.assertIn(fip, router[l3_const.FLOATINGIP_KEY])
        self.assertIn('fip_interface',
                      router[l3_const.FLOATINGIP_AGENT_INTF_KEY])

    def _setup_test_create_floatingip(
            self, fip, floatingip_db, router_db):
        port = {
            'id': '1234',
            portbindings.HOST_ID: 'myhost',
            'network_id': 'external_net'
        }
        with mock.patch.object(self.mixin, 'get_router') as grtr,\
                mock.patch.object(self.mixin,
                                  '_get_dvr_service_port_hostid') as vmp,\
                mock.patch.object(
                    self.mixin,
                    'create_fip_agent_gw_port_if_not_exists') as c_fip,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_update_fip_assoc'):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            self.mixin._update_fip_assoc(
                self.ctx, fip, floatingip_db, port)
        return c_fip

    def test_create_floatingip_agent_gw_port_with_dvr_router(self):
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        create_fip = (
            self._setup_test_create_floatingip(
                fip, floatingip, router))
        self.assertTrue(create_fip.called)

    def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'port_id': _uuid()
        }
        create_fip = (
            self._setup_test_create_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)

    def test_remove_router_interface_csnat_ports_removal(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        plugin = mock.MagicMock()
        with self.network() as net_ext,\
                self.subnet() as subnet1,\
                self.subnet(cidr='20.0.0.0/24') as subnet2:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet1['subnet']['id']})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet2['subnet']['id']})
            # Each interface subnet gets both a DVR interface port and a
            # centralized SNAT port.
            csnat_filters = {'device_owner':
                             [l3_const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(2, len(csnat_ports))
            dvr_filters = {'device_owner':
                           [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(2, len(dvr_ports))
            with mock.patch.object(manager.NeutronManager,
                                   'get_service_plugins') as get_svc_plugin:
                get_svc_plugin.return_value = {
                    plugin_const.L3_ROUTER_NAT: plugin}
                self.mixin.manager = manager
                self.mixin.remove_router_interface(
                    self.ctx, router['id'], {'port_id': dvr_ports[0]['id']})
            # Removing one interface must remove exactly its CSNAT port.
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))
            self.assertEqual(dvr_ports[1]['fixed_ips'][0]['subnet_id'],
                             csnat_ports[0]['fixed_ips'][0]['subnet_id'])
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(1, len(dvr_ports))

    def test__validate_router_migration_notify_advanced_services(self):
        router = {'name': 'foo_router', 'admin_state_up': False}
        router_db = self._create_router(router)
        with mock.patch.object(l3_dvr_db.registry, 'notify') as mock_notify:
            self.mixin._validate_router_migration(
                self.ctx, router_db, {'distributed': True})
            kwargs = {'context': self.ctx, 'router': router_db}
            mock_notify.assert_called_once_with(
                'router', 'before_update', self.mixin, **kwargs)

    def _test_update_arp_entry_for_dvr_service_port(
            self, device_owner, action):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            plugin = mock.Mock()
            l3_notify = self.mixin.l3_rpc_notifier = mock.Mock()
            gp.return_value = plugin
            port = {
                'id': 'my_port_id',
                'fixed_ips': [
                    {'subnet_id': '51edc9e0-24f9-47f2-8e1e-2a41cb691323',
                     'ip_address': '10.0.0.11'},
                    {'subnet_id': '2b7c8a07-6f8e-4937-8701-f1d5da1a807c',
                     'ip_address': '10.0.0.21'},
                    {'subnet_id': '48534187-f077-4e81-93ff-81ec4cc0ad3b',
                     'ip_address': 'fd45:1515:7e0:0:f816:3eff:fe1a:1111'}],
                'mac_address': 'my_mac',
                'device_owner': device_owner
            }
            dvr_port = {
                'id': 'dvr_port_id',
                'fixed_ips': mock.ANY,
                'device_owner': l3_const.DEVICE_OWNER_DVR_INTERFACE,
                'device_id': router['id']
            }
            plugin.get_ports.return_value = [dvr_port]
            # One ARP notification per fixed IP is expected.
            if action == 'add':
                self.mixin.update_arp_entry_for_dvr_service_port(
                    self.ctx, port)
                self.assertEqual(3, l3_notify.add_arp_entry.call_count)
            elif action == 'del':
                self.mixin.delete_arp_entry_for_dvr_service_port(
                    self.ctx, port)
                # FIX: was assertTrue(3, call_count), which always passed
                # because assertTrue's second argument is only the failure
                # message, not a comparison operand.
                self.assertEqual(3, l3_notify.del_arp_entry.call_count)

    def test_update_arp_entry_for_dvr_service_port_added(self):
        action = 'add'
        device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
        self._test_update_arp_entry_for_dvr_service_port(device_owner, action)

    def test_update_arp_entry_for_dvr_service_port_deleted(self):
        action = 'del'
        device_owner = l3_const.DEVICE_OWNER_LOADBALANCER
        self._test_update_arp_entry_for_dvr_service_port(device_owner, action)

    def test_add_router_interface_csnat_ports_failure(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with mock.patch.object(
                    self.mixin, '_add_csnat_router_interface_port') as f:
                f.side_effect = RuntimeError()
                self.assertRaises(
                    RuntimeError,
                    self.mixin.add_router_interface,
                    self.ctx, router['id'],
                    {'subnet_id': subnet['subnet']['id']})
                # A failed CSNAT port creation must roll back the interface
                # add: only the pre-existing gateway port remains.
                filters = {
                    'device_id': [router['id']],
                }
                router_ports = self.core_plugin.get_ports(self.ctx, filters)
                self.assertEqual(1, len(router_ports))
                self.assertEqual(l3_const.DEVICE_OWNER_ROUTER_GW,
                                 router_ports[0]['device_owner'])
| |
# -*- coding: utf-8 -*-
# Chemical symbols of the elements, indexed by atomic number - 1 (H .. Og).
symbols = tuple(
    "H He Li Be B C N O F Ne "
    "Na Mg Al Si P S Cl Ar K Ca "
    "Sc Ti V Cr Mn Fe Co Ni Cu Zn "
    "Ga Ge As Se Br Kr Rb Sr Y Zr "
    "Nb Mo Tc Ru Rh Pd Ag Cd In Sn "
    "Sb Te I Xe Cs Ba La Ce Pr Nd "
    "Pm Sm Eu Gd Tb Dy Ho Er Tm Yb "
    "Lu Hf Ta W Re Os Ir Pt Au Hg "
    "Tl Pb Bi Po At Rn Fr Ra Ac Th "
    "Pa U Np Pu Am Cm Bk Cf Es Fm "
    "Md No Lr Rf Db Sg Bh Hs Mt Ds "
    "Rg Cn Nh Fl Mc Lv Ts Og".split()
)
# Number of elements per period, and the running totals (i.e. the atomic
# numbers of the noble gases that close each period).
period_lengths = (2, 8, 8, 18, 18, 32, 32)
accum_period_lengths = (2, 10, 18, 36, 54, 86, 118)

# Map group number -> atomic numbers of its members.  The p-block groups
# 13-17 (icosagens, crystallogens, pnictogens, chalcogens, halogens) sit a
# fixed offset before each period-closing noble gas.
groups = {grp: tuple(gas - 18 + grp for gas in accum_period_lengths[1:])
          for grp in range(13, 18)}
groups[1] = (1,) + tuple(gas + 1 for gas in accum_period_lengths[:-1])  # alkali metals
groups[2] = tuple(gas + 2 for gas in accum_period_lengths[:-1])  # alkaline earth metals
groups[18] = accum_period_lengths  # noble gases
# Full English element names, indexed by atomic number - 1 (parallel to the
# ``symbols`` tuple above).
names = (
    "Hydrogen",
    "Helium",
    "Lithium",
    "Beryllium",
    "Boron",
    "Carbon",
    "Nitrogen",
    "Oxygen",
    "Fluorine",
    "Neon",
    "Sodium",
    "Magnesium",
    "Aluminium",
    "Silicon",
    "Phosphorus",
    "Sulfur",
    "Chlorine",
    "Argon",
    "Potassium",
    "Calcium",
    "Scandium",
    "Titanium",
    "Vanadium",
    "Chromium",
    "Manganese",
    "Iron",
    "Cobalt",
    "Nickel",
    "Copper",
    "Zinc",
    "Gallium",
    "Germanium",
    "Arsenic",
    "Selenium",
    "Bromine",
    "Krypton",
    "Rubidium",
    "Strontium",
    "Yttrium",
    "Zirconium",
    "Niobium",
    "Molybdenum",
    "Technetium",
    "Ruthenium",
    "Rhodium",
    "Palladium",
    "Silver",
    "Cadmium",
    "Indium",
    "Tin",
    "Antimony",
    "Tellurium",
    "Iodine",
    "Xenon",
    "Caesium",
    "Barium",
    "Lanthanum",
    "Cerium",
    "Praseodymium",
    "Neodymium",
    "Promethium",
    "Samarium",
    "Europium",
    "Gadolinium",
    "Terbium",
    "Dysprosium",
    "Holmium",
    "Erbium",
    "Thulium",
    "Ytterbium",
    "Lutetium",
    "Hafnium",
    "Tantalum",
    "Tungsten",
    "Rhenium",
    "Osmium",
    "Iridium",
    "Platinum",
    "Gold",
    "Mercury",
    "Thallium",
    "Lead",
    "Bismuth",
    "Polonium",
    "Astatine",
    "Radon",
    "Francium",
    "Radium",
    "Actinium",
    "Thorium",
    "Protactinium",
    "Uranium",
    "Neptunium",
    "Plutonium",
    "Americium",
    "Curium",
    "Berkelium",
    "Californium",
    "Einsteinium",
    "Fermium",
    "Mendelevium",
    "Nobelium",
    "Lawrencium",
    "Rutherfordium",
    "Dubnium",
    "Seaborgium",
    "Bohrium",
    "Hassium",
    "Meitnerium",
    "Darmstadtium",
    "Roentgenium",
    "Copernicium",
    "Nihonium",
    "Flerovium",
    "Moscovium",
    "Livermorium",
    "Tennessine",
    "Oganesson",
)
# Lower-cased names for case-insensitive lookup.  The paren stripping is a
# no-op for the current data (no name carries parentheses); presumably kept
# for provisional "(Name)" placeholders.
lower_names = tuple(n.lower().lstrip("(").rstrip(")") for n in names)
def atomic_number(name):
    """Return the atomic number for *name*.

    *name* may be a chemical symbol (e.g. ``"Fe"``, case-sensitive) or a
    full element name matched case-insensitively (e.g. ``"iron"``).
    Raises ``ValueError`` when the name is not recognised.
    """
    if name in symbols:
        return symbols.index(name) + 1
    # Fall back to the full element names, compared case-insensitively.
    return lower_names.index(name.lower()) + 1
# The data in '_relative_atomic_masses' is licensed under the CC-SA license
# https://en.wikipedia.org/w/index.php?title=List_of_elements&oldid=700476748
# One whitespace-separated token per element, in atomic-number order.
# "(n)" suffixes state the uncertainty in the last digit(s); bracketed
# "[n]" entries are mass numbers of the most stable isotope (elements with
# no stable isotope).
_relative_atomic_masses = (
    "1.008 4.002602(2) 6.94 9.0121831(5) 10.81 12.011 14.007 15.999"
    " 18.998403163(6) 20.1797(6) 22.98976928(2) 24.305 26.9815385(7) 28.085"
    " 30.973761998(5) 32.06 35.45 39.948(1) 39.0983(1) 40.078(4)"
    " 44.955908(5) 47.867(1) 50.9415(1) 51.9961(6) 54.938044(3) 55.845(2)"
    " 58.933194(4) 58.6934(4) 63.546(3) 65.38(2) 69.723(1) 72.630(8)"
    " 74.921595(6) 78.971(8) 79.904 83.798(2) 85.4678(3) 87.62(1)"
    " 88.90584(2) 91.224(2) 92.90637(2) 95.95(1) [98] 101.07(2) 102.90550(2)"
    " 106.42(1) 107.8682(2) 112.414(4) 114.818(1) 118.710(7) 121.760(1)"
    " 127.60(3) 126.90447(3) 131.293(6) 132.90545196(6) 137.327(7)"
    " 138.90547(7) 140.116(1) 140.90766(2) 144.242(3) [145] 150.36(2)"
    " 151.964(1) 157.25(3) 158.92535(2) 162.500(1) 164.93033(2) 167.259(3)"
    " 168.93422(2) 173.045(10) 174.9668(1) 178.49(2) 180.94788(2) 183.84(1)"
    " 186.207(1) 190.23(3) 192.217(3) 195.084(9) 196.966569(5) 200.592(3)"
    " 204.38 207.2(1) 208.98040(1) [209] [210] [222] [223] [226] [227]"
    " 232.0377(4) 231.03588(2) 238.02891(3) [237] [244] [243] [247] [247]"
    " [251] [252] [257] [258] [259] [266] [267] [268] [269] [270] [271]"
    " [278] [281] [282] [285] [286] [289] [290] [293] [294] [294]"
)
def _get_relative_atomic_masses():
    """Yield each element's atomic mass as a float.

    Parses the token notation of ``_relative_atomic_masses``:
    ``[n]`` becomes ``float(n)``; an ``(uncertainty)`` suffix is dropped;
    plain numbers pass straight through.
    """
    for token in _relative_atomic_masses.split():
        if token.startswith("[") and token.endswith("]"):
            yield float(token[1:-1])
        else:
            # partition() leaves plain tokens untouched and cuts "(...)".
            yield float(token.partition("(")[0])
# Materialise the masses once, indexed by atomic number - 1.
relative_atomic_masses = tuple(_get_relative_atomic_masses())
def mass_from_composition(composition):
    """Calculate the molecular mass from atomic weights.

    Parameters
    ----------
    composition: dict
        Maps atomic number (int) to its coefficient (int).  The special
        key 0 denotes charge, i.e. net electron deficiency.

    Returns
    -------
    float
        Molecular weight in atomic mass units.

    Examples
    --------
    >>> '%.2f' % mass_from_composition({0: -1, 1: 1, 8: 1})
    '17.01'
    """
    electron_mass = 5.489e-4  # in atomic mass units
    total = 0.0
    for number, coeff in composition.items():
        if number == 0:
            # A positive coefficient means missing electrons (cation),
            # which lowers the mass.
            total -= coeff * electron_mass
        else:
            total += coeff * relative_atomic_masses[number - 1]
    return total
| |
import sys
import datetime
import os
# Make sure the PyCharm helpers directory is first on sys.path so the
# bundled helper modules below can be imported.
helpers_dir = os.getenv("PYCHARM_HELPERS_DIR", sys.path[0])
if sys.path[0] != helpers_dir:
    sys.path.insert(0, helpers_dir)

from tcunittest import TeamcityTestResult
from tcmessages import TeamcityServiceMessages
from pycharm_run_utils import import_system_module
from pycharm_run_utils import adjust_sys_path, debug, getModuleName, PYTHON_VERSION_MAJOR

adjust_sys_path()

# Resolve stdlib modules through the helper (see
# pycharm_run_utils.import_system_module) instead of a plain import.
re = import_system_module("re")
doctest = import_system_module("doctest")
traceback = import_system_module("traceback")
class TeamcityDocTestResult(TeamcityTestResult):
    """
    DocTests result reporter extending TeamcityTestResult.

    Overrides the naming/location helpers (getTestName, getTestId, ...)
    because doctest "tests" are individual examples nested inside a suite
    (the enclosing DocTest), not unittest methods.
    """

    def getTestName(self, test):
        # An example is identified by its suite's name plus its own source.
        name = self.current_suite.name + test.source
        return name

    def getSuiteName(self, suite):
        # FIX: this method referenced the undefined name ``test`` and raised
        # NameError whenever called; it now consistently uses its ``suite``
        # parameter.
        if suite.source.rfind(".") == -1:
            name = self.current_suite.name + suite.source
        else:
            name = suite.source
        return name

    def getTestId(self, test):
        # Location in "file://<path>:<line>" form.  Example line numbers are
        # relative to the suite, so offset by the suite's own line number.
        file = os.path.realpath(self.current_suite.filename) if self.current_suite.filename else ""
        line_no = test.lineno
        if self.current_suite.lineno:
            line_no += self.current_suite.lineno
        return "file://" + file + ":" + str(line_no)

    def getSuiteLocation(self):
        file = os.path.realpath(self.current_suite.filename) if self.current_suite.filename else ""
        location = "file://" + file
        if self.current_suite.lineno:
            location += ":" + str(self.current_suite.lineno)
        return location

    def startTest(self, test):
        # Stamp the start time so __getDuration can report wall-clock time.
        setattr(test, "startTime", datetime.datetime.now())
        id = self.getTestId(test)
        self.messages.testStarted(self.getTestName(test), location=id)

    def startSuite(self, suite):
        self.current_suite = suite
        self.messages.testSuiteStarted(suite.name, location=self.getSuiteLocation())

    def stopSuite(self, suite):
        self.messages.testSuiteFinished(suite.name)

    def addFailure(self, test, err='', expected=None, actual=None):
        self.messages.testFailed(self.getTestName(test), expected=expected, actual=actual,
                                 message='Failure', details=err, duration=int(self.__getDuration(test)))

    def addError(self, test, err=''):
        self.messages.testError(self.getTestName(test),
                                message='Error', details=err, duration=self.__getDuration(test))

    def stopTest(self, test):
        duration = self.__getDuration(test)
        self.messages.testFinished(self.getTestName(test), duration=int(duration))

    def __getDuration(self, test):
        # Milliseconds elapsed since startTest stamped ``startTime``.
        start = getattr(test, "startTime", datetime.datetime.now())
        d = datetime.datetime.now() - start
        duration = d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
        return duration
class DocTestRunner(doctest.DocTestRunner):
    """
    Special runner for doctests.

    Collects DocTest suites via addTest/addTests and, when started,
    overrides the private ``__run`` method so that example outcomes are
    reported through TeamcityDocTestResult instead of being printed.
    """
    def __init__(self, checker=None, verbose=None, optionflags=0):
        doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose, optionflags=optionflags)
        self.stream = sys.stdout
        self.result = TeamcityDocTestResult(self.stream)
        #self.result.messages.testMatrixEntered()
        self._tests = []

    def addTests(self, tests):
        # Queue several DocTest suites for a later start().
        self._tests.extend(tests)

    def addTest(self, test):
        self._tests.append(test)

    def countTests(self):
        return len(self._tests)

    def start(self):
        # Run every queued suite through the doctest machinery.
        for test in self._tests:
            self.run(test)

    def __run(self, test, compileflags, out):
        # NOTE(review): the base class calls its private ``__run`` via the
        # mangled name _DocTestRunner__run; because this subclass is also
        # named DocTestRunner, defining ``__run`` here overrides that call —
        # presumably intentional, but confirm before renaming this class.
        failures = tries = 0
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state
        check = self._checker.check_output
        self.result.startSuite(test)
        for examplenum, example in enumerate(test.examples):
            # NOTE(review): ``failures`` is never incremented below, so
            # ``quiet`` can never become True here.
            quiet = (self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge this example's per-example option flags on top of the
            # runner-wide flags.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Honour the SKIP flag where this doctest version supports it.
            if hasattr(doctest, 'SKIP'):
                if self.optionflags & doctest.SKIP:
                    continue
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Synthetic filename so tracebacks point at the doctest example.
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            try:
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue()  # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue()  # ==== Example Finished ====
            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE  # guilty until proved innocent or insane
            # Classify the outcome: compare captured output (or the raised
            # exception message) against what the example expects.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += doctest._exception_traceback(exception)
                if example.exc_msg is None:
                    # Example did not expect any exception at all.
                    outcome = BOOM
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                elif self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL:
                    # Compare only the exception class part before ':'.
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                self.result.startTest(example)
                self.result.stopTest(example)
            elif outcome is FAILURE:
                self.result.startTest(example)
                err = self._failure_header(test, example) +\
                    self._checker.output_difference(example, got, self.optionflags)
                expected = getattr(example, "want", None)
                self.result.addFailure(example, err, expected=expected, actual=got)
            elif outcome is BOOM:
                self.result.startTest(example)
                err = self._failure_header(test, example) +\
                    'Exception raised:\n' + doctest._indent(doctest._exception_traceback(exception))
                self.result.addError(example, err)
            else:
                assert False, ("unknown outcome", outcome)
        self.optionflags = original_optionflags
        self.result.stopSuite(test)
# Registry of modules loaded so far (module name -> module object) and the
# shared runner that accumulates all discovered doctests.
modules = {}
runner = DocTestRunner()
def _load_file(moduleName, fileName):
if sys.version_info >= (3, 5):
import importlib
return importlib.import_module(moduleName, fileName)
else:
import imp
return imp.load_source(moduleName, fileName)
def loadSource(fileName):
    """Load *fileName* as a module and remember it in the global ``modules``.

    A local clone of the utrunner helper: it must live here because the
    loaded modules are stored in this file's global registry.
    """
    moduleName = os.path.splitext(os.path.basename(fileName))[0]
    # Django relies on the module name of "models" files, so qualify it
    # with the application package (the parent directory name).
    if moduleName == "models" and os.getenv('DJANGO_SETTINGS_MODULE'):
        app_package = os.path.realpath(fileName).split(os.sep)[-2]
        moduleName = app_package + ".models"
    # Disambiguate repeated base names with an increasing numeric suffix
    # to prevent name collisions.
    if moduleName in modules:
        base = moduleName
        suffix = 2
        while getModuleName(base, suffix) in modules:
            suffix += 1
        moduleName = getModuleName(base, suffix)
    debug("/ Loading " + fileName + " as " + moduleName)
    module = _load_file(moduleName, fileName)
    modules[moduleName] = module
    return module
def testfile(filename):
    """Parse doctests from a plain text file and queue them on the runner."""
    # doctest._load_testfile grew an encoding parameter in Python 3.
    if PYTHON_VERSION_MAJOR == 3:
        load_args = (filename, None, False, "utf-8")
    else:
        load_args = (filename, None, False)
    text, filename = doctest._load_testfile(*load_args)
    test = doctest.DocTestParser().get_doctest(
        text, {'__name__': '__main__'}, os.path.basename(filename),
        filename, 0)
    # Files without interactive examples contribute no tests.
    if test.examples:
        runner.addTest(test)
def testFilesInFolder(folder):
    # Convenience wrapper: collect doctests from every file in ``folder``
    # (default pattern matches all file names).
    return testFilesInFolderUsingPattern(folder)
def testFilesInFolderUsingPattern(folder, pattern = ".*"):
    """Walk *folder*, collecting doctests from files whose name matches
    *pattern*.

    Python sources are imported (and returned as a list); other regular
    files are parsed as text doctest files.  Compiled artifacts (``.pyc``,
    Jython ``$py.class``) are skipped.
    """
    # NOTE: deliberately shadows the module-level ``modules`` registry --
    # this list holds only the modules loaded by this call.
    modules = []
    matcher = re.compile(pattern)
    for root, _dirs, files in os.walk(folder):
        for name in files:
            if not matcher.match(name):
                continue
            path = os.path.join(root, name)
            if name.endswith(".py"):
                modules.append(loadSource(path))
            elif (not (name.endswith(".pyc") or name.endswith("$py.class"))
                  and os.path.isfile(path)):
                testfile(path)
    return modules
if __name__ == "__main__":
    finder = doctest.DocTestFinder()
    # Each CLI argument selects a doctest target, in one of three forms:
    #   "<path-or-folder>[;pattern]"        -> module, folder or text file
    #   "<path>::<class>"                   -> a class in a module
    #   "<path>::<class-or-empty>::<name>"  -> a method or function
    for arg in sys.argv[1:]:
        arg = arg.strip()
        if len(arg) == 0:
            continue
        a = arg.split("::")
        if len(a) == 1:
            # From module or folder
            a_splitted = a[0].split(";")
            if len(a_splitted) != 1:
                # means we have pattern to match against
                if a_splitted[0].endswith("/"):
                    debug("/ from folder " + a_splitted[0] + ". Use pattern: " + a_splitted[1])
                    modules = testFilesInFolderUsingPattern(a_splitted[0], a_splitted[1])
            else:
                if a[0].endswith("/"):
                    debug("/ from folder " + a[0])
                    modules = testFilesInFolder(a[0])
                else:
                    # from file
                    debug("/ from module " + a[0])
                    # for doctests from non-python file
                    if a[0].rfind(".py") == -1:
                        testfile(a[0])
                        modules = []
                    else:
                        modules = [loadSource(a[0])]
            # for doctests
            for module in modules:
                tests = finder.find(module, module.__name__)
                for test in tests:
                    if test.examples:
                        runner.addTest(test)
        elif len(a) == 2:
            # From testcase
            debug("/ from class " + a[1] + " in " + a[0])
            try:
                module = loadSource(a[0])
            except SyntaxError:
                raise NameError('File "%s" is not python file' % (a[0], ))
            if hasattr(module, a[1]):
                testcase = getattr(module, a[1])
                tests = finder.find(testcase, getattr(testcase, "__name__", None))
                runner.addTests(tests)
            else:
                raise NameError('Module "%s" has no class "%s"' % (a[0], a[1]))
        else:
            # From method in class or from function
            try:
                module = loadSource(a[0])
            except SyntaxError:
                raise NameError('File "%s" is not python file' % (a[0], ))
            if a[1] == "":
                # test function, not method
                debug("/ from method " + a[2] + " in " + a[0])
                if hasattr(module, a[2]):
                    testcase = getattr(module, a[2])
                    tests = finder.find(testcase, getattr(testcase, "__name__", None))
                    runner.addTests(tests)
                else:
                    raise NameError('Module "%s" has no method "%s"' % (a[0], a[2]))
            else:
                debug("/ from method " + a[2] + " in class " + a[1] + " in " + a[0])
                if hasattr(module, a[1]):
                    testCaseClass = getattr(module, a[1])
                    if hasattr(testCaseClass, a[2]):
                        testcase = getattr(testCaseClass, a[2])
                        # Unbound methods may lack __name__; fall back to
                        # the class name for the doctest finder.
                        name = getattr(testcase, "__name__", None)
                        if not name:
                            name = testCaseClass.__name__
                        tests = finder.find(testcase, name)
                        runner.addTests(tests)
                    else:
                        raise NameError('Class "%s" has no function "%s"' % (testCaseClass, a[2]))
                else:
                    raise NameError('Module "%s" has no class "%s"' % (module, a[1]))
    # Announce the total count up front, then run everything.
    debug("/ Loaded " + str(runner.countTests()) + " tests")
    TeamcityServiceMessages(sys.stdout).testCount(runner.countTests())
    runner.start()
| |
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
import ConfigParser
import json
import hashlib
import logging
import netaddr
import os
import sys
import traceback
import urllib
import uuid
from common import config
from quantum.plugins.nicira.nicira_nvp_plugin.api_client import client_eventlet
import NvpApiClient
import nvplib
from nvp_plugin_version import PLUGIN_VERSION
from quantum.api.v2 import attributes
from quantum.common import constants
from quantum.common import exceptions as exception
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import models_v2
from quantum.openstack.common import cfg
from quantum.openstack.common import rpc
CONFIG_FILE = "nvp.ini"
CONFIG_FILE_PATHS = []
if os.environ.get('QUANTUM_HOME', None):
CONFIG_FILE_PATHS.append('%s/etc' % os.environ['QUANTUM_HOME'])
CONFIG_FILE_PATHS.append("/etc/quantum/plugins/nicira")
LOG = logging.getLogger("QuantumPlugin")
def parse_config():
    """Parse the supplied plugin configuration.

    Reads database, NVP-wide and per-cluster settings from the globally
    initialised ``cfg.CONF`` object (populated from nvp.ini).

    :returns: A tuple ``(db_options, nvp_options, clusters_options)``:
        'db_options' holds SQL connection/retry parameters,
        'nvp_options' holds plugin-wide NVP parameters
        (max_lp_per_bridged_ls, failover_time, concurrent_connections),
        'clusters_options' is a list of dicts, one per configured NVP
        cluster, with the cluster's name, uuids and controller
        connection strings.
    """
    db_options = {"sql_connection": cfg.CONF.DATABASE.sql_connection}
    db_options.update({'base': models_v2.model_base.BASEV2})
    sql_max_retries = cfg.CONF.DATABASE.sql_max_retries
    db_options.update({"sql_max_retries": sql_max_retries})
    reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
    db_options.update({"reconnect_interval": reconnect_interval})
    nvp_options = {'max_lp_per_bridged_ls': cfg.CONF.NVP.max_lp_per_bridged_ls}
    nvp_options.update({'failover_time': cfg.CONF.NVP.failover_time})
    nvp_options.update({'concurrent_connections':
                        cfg.CONF.NVP.concurrent_connections})
    nvp_conf = config.ClusterConfigOptions(cfg.CONF)
    cluster_names = config.register_cluster_groups(nvp_conf)
    nvp_conf.log_opt_values(LOG, logging.DEBUG)
    clusters_options = []
    for cluster_name in cluster_names:
        clusters_options.append(
            {'name': cluster_name,
             'default_tz_uuid':
             nvp_conf[cluster_name].default_tz_uuid,
             'nvp_cluster_uuid':
             nvp_conf[cluster_name].nvp_cluster_uuid,
             'nova_zone_id':
             nvp_conf[cluster_name].nova_zone_id,
             'nvp_controller_connection':
             nvp_conf[cluster_name].nvp_controller_connection, })
    LOG.debug("cluster options:%s", clusters_options)
    return db_options, nvp_options, clusters_options
class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
    """RPC callback handler servicing DHCP-agent requests for the plugin."""

    # Default RPC API version advertised by this callback class.
    RPC_API_VERSION = '1.0'

    def create_rpc_dispatcher(self):
        """Build the RPC dispatcher targeting this manager.

        Subclasses that need a different API version, or that dispatch to
        more than one target class, should override this method.
        """
        return q_rpc.PluginRpcDispatcher([self])
class NVPCluster(object):
    """Encapsulates controller connection and api_client for a cluster.

    Accessed within the NvpPluginV2 class.

    Each element in the self.controllers list is a dictionary that
    contains the following keys:
        ip, port, user, password, default_tz_uuid, uuid, zone
    plus the per-request timeouts/retry settings. The first controller
    in the list is the primary one: the convenience properties below
    (host, port, user, ...) all read from ``controllers[0]``.
    """

    def __init__(self, name):
        # Cluster display name; exposed via the ``name`` property.
        self._name = name
        # Ordered list of controller parameter dicts.
        self.controllers = []
        # Set by the plugin after construction (NVPApiHelper instance).
        self.api_client = None

    def __repr__(self):
        # NOTE: JSON-like output for debugging only; not strict JSON.
        ss = ['{ "NVPCluster": [']
        ss.append('{ "name" : "%s" }' % self.name)
        ss.append(',')
        for c in self.controllers:
            ss.append(str(c))
            ss.append(',')
        ss.append('] }')
        return ''.join(ss)

    def add_controller(self, ip, port, user, password, request_timeout,
                       http_timeout, retries, redirects,
                       default_tz_uuid, uuid=None, zone=None):
        """Add a new set of controller parameters.

        :param ip: IP address of controller.
        :param port: port controller is listening on.
        :param user: user name.
        :param password: user password.
        :param request_timeout: timeout for an entire API request.
        :param http_timeout: timeout for a connect to a controller.
        :param retries: maximum number of request retries.
        :param redirects: maximum number of server redirect responses to
            follow.
        :param default_tz_uuid: default transport zone uuid.
        :param uuid: UUID of this cluster (used in MDI configs).
        :param zone: Zone of this cluster (used in MDI configs).
        :raises ValueError: if a numeric parameter cannot be converted
            to int.
        """
        # Build the record explicitly instead of via the fragile
        # ``locals()`` introspection trick the original code used: that
        # approach silently breaks if a parameter is renamed and hides
        # which keys end up in the dict.
        controller_dict = {
            'ip': ip,
            'user': user,
            'password': password,
            'default_tz_uuid': default_tz_uuid,
            'uuid': uuid,
            'zone': zone,
            # These may arrive as strings (parsed from the config file's
            # colon-separated connection string); normalize to int.
            'port': int(port),
            'request_timeout': int(request_timeout),
            'http_timeout': int(http_timeout),
            'retries': int(retries),
            'redirects': int(redirects),
        }
        self.controllers.append(controller_dict)

    def get_controller(self, idx):
        """Return the controller parameter dict at position *idx*."""
        return self.controllers[idx]

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, val=None):
        self._name = val

    @property
    def host(self):
        return self.controllers[0]['ip']

    @property
    def port(self):
        return self.controllers[0]['port']

    @property
    def user(self):
        return self.controllers[0]['user']

    @property
    def password(self):
        return self.controllers[0]['password']

    @property
    def request_timeout(self):
        return self.controllers[0]['request_timeout']

    @property
    def http_timeout(self):
        return self.controllers[0]['http_timeout']

    @property
    def retries(self):
        return self.controllers[0]['retries']

    @property
    def redirects(self):
        return self.controllers[0]['redirects']

    @property
    def default_tz_uuid(self):
        return self.controllers[0]['default_tz_uuid']

    @property
    def zone(self):
        return self.controllers[0]['zone']

    @property
    def uuid(self):
        return self.controllers[0]['uuid']
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
    """
    NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
    functionality using NVP.

    Most operations are performed twice: once against the Quantum SQL
    database (via the QuantumDbPluginV2 base class) and once against the
    NVP controllers (via nvplib); several methods raise when the two
    stores are found out of sync.
    """
    def __init__(self, loglevel=None):
        # Optionally raise verbosity of this module and its helpers.
        if loglevel:
            logging.basicConfig(level=loglevel)
            nvplib.LOG.setLevel(loglevel)
            NvpApiClient.LOG.setLevel(loglevel)
        self.db_opts, self.nvp_opts, self.clusters_opts = parse_config()
        self.clusters = []
        for c_opts in self.clusters_opts:
            # Password is guaranteed to be the same across all controllers
            # in the same NVP cluster.
            cluster = NVPCluster(c_opts['name'])
            # Each connection string is colon-separated controller
            # parameters; the cluster-level uuids/zone are appended.
            for controller_connection in c_opts['nvp_controller_connection']:
                args = controller_connection.split(':')
                try:
                    args.extend([c_opts['default_tz_uuid'],
                                 c_opts['nvp_cluster_uuid'],
                                 c_opts['nova_zone_id']])
                    cluster.add_controller(*args)
                except Exception:
                    LOG.exception("Invalid connection parameters for "
                                  "controller %s in cluster %s",
                                  controller_connection,
                                  c_opts['name'])
                    raise
            api_providers = [(x['ip'], x['port'], True)
                             for x in cluster.controllers]
            cluster.api_client = NvpApiClient.NVPApiHelper(
                api_providers, cluster.user, cluster.password,
                request_timeout=cluster.request_timeout,
                http_timeout=cluster.http_timeout,
                retries=cluster.retries,
                redirects=cluster.redirects,
                failover_time=self.nvp_opts['failover_time'],
                concurrent_connections=self.nvp_opts['concurrent_connections'])
            # TODO(salvatore-orlando): do login at first request,
            # not when plugin, is instantiated
            cluster.api_client.login()
            # TODO(pjb): What if the cluster isn't reachable this
            # instant?  It isn't good to fall back to invalid cluster
            # strings.
            # Default for future-versions
            self.clusters.append(cluster)
        # Connect and configure ovs_quantum db
        options = {
            'sql_connection': self.db_opts['sql_connection'],
            'sql_max_retries': self.db_opts['sql_max_retries'],
            'reconnect_interval': self.db_opts['reconnect_interval'],
            'base': models_v2.model_base.BASEV2,
        }
        db.configure_db(options)
        self.setup_rpc()
    def setup_rpc(self):
        # RPC support for dhcp: register this plugin as a consumer on the
        # plugin topic so DHCP agents can reach it.
        self.topic = topics.PLUGIN
        self.conn = rpc.create_connection(new=True)
        self.dispatcher = NVPRpcCallbacks().create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
    @property
    def cluster(self):
        # Convenience accessor for the first (default) cluster, or None
        # when no clusters were configured.
        if len(self.clusters):
            return self.clusters[0]
        return None
    def clear_state(self):
        # Wipe all NVP state on the default cluster (delegates to nvplib).
        nvplib.clear_state(self.clusters[0])
    def get_all_networks(self, tenant_id, **kwargs):
        # Aggregate networks for *tenant_id* across every cluster.
        networks = []
        for c in self.clusters:
            networks.extend(nvplib.get_all_networks(c, tenant_id, networks))
        LOG.debug("get_all_networks() completed for tenant %s: %s" % (
            tenant_id, networks))
        return networks
    def create_network(self, context, network):
        """Create the network in NVP first, then persist it in Quantum.

        :returns: the network mapping produced by the Quantum DB base
            class (id, name, tenant_id, admin_state_up, status, subnets,
            ...); the 'id' is taken from the logical switch created in
            NVP.
        :raises: exception.NoImplementedError
        """
        # FIXME(arosen) implement admin_state_up = False in NVP
        if network['network']['admin_state_up'] is False:
            LOG.warning("Network with admin_state_up=False are not yet "
                        "supported by this plugin. Ignoring setting for "
                        "network %s",
                        network['network'].get('name', '<unknown>'))
        tenant_id = self._get_tenant_id_for_create(context, network)
        # TODO(salvatore-orlando): if the network is shared this should be
        # probably stored into the lswitch with a tag
        # TODO(salvatore-orlando): Important - provider networks support
        # (might require a bridged TZ)
        net = nvplib.create_network(network['network']['tenant_id'],
                                    network['network']['name'],
                                    clusters=self.clusters)
        # Reuse the NVP logical-switch uuid as the Quantum network id.
        network['network']['id'] = net['net-id']
        return super(NvpPluginV2, self).create_network(context, network)
    def delete_network(self, context, id):
        """
        Deletes the network with the specified network identifier
        belonging to the specified tenant.

        :returns: None
        :raises: exception.NetworkInUse
        :raises: exception.NetworkNotFound
        """
        # Remove from the Quantum DB first, then from every cluster that
        # hosts a logical switch for this network.
        super(NvpPluginV2, self).delete_network(context, id)
        pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
        for (cluster, switches) in pairs:
            nvplib.delete_networks(cluster, id, switches)
        LOG.debug("delete_network() completed for tenant: %s" %
                  context.tenant_id)
    def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
        """Figure out the set of lswitches on each cluster that maps to this
        network id.

        :returns: list of (cluster, [lswitch_uuid, ...]) pairs; clusters
            without a matching switch are skipped.
        :raises: exception.NetworkNotFound when no cluster knows the id.
        """
        pairs = []
        for c in self.clusters:
            lswitches = []
            try:
                ls = nvplib.get_network(c, netw_id)
                lswitches.append(ls['uuid'])
            except exception.NetworkNotFound:
                continue
            pairs.append((c, lswitches))
        if len(pairs) == 0:
            raise exception.NetworkNotFound(net_id=netw_id)
        LOG.debug("Returning pairs for network: %s" % (pairs))
        return pairs
    def get_network(self, context, id, fields=None):
        """
        Retrieves all attributes of the network, NOT including
        the ports of that network.

        :returns: a mapping with id, name, tenant_id, admin_state_up,
            status, shared and subnets keys; the name is read from the
            NVP logical switch, the rest from the Quantum DB.
        :raises: exception.NetworkNotFound
        :raises: exception.QuantumException
        """
        result = {}
        lswitch_query = "&uuid=%s" % id
        # always look for the tenant_id in the resource itself rather than
        # the context, as with shared networks context.tenant_id and
        # network['tenant_id'] might differ on GETs
        # go to the plugin DB and fetch the network
        network = self._get_network(context, id)
        # TODO(salvatore-orlando): verify whether the query on os_tid is
        # redundant or not.
        if context.is_admin is False:
            tenant_query = ("&tag=%s&tag_scope=os_tid"
                            % network['tenant_id'])
        else:
            tenant_query = ""
        # Then fetch the corresponding logical switch in NVP as well
        # TODO(salvatore-orlando): verify whether the step on NVP
        # can be completely avoided
        lswitch_url_path = (
            "/ws.v1/lswitch?"
            "fields=uuid,display_name%s%s"
            % (tenant_query, lswitch_query))
        try:
            # Stop at the first cluster that knows this switch.
            for c in self.clusters:
                lswitch_results = nvplib.get_all_query_pages(
                    lswitch_url_path, c)
                if lswitch_results:
                    result['lswitch-display-name'] = (
                        lswitch_results[0]['display_name'])
                    break
        except Exception:
            LOG.error("Unable to get switches: %s" % traceback.format_exc())
            raise exception.QuantumException()
        if 'lswitch-display-name' not in result:
            raise exception.NetworkNotFound(net_id=id)
        # Fetch network in quantum
        quantum_db = super(NvpPluginV2, self).get_network(context, id, fields)
        d = {'id': id,
             'name': result['lswitch-display-name'],
             'tenant_id': network['tenant_id'],
             'admin_state_up': True,
             'status': constants.NET_STATUS_ACTIVE,
             'shared': network['shared'],
             'subnets': quantum_db.get('subnets', [])}
        LOG.debug("get_network() completed for tenant %s: %s" % (
            context.tenant_id, d))
        return d
    def get_networks(self, context, filters=None, fields=None):
        """
        Retrieves all attributes of the networks, NOT including
        the ports of those networks.

        Cross-checks the Quantum DB against the logical switches known
        to NVP; name and status are taken from NVP.

        :returns: a list of network mappings (optionally restricted to
            *fields*).
        :raises: exception.NetworkNotFound
        :raises: exception.QuantumException
        """
        result = {}
        nvp_lswitches = []
        quantum_lswitches = (
            super(NvpPluginV2, self).get_networks(context, filters))
        # Build the os_tid tag filter: admins with no explicit tenant
        # filter see everything.
        if context.is_admin and not filters.get("tenant_id"):
            tenant_filter = ""
        elif filters.get("tenant_id"):
            tenant_filter = ""
            for tenant in filters.get("tenant_id"):
                tenant_filter += "&tag=%s&tag_scope=os_tid" % tenant
        else:
            tenant_filter = "&tag=%s&tag_scope=os_tid" % context.tenant_id
        lswitch_filters = "uuid,display_name,fabric_status"
        lswitch_url_path = (
            "/ws.v1/lswitch?fields=%s&relations=LogicalSwitchStatus%s"
            % (lswitch_filters, tenant_filter))
        try:
            for c in self.clusters:
                res = nvplib.get_all_query_pages(
                    lswitch_url_path, c)
                nvp_lswitches.extend(res)
        except Exception:
            LOG.error("Unable to get switches: %s" % traceback.format_exc())
            raise exception.QuantumException()
        # TODO (Aaron) This can be optimized
        if filters.get("id"):
            filtered_lswitches = []
            for nvp_lswitch in nvp_lswitches:
                for id in filters.get("id"):
                    if id == nvp_lswitch['uuid']:
                        filtered_lswitches.append(nvp_lswitch)
            nvp_lswitches = filtered_lswitches
        # Merge NVP status/name into the Quantum records; every Quantum
        # network must have a matching NVP switch.
        for quantum_lswitch in quantum_lswitches:
            Found = False
            for nvp_lswitch in nvp_lswitches:
                if nvp_lswitch["uuid"] == quantum_lswitch["id"]:
                    if (nvp_lswitch["_relations"]["LogicalSwitchStatus"]
                            ["fabric_status"]):
                        quantum_lswitch["status"] = constants.NET_STATUS_ACTIVE
                    else:
                        quantum_lswitch["status"] = constants.NET_STATUS_DOWN
                    quantum_lswitch["name"] = nvp_lswitch["display_name"]
                    nvp_lswitches.remove(nvp_lswitch)
                    Found = True
                    break
            if not Found:
                raise Exception("Quantum and NVP Databases are out of Sync!")
        # do not make the case in which switches are found in NVP
        # but not in Quantum catastrophic.
        if len(nvp_lswitches):
            LOG.warning("Found %s logical switches not bound "
                        "to Quantum networks. Quantum and NVP are "
                        "potentially out of sync", len(nvp_lswitches))
        LOG.debug("get_networks() completed for tenant %s" % context.tenant_id)
        if fields:
            ret_fields = []
            for quantum_lswitch in quantum_lswitches:
                row = {}
                for field in fields:
                    row[field] = quantum_lswitch[field]
                ret_fields.append(row)
            return ret_fields
        return quantum_lswitches
    def update_network(self, context, id, network):
        """
        Updates the properties of a particular Virtual Network.

        Only the 'name' field is propagated to NVP; everything else is
        handled by the Quantum DB base class.

        :returns: the updated network mapping.
        :raises: exception.NetworkNotFound
        :raises: exception.NoImplementedError
        """
        if network["network"].get("admin_state_up"):
            # NOTE(review): this inner branch looks unreachable — the outer
            # .get() is only truthy when admin_state_up is True, so the
            # "is False" comparison below can never hold. Confirm intent.
            if network['network']["admin_state_up"] is False:
                raise exception.NotImplementedError("admin_state_up=False "
                                                    "networks are not "
                                                    "supported.")
        params = {}
        params["network"] = network["network"]
        pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
        #Only field to update in NVP is name
        if network['network'].get("name"):
            for (cluster, switches) in pairs:
                for switch in switches:
                    result = nvplib.update_network(cluster, switch, **params)
        LOG.debug("update_network() completed for tenant: %s" %
                  context.tenant_id)
        return super(NvpPluginV2, self).update_network(context, id, network)
    def get_ports(self, context, filters=None, fields=None):
        """
        Returns all ports from given tenant.

        Quantum port records are enriched with admin_state_up, name and
        status read from the corresponding NVP logical ports (matched via
        the q_port_id tag).

        :returns: a list of port mappings (optionally restricted to
            *fields*).
        :raises: exception.NetworkNotFound
        """
        quantum_lports = super(NvpPluginV2, self).get_ports(context, filters)
        vm_filter = ""
        tenant_filter = ""
        # This is used when calling delete_network. Quantum checks to see if
        # the network has any ports.
        if filters.get("network_id"):
            # FIXME (Aaron) If we get more than one network_id this won't work
            lswitch = filters["network_id"][0]
        else:
            lswitch = "*"
        if filters.get("device_id"):
            # Device ids are stored in NVP as sha1 hashes under the vm_id
            # tag scope.
            for vm_id in filters.get("device_id"):
                vm_filter = ("%stag_scope=vm_id&tag=%s&" % (vm_filter,
                             hashlib.sha1(vm_id).hexdigest()))
        else:
            vm_id = ""
        if filters.get("tenant_id"):
            for tenant in filters.get("tenant_id"):
                tenant_filter = ("%stag_scope=os_tid&tag=%s&" %
                                 (tenant_filter, tenant))
        nvp_lports = {}
        lport_fields_str = ("tags,admin_status_enabled,display_name,"
                            "fabric_status_up")
        try:
            for c in self.clusters:
                lport_query_path = (
                    "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
                    "&relations=LogicalPortStatus" %
                    (lswitch, lport_fields_str, vm_filter, tenant_filter))
                ports = nvplib.get_all_query_pages(lport_query_path, c)
                if ports:
                    # Index NVP ports by their Quantum port id tag.
                    for port in ports:
                        for tag in port["tags"]:
                            if tag["scope"] == "q_port_id":
                                nvp_lports[tag["tag"]] = port
        except Exception:
            LOG.error("Unable to get ports: %s" % traceback.format_exc())
            raise exception.QuantumException()
        lports = []
        for quantum_lport in quantum_lports:
            try:
                quantum_lport["admin_state_up"] = (
                    nvp_lports[quantum_lport["id"]]["admin_status_enabled"])
                quantum_lport["name"] = (
                    nvp_lports[quantum_lport["id"]]["display_name"])
                if (nvp_lports[quantum_lport["id"]]
                        ["_relations"]
                        ["LogicalPortStatus"]
                        ["fabric_status_up"]):
                    quantum_lport["status"] = constants.PORT_STATUS_ACTIVE
                else:
                    quantum_lport["status"] = constants.PORT_STATUS_DOWN
                del nvp_lports[quantum_lport["id"]]
                lports.append(quantum_lport)
            except KeyError:
                raise Exception("Quantum and NVP Databases are out of Sync!")
        # do not make the case in which ports are found in NVP
        # but not in Quantum catastrophic.
        if len(nvp_lports):
            LOG.warning("Found %s logical ports not bound "
                        "to Quantum ports. Quantum and NVP are "
                        "potentially out of sync", len(nvp_lports))
        if fields:
            ret_fields = []
            for lport in lports:
                row = {}
                for field in fields:
                    row[field] = lport[field]
                ret_fields.append(row)
            return ret_fields
        return lports
    def create_port(self, context, port):
        """
        Creates a port on the specified Virtual Network.

        The port is first allocated in the Quantum DB (admin_state_up
        forced to False), then created and plugged in NVP; on NVP failure
        the Quantum port is rolled back. Finally the Quantum record is
        flipped to admin_state_up=True.

        :returns: the updated Quantum port mapping.
        :raises: exception.NetworkNotFound
        :raises: exception.StateInvalid
        """
        # Set admin_state_up False since not created in NVP set
        port["port"]["admin_state_up"] = False
        # First we allocate port in quantum database
        try:
            quantum_db = super(NvpPluginV2, self).create_port(context, port)
        except Exception as e:
            raise e
        # Update fields obtained from quantum db
        port["port"].update(quantum_db)
        # We want port to be up in NVP
        port["port"]["admin_state_up"] = True
        params = {}
        params["max_lp_per_bridged_ls"] = \
            self.nvp_opts["max_lp_per_bridged_ls"]
        params["port"] = port["port"]
        params["clusters"] = self.clusters
        tenant_id = self._get_tenant_id_for_create(context, port["port"])
        try:
            port["port"], nvp_port_id = nvplib.create_port(tenant_id,
                                                           **params)
            nvplib.plug_interface(self.clusters, port["port"]["network_id"],
                                  nvp_port_id, "VifAttachment",
                                  port["port"]["id"])
        except Exception as e:
            # failed to create port in NVP delete port from quantum_db
            super(NvpPluginV2, self).delete_port(context, port["port"]["id"])
            raise e
        d = {"port-id": port["port"]["id"],
             "port-op-status": port["port"]["status"]}
        LOG.debug("create_port() completed for tenant %s: %s" %
                  (tenant_id, d))
        # update port with admin_state_up True
        port_update = {"port": {"admin_state_up": True}}
        return super(NvpPluginV2, self).update_port(context,
                                                    port["port"]["id"],
                                                    port_update)
    def update_port(self, context, id, port):
        """
        Updates the properties of a specific port on the
        specified Virtual Network, both in NVP and in the Quantum DB.

        :returns: the updated Quantum port mapping.
        :raises: exception.StateInvalid
        :raises: exception.PortNotFound
        """
        params = {}
        quantum_db = super(NvpPluginV2, self).get_port(context, id)
        # Locate the NVP logical port (and its cluster) via the q_port_id
        # tag before pushing the update.
        port_nvp, cluster = (
            nvplib.get_port_by_quantum_tag(self.clusters,
                                           quantum_db["network_id"], id))
        LOG.debug("Update port request: %s" % (params))
        params["cluster"] = cluster
        params["port"] = port["port"]
        params["port"]["id"] = quantum_db["id"]
        params["port"]["tenant_id"] = quantum_db["tenant_id"]
        result = nvplib.update_port(quantum_db["network_id"],
                                    port_nvp["uuid"], **params)
        LOG.debug("update_port() completed for tenant: %s" % context.tenant_id)
        return super(NvpPluginV2, self).update_port(context, id, port)
    def delete_port(self, context, id):
        """
        Deletes a port on a specified Virtual Network,
        if the port contains a remote interface attachment,
        the remote interface is first un-plugged and then the port
        is deleted.

        :returns: None
        :raises: exception.PortInUse
        :raises: exception.PortNotFound
        :raises: exception.NetworkNotFound
        """
        # '*' searches every lswitch for the tagged port.
        port, cluster = nvplib.get_port_by_quantum_tag(self.clusters,
                                                       '*', id)
        if port is None:
            raise exception.PortNotFound(port_id=id)
        # TODO(bgh): if this is a bridged network and the lswitch we just got
        # back will have zero ports after the delete we should garbage collect
        # the lswitch.
        nvplib.delete_port(cluster, port)
        LOG.debug("delete_port() completed for tenant: %s" % context.tenant_id)
        return super(NvpPluginV2, self).delete_port(context, id)
    def get_port(self, context, id, fields=None):
        """
        Retrieve a port, merging admin state and operational status from
        the NVP logical port into the Quantum DB record.

        :returns: the Quantum port mapping with admin_state_up and status
            refreshed from NVP.
        :raises: exception.PortNotFound
        :raises: exception.NetworkNotFound
        """
        quantum_db = super(NvpPluginV2, self).get_port(context, id, fields)
        port, cluster = (
            nvplib.get_port_by_quantum_tag(self.clusters,
                                           quantum_db["network_id"], id))
        quantum_db["admin_state_up"] = port["admin_status_enabled"]
        if port["_relations"]["LogicalPortStatus"]["fabric_status_up"]:
            quantum_db["status"] = constants.PORT_STATUS_ACTIVE
        else:
            quantum_db["status"] = constants.PORT_STATUS_DOWN
        LOG.debug("Port details for tenant %s: %s" %
                  (context.tenant_id, quantum_db))
        return quantum_db
    def get_plugin_version(self):
        # Static plugin version string imported from nvp_plugin_version.
        return PLUGIN_VERSION
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.layers.control_flow import lod_rank_table
import numpy
class TestReorderLoDTensor(unittest.TestCase):
    """Checks reorder_lod_tensor_by_rank against a pure-numpy reference.

    A shared program reorders `input` by the rank table built from `ref`,
    and a reduce_sum loss provides gradients w.r.t. the input.
    """
    # Number of top-level sequences in the generated test data.
    num_seq = 5
    # [name, shape, lod_level] pair indicating data info of source and target
    data_desc = (['input', [9], 0], ['ref', [5], 1])
    @classmethod
    def setUpClass(cls):
        cls.set_program()
    @classmethod
    def set_program(cls):
        # Build the single shared program once for all tests.
        dat = fluid.layers.data(
            name=cls.data_desc[0][0], shape=cls.data_desc[0][1])
        dat.stop_gradient = False
        rank_dat = fluid.layers.data(
            name=cls.data_desc[1][0], shape=cls.data_desc[1][1])
        table = lod_rank_table(rank_dat)
        new_dat = fluid.layers.reorder_lod_tensor_by_rank(
            x=dat, rank_table=table)
        loss = fluid.layers.reduce_sum(new_dat)
        fluid.backward.append_backward(loss=loss)
        # Fetch both the reordered output and the input gradient.
        cls.fetch_list = [new_dat, cls.data_desc[0][0] + '@GRAD']
    def run_program(self):
        # Execute the program on every available place (CPU, and CUDA when
        # compiled in), recording outputs and input gradients per place.
        outputs = []
        input_grads = []
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.set_inputs(place)
            exe = fluid.Executor(place)
            output, input_grad = exe.run(fluid.default_main_program(),
                                         feed=self.inputs,
                                         fetch_list=self.fetch_list,
                                         return_numpy=False)
            outputs.append(output)
            input_grads.append(input_grad)
        self.actual_outputs = outputs
        self.actual_grads = input_grads
    def set_data(self):
        # Generate random values plus a random recursive-sequence-length
        # lod for each declared input, per its lod_level.
        self.data = {}
        for desc in self.data_desc:
            data_name = desc[0]
            data_shape = desc[1]
            data_lod_level = desc[2]
            data_lod = []
            for i in range(data_lod_level):
                # Level 0 has num_seq entries; deeper levels have one entry
                # per sub-sequence of the previous level.
                lod_level_i = numpy.random.randint(
                    low=1,
                    high=5,
                    size=self.num_seq if i == 0 else sum(lod_level_i)).tolist()
                data_lod.append(lod_level_i)
            data_value = numpy.random.random(
                size=[sum(data_lod[-1]) if data_lod else self.num_seq
                ] + data_shape).astype('float32')
            self.data[data_name] = (data_value, data_lod)
    def set_inputs(self, place):
        # Wrap the generated numpy data into fluid Tensors on *place*.
        self.inputs = {}
        for desc in self.data_desc:
            tensor = fluid.Tensor()
            tensor.set(self.data[desc[0]][0], place)
            if self.data[desc[0]][1]:
                tensor.set_recursive_sequence_lengths(self.data[desc[0]][1])
            self.inputs[desc[0]] = tensor
    def reorder(self):
        """Numpy reference implementation of the reorder op.

        Returns (output_value, output_lod) expected from the program.
        """
        def convert_to_offset(lod):
            # Turn per-sequence lengths into cumulative offsets per level.
            offset_lod = [[0] for i in lod]
            for i, level in enumerate(lod):
                for seq_len in level:
                    offset_lod[i].append(offset_lod[i][-1] + seq_len)
            return offset_lod
        level = 0
        # compute the rank_table according to ref_lod
        ref_lod = self.data[self.data_desc[1][0]][1][level]
        rank_table = []  # list of (index, length)
        for i in range(len(ref_lod)):
            rank_table.append((i, ref_lod[i]))
        # Sort descending by length (stable, preserving index order for
        # ties).
        # NOTE(review): a cmp-style lambda is passed positionally — valid
        # on Python 2 only; Python 3's sorted() dropped the cmp parameter
        # (use functools.cmp_to_key or key=/reverse= instead).
        rank_table = sorted(rank_table, lambda x, y: y[1] - x[1])
        # compute the input sequence info according to input_lod
        input_value, input_lod = self.data[self.data_desc[0][0]]
        offset_lod = convert_to_offset(input_lod)
        input_table = []  # list of (offset, length, sub_lod)
        if offset_lod:
            for i in range(len(offset_lod[level]) - 1):
                start_idx = i
                end_idx = i + 1
                sub_lod = []
                # Walk down the lod levels, translating sequence indices
                # into element offsets and collecting per-level lengths.
                for lod_level_i in offset_lod[level:]:
                    sub_lod_i = []
                    for idx in range(start_idx, end_idx):
                        sub_lod_i.append(lod_level_i[idx + 1] - lod_level_i[
                            idx])
                    sub_lod.append(sub_lod_i)
                    start_idx = lod_level_i[start_idx]
                    end_idx = lod_level_i[end_idx]
                input_table.append((start_idx, end_idx - start_idx, sub_lod))
        else:
            # Plain tensor input: each row is its own length-1 sequence.
            input_table = [(i, 1, []) for i in range(len(rank_table))]
        # reorder by rank_table
        output_value = numpy.zeros_like(input_value)
        output_lod = []
        offset = 0
        for index, length in rank_table:
            input_seq_start = input_table[index][0]
            input_seq_len = input_table[index][1]
            input_seq_end = input_seq_start + input_seq_len
            output_value[offset:offset + input_seq_len] = input_value[
                input_seq_start:input_seq_end]
            offset += input_seq_len
            input_seq_sub_lod = input_table[index][2]
            if len(output_lod) == 0:
                output_lod = [[] for i in input_seq_sub_lod]
            for i, level in enumerate(input_seq_sub_lod):
                output_lod[i].extend(level)
        return output_value, output_lod
    def test_reorder_lod_tensor(self):
        # NOTE: mutates the shared class-level data_desc before building
        # the data.
        self.data_desc[0][-1] = 2  # input is lod_tensor
        self.set_data()
        self.run_program()
        # check output
        expect_output, expect_output_lod = self.reorder()
        for actual_output in self.actual_outputs:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_output), expect_output, atol=0.001))
            self.assertEqual(expect_output_lod,
                             actual_output.recursive_sequence_lengths())
        # check gradient: reordering is a permutation, so the gradient of
        # reduce_sum is all ones with the input's original lod.
        expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0])
        expect_grad_lod = self.data[self.data_desc[0][0]][1]
        for actual_grad in self.actual_grads:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_grad), expect_grad, atol=0.001))
            self.assertEqual(expect_grad_lod,
                             actual_grad.recursive_sequence_lengths())
    def test_reorder_tensor(self):
        # NOTE: mutates the shared class-level data_desc before building
        # the data.
        self.data_desc[0][-1] = 0  # input is tensor
        self.set_data()
        self.run_program()
        # check output
        expect_output, expect_output_lod = self.reorder()
        for actual_output in self.actual_outputs:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_output), expect_output, atol=0.001))
            self.assertEqual(expect_output_lod,
                             actual_output.recursive_sequence_lengths())
        # check gradient
        expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0])
        expect_grad_lod = self.data[self.data_desc[0][0]][1]
        for actual_grad in self.actual_grads:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_grad), expect_grad, atol=0.001))
            self.assertEqual(expect_grad_lod,
                             actual_grad.recursive_sequence_lengths())
        # compare outputs between LodTensors with explicit and implicit lod
        # use the same data but set the input lod explicitly
        input_lod = [[1] * len(self.data[self.data_desc[0][0]][0])]
        self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths(
            input_lod)
        # preserve the output of LodTensor with implicit lod to compare
        expect_output = [
            numpy.array(actual_output) for actual_output in self.actual_outputs
        ]
        self.run_program()
        for actual_output in self.actual_outputs:
            self.assertTrue(
                numpy.allclose(
                    numpy.array(actual_output), expect_output, atol=0.001))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| |
"""
@file coi-services/mi.idk/comm_config.py
@author Bill French
@brief Comm Configuration object used to gather and store connection information for the logger.
Usage:
#
# Create a CommConfig object. Use the factory method to get the correct object type.
#
comm_config = CommConfig.get_config_from_type(filename, 'tcp')
#
# Get config from the console (prompts for type)
#
comm_config = comm_config.get_from_console(filename)
#
# List all known CommConfig types
#
valid_types = CommConfig.valid_type_list()
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
import os
import yaml
from mi.idk import prompt
from mi.core.log import get_logger
from mi.idk.exceptions import NoConfigFileSpecified
from mi.idk.exceptions import CommConfigReadFail
from mi.idk.exceptions import InvalidCommType
from mi.core.common import BaseEnum
log = get_logger()
# Defaults applied by the console prompts when no value has been configured.
DEFAULT_HOST = 'localhost'
DEFAULT_DATA_PORT = 6001
DEFAULT_CMD_PORT = 6002
DEFAULT_SNIFFER_PORT = 6003
DEFAULT_INSTRUMENT_CMD_PORT = 6004
class ConfigTypes(BaseEnum):
    """Enumeration of supported comm configuration methods.

    The string values appear as the 'method' field in comm_config.yml files.
    """
    RSN = 'rsn'
    TCP = 'tcp'
    SERIAL = 'serial'
    BOTPT = 'botpt'
    MULTI = 'multi'
class CommConfig(object):
"""
Object to collect and store logger configuration information
"""
def __init__(self, config_file_path=None):
"""
@brief Constructor, attempt to read from a config file
@param metadata IDK Metadata object
"""
self.config_file_path = None
self.data_port = None
self.command_port = None
self.host = DEFAULT_HOST
self.sniffer_port = DEFAULT_SNIFFER_PORT
self.sniffer_prefix = None
self.sniffer_suffix = None
if config_file_path:
self.read_from_file(config_file_path)
def __getitem__(self, *args):
return args
def _init_from_yaml(self, yaml_input):
"""
@brief initialize the object from yaml data. This method should be sub classed
@param yaml_input yaml data structure
"""
if yaml_input:
self.config_type = yaml_input['comm'].get('method')
self.host = yaml_input['comm'].get('host')
self.data_port = yaml_input['comm'].get('data_port')
self.command_port = yaml_input['comm'].get('command_port')
self.sniffer_port = yaml_input['comm'].get('sniffer_port')
self.sniffer_prefix = yaml_input['comm'].get('sniffer_prefix')
self.sniffer_suffix = yaml_input['comm'].get('sniffer_suffix')
if self.data_port: self.data_port = int(self.data_port)
if self.command_port: self.command_port = int(self.command_port)
def _config_dictionary(self):
"""
@brief get a dictionary of configuration parameters. This method should be sub classed to extend config
@retval dictionary containing all config parameters.
"""
if self.data_port: self.data_port = int(self.data_port)
if self.command_port: self.command_port = int(self.command_port)
if self.sniffer_port: self.sniffer_port = int(self.sniffer_port)
result = {'method': self.method(),
'data_port': self.data_port,
'command_port': self.command_port,
'host': self.host,
'sniffer_port': self.sniffer_port,
}
if self.sniffer_prefix: result['sniffer_prefix'] = self.sniffer_prefix
if self.sniffer_suffix: result['sniffer_suffix'] = self.sniffer_suffix
return result
###
# Public Methods
###
def display_config(self):
"""
@brief Pretty print object configuration to stdout. This method should be sub classed.
"""
print("Type: " + self.method())
print("PA Command Port: " + str(self.command_port))
print("PA Data Port: " + str(self.data_port))
def serialize(self):
"""
@brief Get yaml dump of object data
@retval yaml string of object data
"""
return yaml.dump({'comm': self._config_dictionary()}, default_flow_style=False)
def dict(self):
"""
@brief Return a dict for the comm config
@retval dict of all comm config data
"""
return self._config_dictionary()
def store_to_file(self):
"""
@brief Store object config data to a config file.
"""
if not self.config_file_path:
raise NoConfigFileSpecified()
log.info("store config to %s" % self.config_file_path)
ofile = open(self.config_file_path, 'w')
ofile.write(self.serialize())
ofile.close()
def read_from_file(self, filename):
"""
@brief Read config file and initialize this object
@param filename filename that contains the config
"""
self.config_file_path = filename
log.debug("read comm config file %s", filename)
# If the config file doesn't exists don't read
if self.config_file_path and os.path.exists(self.config_file_path):
try:
infile = open(filename, "r")
input = yaml.load(infile)
if input:
self._init_from_yaml(input)
infile.close()
except IOError:
raise CommConfigReadFail(msg="filename: %s" % filename)
def get_from_console(self):
"""
@brief Read comm config from the console. This should be overloaded in a sub class.
"""
if not self.host: self.host = DEFAULT_HOST
if not self.data_port: self.data_port = DEFAULT_DATA_PORT
if not self.command_port: self.command_port = DEFAULT_CMD_PORT
if not self.sniffer_port: self.sniffer_port = DEFAULT_SNIFFER_PORT
self.host = prompt.text('Port Agent Host', self.host)
self.data_port = prompt.text('Port Agent Data Port', self.data_port)
self.command_port = prompt.text('Port Agent Command Port', self.command_port)
self.sniffer_port = prompt.text('Port Agent Sniffer Port', self.sniffer_port)
#self.sniffer_prefix = prompt.text( 'Port Agent Sniffer Prefix', self.sniffer_prefix )
#self.sniffer_suffix = prompt.text( 'Port Agent Sniffer Suffix', self.sniffer_suffix )
if self.confirm_config():
self.store_to_file()
else:
return self.get_from_console()
def confirm_config(self):
"""
@brief Is the data entered on the console valid? This should be overloaded in the sub class to do something useful.
"""
return True
###
# Static Methods
###
@staticmethod
def config_filename():
"""
@brief name of the file that stores the comm configuration yaml
"""
return "comm_config.yml"
@staticmethod
def method():
"""
@brief Defines the "type" of object. This must be overloaded in the sub class.
@retval type of comm configuration object.
"""
return False
@staticmethod
def get_config_from_console(filename, default_type=ConfigTypes.TCP):
"""
@brief Factory method. Prompt and read the config type from the console
@param filename The file where the comm config is stored in
@retval A CommConfig object for the type entered on the console
"""
print( "\nDriver Comm Configuration" )
# Currently there is only one connection type so let's just default to that
type = prompt.text('Type [' + CommConfig.valid_type_string() + ']', default_type)
#type=ConfigTypes.ETHERNET
#print "Type: ethernet"
config = CommConfig.get_config_from_type(filename, type)
if config:
return config
else:
return CommConfig.get_config_from_console(filename, default_type)
@staticmethod
def get_config_from_type(filename, type):
"""
@brief Factory method. Get a CommConfig object for the type passed in
@param filename The file where the comm config is stored in
@param type Type of CommConfig object to create
@retval A CommConfig object for the type entered on the console
"""
valid_types = CommConfig.valid_type_list()
if type in valid_types:
if ConfigTypes.TCP == type:
config = CommConfigEthernet(filename)
elif ConfigTypes.RSN == type:
config = CommConfigRSN(filename)
elif ConfigTypes.BOTPT == type:
config = CommConfigBOTPT(filename)
elif ConfigTypes.SERIAL == type:
config = CommConfigSerial(filename)
elif ConfigTypes.MULTI == type:
config = CommConfigMulti(filename)
return config
else:
raise InvalidCommType(msg=type)
@staticmethod
def get_config_from_file(filename):
"""
@brief Factory method. Get a CommConfig object for the type stored in a driver comm_config file
@param filename The file where the comm config is stored in
@retval A CommConfig object for the type specified in the comm config file.
"""
config = CommConfig(filename)
return CommConfig.get_config_from_type(filename, config.config_type)
@staticmethod
def valid_type_list():
"""
@brief List all know types of CommConfig objects
@retval list of all know CommConfig objects
"""
result = []
for config in _CONFIG_OBJECTS:
result.append(config.method())
return result
@staticmethod
def valid_type_string():
"""
@brief Get a pretty print list of valid CommConfig object types
@retval comma delimited string of valid CommConfig object types
"""
return ", ".join(CommConfig.valid_type_list())
class CommConfigEthernet(CommConfig):
    """Comm config for a plain TCP/ethernet logger connection.

    Extends the base config with the instrument's address and port.
    """

    @staticmethod
    def method():
        return ConfigTypes.TCP

    def __init__(self, filename):
        self.device_addr = None
        self.device_port = None
        CommConfig.__init__(self, filename)

    def _init_from_yaml(self, yaml_input):
        CommConfig._init_from_yaml(self, yaml_input)
        if yaml_input:
            comm = yaml_input['comm']
            self.device_addr = comm.get('device_addr')
            self.device_port = comm.get('device_port')

    def get_from_console(self):
        self.device_addr = prompt.text('Device Address', self.device_addr)
        self.device_port = prompt.text('Device Port', self.device_port)
        CommConfig.get_from_console(self)

    def display_config(self):
        CommConfig.display_config(self)
        print("Device Address: " + self.device_addr)
        print("Device Port: " + str(self.device_port))

    def _config_dictionary(self):
        result = CommConfig._config_dictionary(self)
        result['device_addr'] = self.device_addr
        result['device_port'] = int(self.device_port)
        return result
class CommConfigRSN(CommConfig):
    """Comm config for an RSN ethernet logger connection.

    Like the plain ethernet config, plus an instrument command port.
    """

    @staticmethod
    def method():
        return ConfigTypes.RSN

    def __init__(self, filename):
        self.device_addr = None
        self.device_port = None
        self.instrument_command_port = None
        CommConfig.__init__(self, filename)

    def _init_from_yaml(self, yaml_input):
        CommConfig._init_from_yaml(self, yaml_input)
        if yaml_input:
            comm = yaml_input['comm']
            self.device_addr = comm.get('device_addr')
            self.device_port = comm.get('device_port')
            self.instrument_command_port = comm.get('instrument_command_port')

    def get_from_console(self):
        if not self.instrument_command_port:
            self.instrument_command_port = DEFAULT_INSTRUMENT_CMD_PORT
        self.device_addr = prompt.text('Device Address', self.device_addr)
        self.device_port = prompt.text('Device Port', self.device_port)
        self.instrument_command_port = prompt.text('Instrument Command Port',
                                                   self.instrument_command_port)
        CommConfig.get_from_console(self)

    def display_config(self):
        CommConfig.display_config(self)
        print("Device Address: " + self.device_addr)
        print("Device Port: " + str(self.device_port))
        print("Instrument Command Port: " + str(self.instrument_command_port))

    def _config_dictionary(self):
        result = CommConfig._config_dictionary(self)
        result['device_addr'] = self.device_addr
        result['device_port'] = int(self.device_port)
        result['instrument_command_port'] = int(self.instrument_command_port)
        return result
class CommConfigBOTPT(CommConfig):
    """Comm config for a BOTPT instrument, which uses separate TX/RX ports."""

    @staticmethod
    def method():
        return ConfigTypes.BOTPT

    def __init__(self, filename):
        self.device_addr = None
        self.device_tx_port = None
        self.device_rx_port = None
        CommConfig.__init__(self, filename)

    def _init_from_yaml(self, yaml_input):
        CommConfig._init_from_yaml(self, yaml_input)
        if yaml_input:
            comm = yaml_input['comm']
            self.device_addr = comm.get('device_addr')
            self.device_tx_port = comm.get('device_tx_port')
            self.device_rx_port = comm.get('device_rx_port')

    def get_from_console(self):
        self.device_addr = prompt.text('Device Address', self.device_addr)
        self.device_tx_port = prompt.text('Device TX Port', self.device_tx_port)
        self.device_rx_port = prompt.text('Device RX Port', self.device_rx_port)
        CommConfig.get_from_console(self)

    def display_config(self):
        CommConfig.display_config(self)
        print("Device Address: " + self.device_addr)
        print("Device TX Port: " + str(self.device_tx_port))
        print("Device RX Port: " + str(self.device_rx_port))

    def _config_dictionary(self):
        result = CommConfig._config_dictionary(self)
        result['device_addr'] = self.device_addr
        result['device_tx_port'] = int(self.device_tx_port)
        result['device_rx_port'] = int(self.device_rx_port)
        return result
class CommConfigSerial(CommConfig):
    """
    Serial CommConfig object. Defines data store for serial based logger connections.

    Parity and flow control are stored as integer codes:
    parity 0=none, 1=odd, 2=even; flow control 0=none, 1=hardware, 2=software.
    """
    @staticmethod
    def method():
        return ConfigTypes.SERIAL

    def __init__(self, filename):
        self.device_os_port = None
        self.device_baud = None
        self.device_data_bits = None
        self.device_parity = None
        self.device_stop_bits = None
        self.device_flow_control = None  # hardware/software/none
        CommConfig.__init__(self, filename)

    def _init_from_yaml(self, yaml_input):
        """
        @brief initialize serial fields from yaml data
        @param yaml_input yaml data structure
        """
        CommConfig._init_from_yaml(self, yaml_input)
        if yaml_input:
            self.device_os_port = yaml_input['comm'].get('device_os_port')
            self.device_baud = yaml_input['comm'].get('device_baud')
            self.device_data_bits = yaml_input['comm'].get('device_data_bits')
            self.device_parity = yaml_input['comm'].get('device_parity')
            self.device_stop_bits = yaml_input['comm'].get('device_stop_bits')
            self.device_flow_control = yaml_input['comm'].get('device_flow_control')

    def get_from_console(self):
        """
        @brief Prompt for all serial parameters, validating each answer.
        @raise InvalidCommType when a value is outside its allowed set
        """
        self.device_os_port = prompt.text('Device OS Port', self.device_os_port)
        self.device_baud = prompt.text('Device Baud', self.device_baud)
        if int(self.device_baud) not in [1200, 2400, 4800, 9600, 19200, 38400, 57600, 115200]:
            raise InvalidCommType(str(self.device_baud) + " is not an allowed value for device baud. " +
                                  "[1200, 2400, 4800, 9600, 19200, 38400, 57600, 115200]")
        self.device_data_bits = prompt.text('Device Data Bits', self.device_data_bits)
        if int(self.device_data_bits) not in [5, 6, 7, 8]:
            raise InvalidCommType(str(self.device_data_bits) +
                                  " is not an allowed value for device data bits [5, 6, 7, 8].")
        self.device_parity = prompt.text('Device Parity', self.device_parity)
        if 'n' == self.device_parity.lower() or 'none' == self.device_parity.lower():
            self.device_parity = 0
        elif 'o' == self.device_parity.lower() or 'odd' == self.device_parity.lower():
            self.device_parity = 1
        elif 'e' == self.device_parity.lower() or 'even' == self.device_parity.lower():
            self.device_parity = 2
        elif 0 <= self.device_parity <= 2:
            # Already a numeric code in range; keep it.  BUG FIX: the original
            # had a bare string literal "acceptable" here -- a no-op that only
            # looked like validation; 'pass' states the intent.
            pass
        else:
            raise InvalidCommType(str(self.device_parity) +
                                  " is not an allowed value for device parity. [none, odd, even]")
        self.device_stop_bits = prompt.text('Device Stop Bits', self.device_stop_bits)
        if int(self.device_stop_bits) not in [0, 1, 2]:
            raise InvalidCommType(str(self.device_stop_bits) +
                                  " is not an allowed value for device stop bits [0, 1, 2].")
        self.device_flow_control = prompt.text('Device Flow Control', self.device_flow_control)
        if 'n' == self.device_flow_control.lower() or 'none' == self.device_flow_control.lower():
            self.device_flow_control = 0
        elif 'h' == self.device_flow_control.lower() or 'hardware' == self.device_flow_control.lower():
            self.device_flow_control = 1
        elif 's' == self.device_flow_control.lower() or 'software' == self.device_flow_control.lower():
            self.device_flow_control = 2
        elif 0 <= self.device_flow_control <= 2:
            pass  # already a numeric code in range (was a no-op string literal)
        else:
            raise InvalidCommType(str(self.device_flow_control) +
                                  " is not an allowed value for device flow control. [none, hardware, software]")
        CommConfig.get_from_console(self)

    def display_config(self):
        """Pretty print serial settings, decoding the integer codes."""
        PARITY = ['none', 'odd', 'even']
        FLOW_CONTROL = ['none', 'hardware', 'software']
        CommConfig.display_config(self)
        print("Device OS Port: " + str(self.device_os_port))
        print("Device Baud: " + str(self.device_baud))
        print("Device Data Bits: " + str(self.device_data_bits))
        print("Device Parity: " + PARITY[self.device_parity])
        print("Device Stop Bits: " + str(self.device_stop_bits))
        print("Device Flow Control: " + FLOW_CONTROL[self.device_flow_control])

    def _config_dictionary(self):
        config = CommConfig._config_dictionary(self)
        config['device_os_port'] = self.device_os_port
        config['device_baud'] = int(self.device_baud)
        config['device_data_bits'] = int(self.device_data_bits)
        config['device_parity'] = int(self.device_parity)
        config['device_stop_bits'] = int(self.device_stop_bits)
        config['device_flow_control'] = int(self.device_flow_control)
        return config
class CommConfigMulti(CommConfig):
    """
    Multi connection CommConfig object. Defines data store for multi-connection drivers.

    Holds one named sub CommConfig per connection under self.configs.
    """
    @staticmethod
    def method():
        return ConfigTypes.MULTI

    def __init__(self, filename):
        self.config_type = ConfigTypes.MULTI
        self.configs = {}
        CommConfig.__init__(self, filename)

    def _init_from_yaml(self, yaml_input):
        """
        @brief Build one sub CommConfig per named entry under comm.configs
        @raise InvalidCommType for an unrecognized sub-config method
        """
        CommConfig._init_from_yaml(self, yaml_input)
        for key, value in yaml_input.get('comm', {}).get('configs').items():
            method = value['comm'].get('method')
            if method == ConfigTypes.SERIAL:
                self.configs[key] = CommConfigSerial(None)
            elif method == ConfigTypes.TCP:
                self.configs[key] = CommConfigEthernet(None)
            elif method == ConfigTypes.RSN:
                self.configs[key] = CommConfigRSN(None)
            else:
                raise InvalidCommType('%s' % method)
            self.configs[key]._init_from_yaml(yaml_input['comm']['configs'][key])

    def get_from_console(self):
        # Multi configs are too free-form for a guided console session.
        # (print() call also keeps this consistent with the rest of the file.)
        print('Please create this comm config by hand...')

    def display_config(self):
        for name, config in self.configs.items():
            print(name)
            config.display_config()

    def _config_dictionary(self):
        result = {
            'config_type': self.method(),
            'configs': {},
        }
        for name, config in self.configs.items():
            # BUG FIX: previously stored under the literal key 'name',
            # clobbering every sub-config but the last one serialized.
            result['configs'][name] = config._config_dictionary()
        return result
# List of all known CommConfig objects
# (enumerated by CommConfig.valid_type_list() to build the set of valid types)
_CONFIG_OBJECTS = [CommConfigEthernet, CommConfigRSN, CommConfigBOTPT, CommConfigSerial, CommConfigMulti]

if __name__ == '__main__':
    pass
| |
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova.objects import base as objects_base
from nova import rpc
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(object):
    """Client side of the conductor RPC API

    Every public method below is a thin synchronous proxy: it prepares an RPC
    context and forwards its keyword arguments to the conductor service method
    of the same name, returning the remote result.

    API version history:

    * 1.0 - Initial version.
    * 1.1 - Added migration_update
    * 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
    * 1.3 - Added aggregate_host_add and aggregate_host_delete
    * 1.4 - Added migration_get
    * 1.5 - Added bw_usage_update
    * 1.6 - Added get_backdoor_port()
    * 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
      and aggregate_metadata_delete
    * 1.8 - Added security_group_get_by_instance and
      security_group_rule_get_by_security_group
    * 1.9 - Added provider_fw_rule_get_all
    * 1.10 - Added agent_build_get_by_triple
    * 1.11 - Added aggregate_get
    * 1.12 - Added block_device_mapping_update_or_create
    * 1.13 - Added block_device_mapping_get_all_by_instance
    * 1.14 - Added block_device_mapping_destroy
    * 1.15 - Added instance_get_all_by_filters and
      instance_get_all_hung_in_rebooting and
      instance_get_active_by_window
      Deprecated instance_get_all_by_host
    * 1.16 - Added instance_destroy
    * 1.17 - Added instance_info_cache_delete
    * 1.18 - Added instance_type_get
    * 1.19 - Added vol_get_usage_by_time and vol_usage_update
    * 1.20 - Added migration_get_unconfirmed_by_dest_compute
    * 1.21 - Added service_get_all_by
    * 1.22 - Added ping
    * 1.23 - Added instance_get_all
      Un-Deprecate instance_get_all_by_host
    * 1.24 - Added instance_get
    * 1.25 - Added action_event_start and action_event_finish
    * 1.26 - Added instance_info_cache_update
    * 1.27 - Added service_create
    * 1.28 - Added binary arg to service_get_all_by
    * 1.29 - Added service_destroy
    * 1.30 - Added migration_create
    * 1.31 - Added migration_get_in_progress_by_host_and_node
    * 1.32 - Added optional node to instance_get_all_by_host
    * 1.33 - Added compute_node_create and compute_node_update
    * 1.34 - Added service_update
    * 1.35 - Added instance_get_active_by_window_joined
    * 1.36 - Added instance_fault_create
    * 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
    * 1.38 - Added service name to instance_update
    * 1.39 - Added notify_usage_exists
    * 1.40 - Added security_groups_trigger_handler and
      security_groups_trigger_members_refresh
      Remove instance_get_active_by_window
    * 1.41 - Added fixed_ip_get_by_instance, network_get,
      instance_floating_address_get_all, quota_commit,
      quota_rollback
    * 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
    * 1.43 - Added compute_stop
    * 1.44 - Added compute_node_delete
    * 1.45 - Added project_id to quota_commit and quota_rollback
    * 1.46 - Added compute_confirm_resize
    * 1.47 - Added columns_to_join to instance_get_all_by_host and
      instance_get_all_by_filters
    * 1.48 - Added compute_unrescue

    ... Grizzly supports message version 1.48.  So, any changes to existing
    methods in 2.x after that point should be done such that they can
    handle the version_cap being set to 1.48.

    * 1.49 - Added columns_to_join to instance_get_by_uuid
    * 1.50 - Added object_action() and object_class_action()
    * 1.51 - Added the 'legacy' argument to
      block_device_mapping_get_all_by_instance
    * 1.52 - Pass instance objects for compute_confirm_resize
    * 1.53 - Added compute_reboot
    * 1.54 - Added 'update_cells' argument to bw_usage_update
    * 1.55 - Pass instance objects for compute_stop
    * 1.56 - Remove compute_confirm_resize and
      migration_get_unconfirmed_by_dest_compute
    * 1.57 - Remove migration_create()
    * 1.58 - Remove migration_get()

    ... Havana supports message version 1.58.  So, any changes to existing
    methods in 1.x after that point should be done such that they can
    handle the version_cap being set to 1.58.

    * 1.59 - Remove instance_info_cache_update()
    * 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
    * ... - Remove security_group_get_by_instance() and
      security_group_rule_get_by_security_group()
    * 1.61 - Return deleted instance from instance_destroy()
    * 1.62 - Added object_backport()
    * 1.63 - Changed the format of values['stats'] from a dict to a JSON string
      in compute_node_update()
    * 1.64 - Added use_slave to instance_get_all_filters()
      - Remove instance_type_get()
      - Remove aggregate_get()
      - Remove aggregate_get_by_host()
      - Remove instance_get()
      - Remove migration_update()
      - Remove block_device_mapping_destroy()
    * 2.0 - Drop backwards compatibility
      - Remove quota_rollback() and quota_commit()
      - Remove aggregate_host_add() and aggregate_host_delete()
      - Remove network_migrate_instance_start() and
        network_migrate_instance_finish()

    ... Icehouse supports message version 2.0.  So, any changes to
    existing methods in 2.x after that point should be done such
    that they can handle the version_cap being set to 2.0.

    * Remove instance_destroy()
    * Remove compute_unrescue()
    * Remove instance_get_all_by_filters()
    * Remove instance_get_active_by_window_joined()
    * Remove instance_fault_create()
    * Remove action_event_start() and action_event_finish()
    * Remove instance_get_by_uuid()
    * Remove agent_build_get_by_triple()

    * 2.1 - Make notify_usage_exists() take an instance object

    ... Juno supports message version 2.0.  So, any changes to
    existing methods in 2.x after that point should be done such
    that they can handle the version_cap being set to 2.0.
    """

    # Release code-names accepted in [upgrade_levels]/conductor, mapped to
    # the newest message version that release understands.
    VERSION_ALIASES = {
        'grizzly': '1.48',
        'havana': '1.58',
        'icehouse': '2.0',
        'juno': '2.0',
    }

    def __init__(self):
        super(ConductorAPI, self).__init__()
        target = messaging.Target(topic=CONF.conductor.topic, version='2.0')
        # The configured cap may be a release name (see VERSION_ALIASES) or
        # an explicit version string; fall through to the raw value when it
        # is not an alias.
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
                                               CONF.upgrade_levels.conductor)
        serializer = objects_base.NovaObjectSerializer()
        self.client = rpc.get_client(target,
                                     version_cap=version_cap,
                                     serializer=serializer)

    def instance_update(self, context, instance_uuid, updates,
                        service=None):
        # Flatten the updates dict to primitives before putting it on the wire.
        updates_p = jsonutils.to_primitive(updates)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'instance_update',
                          instance_uuid=instance_uuid,
                          updates=updates_p,
                          service=service)

    def migration_get_in_progress_by_host_and_node(self, context,
                                                   host, node):
        cctxt = self.client.prepare()
        return cctxt.call(context,
                          'migration_get_in_progress_by_host_and_node',
                          host=host, node=node)

    def aggregate_metadata_get_by_host(self, context, host, key):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'aggregate_metadata_get_by_host',
                          host=host,
                          key=key)

    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in=None, bw_out=None,
                        last_ctr_in=None, last_ctr_out=None,
                        last_refreshed=None, update_cells=True):
        msg_kwargs = dict(uuid=uuid, mac=mac, start_period=start_period,
                          bw_in=bw_in, bw_out=bw_out, last_ctr_in=last_ctr_in,
                          last_ctr_out=last_ctr_out,
                          last_refreshed=last_refreshed,
                          update_cells=update_cells)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'bw_usage_update', **msg_kwargs)

    def provider_fw_rule_get_all(self, context):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'provider_fw_rule_get_all')

    def block_device_mapping_update_or_create(self, context, values,
                                              create=None):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'block_device_mapping_update_or_create',
                          values=values, create=create)

    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy=True):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'block_device_mapping_get_all_by_instance',
                          instance=instance_p, legacy=legacy)

    def vol_get_usage_by_time(self, context, start_time):
        start_time_p = jsonutils.to_primitive(start_time)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'vol_get_usage_by_time',
                          start_time=start_time_p)

    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'vol_usage_update',
                          vol_id=vol_id, rd_req=rd_req,
                          rd_bytes=rd_bytes, wr_req=wr_req,
                          wr_bytes=wr_bytes,
                          instance=instance_p, last_refreshed=last_refreshed,
                          update_totals=update_totals)

    def service_get_all_by(self, context, topic=None, host=None, binary=None):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'service_get_all_by',
                          topic=topic, host=host, binary=binary)

    def instance_get_all_by_host(self, context, host, node=None,
                                 columns_to_join=None):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'instance_get_all_by_host',
                          host=host, node=node,
                          columns_to_join=columns_to_join)

    def service_create(self, context, values):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'service_create', values=values)

    def service_destroy(self, context, service_id):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'service_destroy', service_id=service_id)

    def compute_node_create(self, context, values):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'compute_node_create', values=values)

    def compute_node_update(self, context, node, values):
        node_p = jsonutils.to_primitive(node)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'compute_node_update',
                          node=node_p, values=values)

    def compute_node_delete(self, context, node):
        node_p = jsonutils.to_primitive(node)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'compute_node_delete', node=node_p)

    def service_update(self, context, service, values):
        service_p = jsonutils.to_primitive(service)

        # (NOTE:jichenjc) If we're calling this periodically, it makes no
        # sense for the RPC timeout to exceed the service report interval,
        # so cap the timeout just under it (minus one second) so the call
        # gives up before the next report; 5 seconds is treated as the
        # minimum interval worth shrinking.
        timeout = CONF.report_interval
        if timeout and timeout > 5:
            timeout -= 1

        if timeout:
            cctxt = self.client.prepare(timeout=timeout)
        else:
            cctxt = self.client.prepare()
        return cctxt.call(context, 'service_update',
                          service=service_p, values=values)

    def task_log_get(self, context, task_name, begin, end, host, state=None):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'task_log_get',
                          task_name=task_name, begin=begin, end=end,
                          host=host, state=state)

    def task_log_begin_task(self, context, task_name, begin, end, host,
                            task_items=None, message=None):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'task_log_begin_task',
                          task_name=task_name,
                          begin=begin, end=end, host=host,
                          task_items=task_items, message=message)

    def task_log_end_task(self, context, task_name, begin, end, host, errors,
                          message=None):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'task_log_end_task',
                          task_name=task_name, begin=begin, end=end,
                          host=host, errors=errors, message=message)

    def notify_usage_exists(self, context, instance, current_period=False,
                            ignore_missing_network_data=True,
                            system_metadata=None, extra_usage_info=None):
        # Send 2.1 when the cap allows it (2.1 takes an instance object),
        # otherwise fall back to 2.0.
        if self.client.can_send_version('2.1'):
            version = '2.1'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(instance)
        system_metadata_p = jsonutils.to_primitive(system_metadata)
        extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
        cctxt = self.client.prepare(version=version)
        return cctxt.call(
            context, 'notify_usage_exists',
            instance=instance,
            current_period=current_period,
            ignore_missing_network_data=ignore_missing_network_data,
            system_metadata=system_metadata_p,
            extra_usage_info=extra_usage_info_p)

    def security_groups_trigger_handler(self, context, event, args):
        args_p = jsonutils.to_primitive(args)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'security_groups_trigger_handler',
                          event=event, args=args_p)

    def security_groups_trigger_members_refresh(self, context, group_ids):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'security_groups_trigger_members_refresh',
                          group_ids=group_ids)

    def get_ec2_ids(self, context, instance):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare()
        return cctxt.call(context, 'get_ec2_ids',
                          instance=instance_p)

    def object_class_action(self, context, objname, objmethod, objver,
                            args, kwargs):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'object_class_action',
                          objname=objname, objmethod=objmethod,
                          objver=objver, args=args, kwargs=kwargs)

    def object_action(self, context, objinst, objmethod, args, kwargs):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'object_action', objinst=objinst,
                          objmethod=objmethod, args=args, kwargs=kwargs)

    def object_backport(self, context, objinst, target_version):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'object_backport', objinst=objinst,
                          target_version=target_version)
class ComputeTaskAPI(object):
    """Client side of the conductor 'compute' namespaced RPC API

    API version history:

    1.0 - Initial version (empty).
    1.1 - Added unified migrate_server call.
    1.2 - Added build_instances
    1.3 - Added unshelve_instance
    1.4 - Added reservations to migrate_server.
    1.5 - Added the legacy_bdm parameter to build_instances
    1.6 - Made migrate_server use instance objects
    1.7 - Do not send block_device_mapping and legacy_bdm to build_instances
    1.8 - Add rebuild_instance
    1.9 - Converted requested_networks to NetworkRequestList object
    1.10 - Made migrate_server() and build_instances() send flavor objects
    1.11 - Added clean_shutdown to migrate_server()
    """

    def __init__(self):
        super(ComputeTaskAPI, self).__init__()
        # Target the 'compute_task' namespace on the conductor topic;
        # base version is 1.0 and individual calls negotiate upwards.
        target = messaging.Target(topic=CONF.conductor.topic,
                                  namespace='compute_task',
                                  version='1.0')
        serializer = objects_base.NovaObjectSerializer()
        self.client = rpc.get_client(target, serializer=serializer)

    def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                       flavor, block_migration, disk_over_commit,
                       reservations=None, clean_shutdown=True):
        """Call 'migrate_server', downgrading the payload step by step
        to the newest version the server side can accept (see the
        version history above for what each step changed)."""
        kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
              'live': live, 'rebuild': rebuild, 'flavor': flavor,
              'block_migration': block_migration,
              'disk_over_commit': disk_over_commit,
              'reservations': reservations,
              'clean_shutdown': clean_shutdown}
        version = '1.11'
        if not self.client.can_send_version(version):
            # 1.10 and older do not accept clean_shutdown.
            del kw['clean_shutdown']
            version = '1.10'
        if not self.client.can_send_version(version):
            # Before 1.10, flavor must be a primitive, not an object.
            kw['flavor'] = objects_base.obj_to_primitive(flavor)
            version = '1.6'
        if not self.client.can_send_version(version):
            # Before 1.6, instance must be a json-able primitive too.
            kw['instance'] = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            version = '1.4'
        cctxt = self.client.prepare(version=version)
        return cctxt.call(context, 'migrate_server', **kw)

    def build_instances(self, context, instances, image, filter_properties,
                        admin_password, injected_files, requested_networks,
                        security_groups, block_device_mapping, legacy_bdm=True):
        """Cast 'build_instances' to the conductor, downgrading the
        payload for older server versions (see version history)."""
        image_p = jsonutils.to_primitive(image)
        version = '1.10'
        if not self.client.can_send_version(version):
            # 1.9 and older expect a primitive flavor inside
            # filter_properties instead of a flavor object.
            version = '1.9'
            if 'instance_type' in filter_properties:
                flavor = filter_properties['instance_type']
                flavor_p = objects_base.obj_to_primitive(flavor)
                filter_properties = dict(filter_properties,
                                         instance_type=flavor_p)
        kw = {'instances': instances, 'image': image_p,
              'filter_properties': filter_properties,
              'admin_password': admin_password,
              'injected_files': injected_files,
              'requested_networks': requested_networks,
              'security_groups': security_groups}
        if not self.client.can_send_version(version):
            # Before 1.9, requested_networks was a list of tuples, not
            # a NetworkRequestList object.
            version = '1.8'
            kw['requested_networks'] = kw['requested_networks'].as_tuples()
        if not self.client.can_send_version('1.7'):
            # 1.5/1.6 still expect the BDM arguments on the wire.
            version = '1.5'
            kw.update({'block_device_mapping': block_device_mapping,
                       'legacy_bdm': legacy_bdm})
        cctxt = self.client.prepare(version=version)
        cctxt.cast(context, 'build_instances', **kw)

    def unshelve_instance(self, context, instance):
        """Cast 'unshelve_instance' (added in version 1.3)."""
        cctxt = self.client.prepare(version='1.3')
        cctxt.cast(context, 'unshelve_instance', instance=instance)

    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
                         image_ref, orig_image_ref, orig_sys_metadata, bdms,
                         recreate=False, on_shared_storage=False, host=None,
                         preserve_ephemeral=False, kwargs=None):
        """Cast 'rebuild_instance' (added in version 1.8).

        Note: 'kwargs' is accepted for signature compatibility but is
        not sent over the wire.
        """
        cctxt = self.client.prepare(version='1.8')
        cctxt.cast(ctxt, 'rebuild_instance',
                   instance=instance, new_pass=new_pass,
                   injected_files=injected_files, image_ref=image_ref,
                   orig_image_ref=orig_image_ref,
                   orig_sys_metadata=orig_sys_metadata, bdms=bdms,
                   recreate=recreate, on_shared_storage=on_shared_storage,
                   preserve_ephemeral=preserve_ephemeral,
                   host=host)
| |
from ..utils import appid, have_appserver, on_production_server
from .creation import DatabaseCreation
from django.db.backends.util import format_number
from djangotoolbox.db.base import NonrelDatabaseFeatures, \
NonrelDatabaseOperations, NonrelDatabaseWrapper, NonrelDatabaseClient, \
NonrelDatabaseValidation, NonrelDatabaseIntrospection
from urllib2 import HTTPError, URLError
import logging
import os
import time
# Script path of the App Engine remote_api handler; compared against
# app.yaml handler entries in DatabaseWrapper.setup_remote to discover
# the handler URL ('$PYTHON_LIB' is expanded by the SDK).
REMOTE_API_SCRIPT = '$PYTHON_LIB/google/appengine/ext/remote_api/handler.py'
def auth_func():
    """Interactively prompt for Google Account credentials.

    Returns a (login, password) tuple; the password is read without echo.
    """
    import getpass
    login = raw_input('Login via Google Account (see note above if login fails): ')
    return login, getpass.getpass('Password: ')
def rpc_server_factory(*args, **kwargs):
    """Create an appengine_rpc.HttpRpcServer, forcing cookie persistence."""
    from google.appengine.tools import appengine_rpc
    options = dict(kwargs, save_cookies=True)
    return appengine_rpc.HttpRpcServer(*args, **options)
def get_datastore_paths(options):
    """Return (datastore_path, blobstore_path, history_path).

    Each path defaults to the location dev_appserver would use, with the
    name made unique to this project ('django_<appid>') so multiple
    Django projects can be developed on the same machine in parallel.
    Any of the three paths may be overridden via the ``options`` dict.
    """
    from google.appengine.tools import dev_appserver_main
    defaults = dev_appserver_main.DEFAULT_ARGS

    def _path(key):
        # Explicit option wins; otherwise rename the SDK default.
        return options.get(
            key, defaults[key].replace('dev_appserver', 'django_%s' % appid))

    return _path('datastore_path'), _path('blobstore_path'), _path('history_path')
def get_test_datastore_paths(inmemory=True):
    """Return (datastore_path, blobstore_path, history_path) for tests.

    If inmemory is true, (None, None, None) is returned to request an
    in-memory datastore. If inmemory is false, the paths are derived from
    get_datastore_paths() with '.test' names so a test run cannot clobber
    the development datastore.
    """
    if inmemory:
        return None, None, None
    # Bug fix: get_datastore_paths() requires an options dict; pass an
    # empty one so every path falls back to its computed default
    # (previously this raised TypeError whenever inmemory was false).
    datastore_path, blobstore_path, history_path = get_datastore_paths({})
    datastore_path = datastore_path.replace('.datastore', '.testdatastore')
    blobstore_path = blobstore_path.replace('.blobstore', '.testblobstore')
    # NOTE(review): this replaces '.datastore' in the *history* path, which
    # only has an effect if the history file name contains '.datastore' --
    # confirm against dev_appserver's DEFAULT_ARGS.
    history_path = history_path.replace('.datastore', '.testdatastore')
    return datastore_path, blobstore_path, history_path
def destroy_datastore(*args):
    """Destroy the appengine datastore files at the specified paths.

    Falsy paths are skipped.  A missing file is silently ignored; any
    other OSError is logged but not raised (best-effort cleanup).
    """
    import errno
    for path in args:
        if not path:
            continue
        try:
            os.remove(path)
        except OSError as error:  # 'as' syntax: valid on Python 2.6+ and 3.
            # Named constant instead of the magic number 2: ENOENT just
            # means there was nothing to delete.
            if error.errno != errno.ENOENT:
                logging.error("Failed to clear datastore: %s" % error)
class DatabaseFeatures(NonrelDatabaseFeatures):
    """Feature flags for the App Engine nonrel backend."""
    # 0 is accepted as a primary-key value by this backend.
    allows_primary_key_0 = True
    # dict-valued fields are supported.
    supports_dicts = True
class DatabaseOperations(NonrelDatabaseOperations):
    """Value-conversion and query operations for the App Engine backend."""

    # The SQL compiler lives in the sibling 'compiler' module of this package.
    compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'

    # Fallback precision when a DecimalField declares no max_digits.
    DEFAULT_MAX_DIGITS = 16

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """Convert a decimal value to a fixed-width string for storage.

        The absolute value is zero-padded on the left to max_digits -
        decimal_places integer digits (presumably so string comparison
        matches numeric ordering -- TODO confirm), then the sign is
        re-attached.  Returns None for None.
        """
        if value is None:
            return None
        # Remember and strip the sign; it is re-attached after padding.
        sign = value < 0 and u'-' or u''
        if sign:
            value = abs(value)
        if max_digits is None:
            max_digits = self.DEFAULT_MAX_DIGITS
        if decimal_places is None:
            # No fixed number of decimal places: plain text representation.
            value = unicode(value)
        else:
            value = format_number(value, max_digits, decimal_places)
        decimal_places = decimal_places or 0
        # n = number of digits before the decimal point (whole string
        # length when there is no point).
        n = value.find('.')
        if n < 0:
            n = len(value)
        if n < max_digits - decimal_places:
            value = u"0" * (max_digits - decimal_places - n) + value
        return sign + value

    def sql_flush(self, style, tables, sequences):
        """Flush via the connection directly; no SQL statements returned."""
        self.connection.flush()
        return []
class DatabaseClient(NonrelDatabaseClient):
    """Inherits the nonrel client behavior unchanged."""
    pass

class DatabaseValidation(NonrelDatabaseValidation):
    """Inherits the nonrel validation behavior unchanged."""
    pass

class DatabaseIntrospection(NonrelDatabaseIntrospection):
    """Inherits the nonrel introspection behavior unchanged."""
    pass
class DatabaseWrapper(NonrelDatabaseWrapper):
    """Django database wrapper for the App Engine datastore.

    Depending on settings, operations go to local dev_appserver stubs,
    the production datastore, or a remote datastore via remote_api.
    """

    def __init__(self, *args, **kwds):
        super(DatabaseWrapper, self).__init__(*args, **kwds)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.validation = DatabaseValidation(self)
        self.introspection = DatabaseIntrospection(self)
        options = self.settings_dict
        # Test-datastore flags default off; toggled externally (e.g. by
        # the test runner) before _setup_stubs is re-run.
        self.use_test_datastore = False
        self.test_datastore_inmemory = True
        # REMOTE is forced off when already running on production.
        self.remote = options.get('REMOTE', False)
        if on_production_server:
            self.remote = False
        self.remote_app_id = options.get('REMOTE_APP_ID', appid)
        self.remote_api_path = options.get('REMOTE_API_PATH', None)
        self.secure_remote_api = options.get('SECURE_REMOTE_API', True)
        self._setup_stubs()

    def _get_paths(self):
        # (datastore, blobstore, history) paths for the current mode.
        if self.use_test_datastore:
            return get_test_datastore_paths(self.test_datastore_inmemory)
        else:
            return get_datastore_paths(self.settings_dict)

    def _setup_stubs(self):
        # If this code is being run without an appserver (eg. via a django
        # commandline flag) then setup a default stub environment.
        if not have_appserver:
            from google.appengine.tools import dev_appserver_main
            args = dev_appserver_main.DEFAULT_ARGS.copy()
            args['datastore_path'], args['blobstore_path'], args['history_path'] = self._get_paths()
            from google.appengine.tools import dev_appserver
            dev_appserver.SetupStubs(appid, **args)
        # If we're supposed to set up the remote_api, do that now.
        if self.remote:
            self.setup_remote()

    def setup_remote(self):
        """Point datastore calls at the remote app via remote_api."""
        if not self.remote_api_path:
            # Discover the remote_api handler URL from app.yaml handlers.
            from ..utils import appconfig
            for handler in appconfig.handlers:
                if handler.script == REMOTE_API_SCRIPT:
                    self.remote_api_path = handler.url.split('(', 1)[0]
                    break
        self.remote = True
        remote_url = 'https://%s.appspot.com%s' % (self.remote_app_id,
                                                   self.remote_api_path)
        logging.info('Setting up remote_api for "%s" at %s' %
                     (self.remote_app_id, remote_url))
        if not have_appserver:
            print('Connecting to remote_api handler.\n\n'
                  'IMPORTANT: Check your login method settings in the '
                  'App Engine Dashboard if you have problems logging in. '
                  'Login is only supported for Google Accounts.\n')
        from google.appengine.ext.remote_api import remote_api_stub
        remote_api_stub.ConfigureRemoteApi(self.remote_app_id,
            self.remote_api_path, auth_func, secure=self.secure_remote_api,
            rpc_server_factory=rpc_server_factory)
        # Verify authentication, retrying with exponential backoff
        # (1, 2, 4, 8, 16 seconds).
        retry_delay = 1
        while retry_delay <= 16:
            try:
                remote_api_stub.MaybeInvokeAuthentication()
            except HTTPError, e:
                if not have_appserver:
                    print 'Retrying in %d seconds...' % retry_delay
                    time.sleep(retry_delay)
                    retry_delay *= 2
            else:
                # Authentication succeeded.
                break
        else:
            # Retries exhausted: one final attempt so we can raise a
            # helpful error message on failure.
            try:
                remote_api_stub.MaybeInvokeAuthentication()
            except HTTPError, e:
                raise URLError("%s\n"
                               "Couldn't reach remote_api handler at %s.\n"
                               "Make sure you've deployed your project and "
                               "installed a remote_api handler in app.yaml."
                               % (e, remote_url))
        logging.info('Now using the remote datastore for "%s" at %s' %
                     (self.remote_app_id, remote_url))

    def flush(self):
        """Helper function to remove the current datastore and re-open the stubs"""
        if self.remote:
            # Remote (production) flush: require the user to type a random
            # 4-letter confirmation code before deleting anything.
            import random, string
            code = ''.join([random.choice(string.ascii_letters) for x in range(4)])
            print '\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print "Warning! You're about to delete the *production* datastore!"
            print 'Only models defined in your INSTALLED_APPS can be removed!'
            print 'If you want to clear the whole datastore you have to use the ' \
                  'datastore viewer in the dashboard. Also, in order to delete all '\
                  'unneeded indexes you have to run appcfg.py vacuum_indexes.'
            print 'In order to proceed you have to enter the following code:'
            print code
            response = raw_input('Repeat: ')
            if code == response:
                print 'Deleting...'
                from django.db import models
                from google.appengine.api import datastore as ds
                for model in models.get_models():
                    print 'Deleting %s...' % model._meta.db_table
                    while True:
                        # Delete in key-only batches of 200 until empty.
                        data = ds.Query(model._meta.db_table, keys_only=True).Get(200)
                        if not data:
                            break
                        ds.Delete(data)
                print "Datastore flushed! Please check your dashboard's " \
                      'datastore viewer for any remaining entities and remove ' \
                      'all unneeded indexes with manage.py vacuum_indexes.'
            else:
                print 'Aborting'
                exit()
        else:
            # Local development datastore: just delete the files.
            destroy_datastore(*self._get_paths())
        # Recreate the stubs in both cases.
        self._setup_stubs()
| |
"""
Yarr model managers
"""
import datetime
import time
from django.db import connection, models, transaction
import bleach
from yarr import settings
from yarr.constants import ENTRY_UNREAD, ENTRY_READ, ENTRY_SAVED
###############################################################################
# Feed model
class FeedQuerySet(models.query.QuerySet):
    """QuerySet helpers for Feed: activity filter, update checks, and
    cached-count maintenance via a single raw UPDATE."""

    def active(self):
        "Filter to active feeds"
        return self.filter(is_active=True)

    def check(self, force=False, read=False, logfile=None):
        "Check active feeds for updates, then refresh cached counts"
        for feed in self.active():
            feed.check(force, read, logfile)
        # Update the total and unread counts
        self.update_count_unread()
        self.update_count_total()
        return self

    def _do_update(self, extra):
        "Perform the update for update_count_total and update_count_unread"
        # Get IDs for current queries
        ids = self.values_list('id', flat=True)
        # If no IDs, no sense trying to do anything
        if not ids:
            return self
        # Prepare query options
        # IDs and states should only ever be ints, but force them to
        # ints to be sure we don't introduce injection vulns
        opts = {
            'feed': models.loading.get_model('yarr', 'Feed')._meta.db_table,
            'entry': models.loading.get_model('yarr', 'Entry')._meta.db_table,
            'ids': ','.join([str(int(id)) for id in ids]),
            # Fields which should be set in extra
            'field': '',
            'where': '',
        }
        opts.update(extra)
        # Uses raw query so we can update in a single call to avoid race condition
        cursor = connection.cursor()
        cursor.execute(
            """UPDATE %(feed)s
            SET %(field)s=COALESCE(
                (
                    SELECT COUNT(1)
                    FROM %(entry)s
                    WHERE %(feed)s.id=feed_id%(where)s
                    GROUP BY feed_id
                ), 0
            )
            WHERE id IN (%(ids)s)
            """ % opts
        )
        # Ensure changes are committed in Django 1.5 or earlier
        transaction.commit_unless_managed()
        return self

    def update_count_total(self):
        "Update the cached total counts"
        return self._do_update({
            'field': 'count_total',
        })

    def update_count_unread(self):
        "Update the cached unread counts"
        return self._do_update({
            'field': 'count_unread',
            'where': ' AND state=%s' % ENTRY_UNREAD,
        })

    def update_count_saved(self):
        "Update the cached saved counts"
        return self._do_update({
            'field': 'count_saved',
            'where': ' AND state=%s' % ENTRY_SAVED,
        })

    def count_unread(self):
        "Get a dict of unread counts, with feed pks as keys"
        return dict(self.values_list('pk', 'count_unread'))
class FeedManager(models.Manager):
    """Manager exposing FeedQuerySet operations at the model level.

    Every method simply delegates to the matching FeedQuerySet method.
    """

    def get_query_set(self):
        "Return a FeedQuerySet"
        # NOTE(review): the usual Django pattern also passes
        # using=self._db; confirm whether multi-db support is needed.
        return FeedQuerySet(self.model)

    def active(self):
        "Active feeds"
        return self.get_query_set().active()

    def check(self, force=False, read=False, logfile=None):
        "Check all active feeds for updates"
        return self.get_query_set().check(
            force=force, read=read, logfile=logfile)

    def update_count_total(self):
        "Update the cached total counts"
        return self.get_query_set().update_count_total()

    def update_count_unread(self):
        "Update the cached unread counts"
        return self.get_query_set().update_count_unread()

    def count_unread(self):
        "Get a dict of unread counts, with feed pks as keys"
        return self.get_query_set().count_unread()
###############################################################################
# Entry model
class EntryQuerySet(models.query.QuerySet):
    """QuerySet helpers for Entry: state filters, bulk state changes,
    and expiry management."""

    def user(self, user):
        "Filter by user"
        return self.filter(feed__user=user)

    def read(self):
        "Filter to read entries"
        return self.filter(state=ENTRY_READ)

    def unread(self):
        "Filter to unread entries"
        return self.filter(state=ENTRY_UNREAD)

    def saved(self):
        "Filter to saved entries"
        return self.filter(state=ENTRY_SAVED)

    def set_state(self, state, count_unread=False):
        """
        Set a new state for these entries
        If count_unread=True, returns a dict of the new unread count for the
        affected feeds, {feed_pk: unread_count, ...}; if False, returns nothing
        """
        # Get list of feed pks before the update changes this queryset
        # (the UPDATE below can move rows out of this queryset's filter).
        feed_pks = list(self.feeds().values_list('pk', flat=True))
        # Update the state
        self.update(state=state)
        # Look up affected feeds
        feeds = models.loading.get_model('yarr', 'Feed').objects.filter(
            pk__in=feed_pks
        )
        # Update the unread counts for affected feeds
        feeds.update_count_unread()
        if count_unread:
            return feeds.count_unread()

    def feeds(self):
        "Get feeds associated with entries"
        return models.loading.get_model('yarr', 'Feed').objects.filter(
            entries__in=self
        ).distinct()

    def set_expiry(self):
        "Ensure selected entries are set to expire"
        # Only entries without an expiry get one; existing expiries are kept.
        return self.filter(
            expires__isnull=True
        ).update(
            expires=datetime.datetime.now() + datetime.timedelta(
                days=settings.ITEM_EXPIRY,
            )
        )

    def clear_expiry(self):
        "Ensure selected entries will not expire"
        return self.exclude(
            expires__isnull=True
        ).update(expires=None)

    def update_feed_unread(self):
        "Update feed read count cache"
        return self.feeds().update_count_unread()
class EntryManager(models.Manager):
    """Manager exposing EntryQuerySet operations at the model level,
    plus creation of Entry instances from feedparser data."""

    def user(self, user):
        "Filter by user"
        return self.get_query_set().user(user)

    def read(self):
        "Get read entries"
        return self.get_query_set().read()

    def unread(self):
        "Get unread entries"
        return self.get_query_set().unread()

    def saved(self):
        "Get saved entries"
        return self.get_query_set().saved()

    def set_state(self, state, count_unread=False):
        """
        Set a new state for these entries, and update unread count cache

        Consistency fix: ``count_unread`` is now passed through to
        EntryQuerySet.set_state (it was previously dropped), so callers
        can request the new per-feed unread counts as
        {feed_pk: unread_count, ...}.  The default (False) preserves the
        old behaviour of returning nothing.
        """
        return self.get_query_set().set_state(state, count_unread)

    def update_feed_unread(self):
        "Update feed read count cache"
        return self.get_query_set().update_feed_unread()

    def from_feedparser(self, raw):
        """
        Create an Entry object from a raw feedparser entry

        Arguments:
            raw         The raw feedparser entry

        Returns:
            entry       An Entry instance (not saved)

        # ++ TODO: tags
        Any tags will be stored on _tags, to be moved to tags field after save

        The content field must be sanitised HTML of the entry's content, or
        failing that its sanitised summary or description.

        The date field should use the entry's updated date, then its published
        date, then its created date. If none of those are present, it will fall
        back to the current datetime when it is first saved.

        The guid is either the guid according to the feed, or the entry link.

        Currently ignoring the following feedparser attributes:
            author_detail, contributors, created, enclosures, expired,
            license, links, publisher, source, summary_detail,
            title_detail, vcard, xfn
        """
        # Create a new entry
        entry = self.model()

        # Get the title and content, falling back to the description
        entry.title = raw.get('title', '')
        content = raw.get('content', [{'value': ''}])[0]['value']
        if not content:
            content = raw.get('description', '')

        # Sanitise the content against the configured whitelists
        entry.content = bleach.clean(
            content,
            tags=settings.ALLOWED_TAGS,
            attributes=settings.ALLOWED_ATTRIBUTES,
            styles=settings.ALLOWED_STYLES,
            strip=True,
        )

        # Date preference order: updated, published, created.
        # If not provided, needs to be None for update comparison;
        # it will default to the current time when saved.
        date = raw.get(
            'updated_parsed', raw.get(
                'published_parsed', raw.get(
                    'created_parsed', None
                )
            )
        )
        if date is not None:
            entry.date = datetime.datetime.fromtimestamp(
                time.mktime(date)
            )

        entry.url = raw.get('link', '')
        entry.guid = raw.get('guid', entry.url)
        entry.author = raw.get('author', '')
        entry.comments_url = raw.get('comments', '')

        # ++ TODO: tags
        """
        tags = raw.get('tags', None)
        if tags is not None:
            entry._tags = tags
        """

        return entry

    def get_query_set(self):
        """
        Return an EntryQuerySet
        """
        return EntryQuerySet(self.model)
| |
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automated tests for many websites"""
import argparse
from environment import Environment
from websitetest import WebsiteTest
class Alexa(WebsiteTest):
  """Log-in flow for www.alexa.com."""

  def Login(self):
    self.GoTo("https://www.alexa.com/secure/login")
    self.FillUsernameInto("#email")
    self.FillPasswordInto("#pwd")
    self.Submit("#pwd")


class Dropbox(WebsiteTest):
  """Log-in flow for www.dropbox.com."""

  def Login(self):
    self.GoTo("https://www.dropbox.com/login")
    self.FillUsernameInto(".text-input-input[name='login_email']")
    self.FillPasswordInto(".text-input-input[name='login_password']")
    self.Submit(".text-input-input[name='login_password']")


class Facebook(WebsiteTest):
  """Log-in flow for www.facebook.com."""

  def Login(self):
    self.GoTo("https://www.facebook.com")
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='pass']")
    self.Submit("[name='pass']")


class Github(WebsiteTest):
  """Log-in flow for github.com."""

  def Login(self):
    self.GoTo("https://github.com/login")
    self.FillUsernameInto("[name='login']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='commit']")
class Google(WebsiteTest):
  """Log-in flow for accounts.google.com."""

  def Login(self):
    self.GoTo("https://accounts.google.com/ServiceLogin?sacu=1&continue=")
    self.FillUsernameInto("#Email")
    self.FillPasswordInto("#Passwd")
    self.Submit("#Passwd")


class Imgur(WebsiteTest):
  """Log-in flow for imgur.com."""

  def Login(self):
    self.GoTo("https://imgur.com/signin")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")


class Liveinternet(WebsiteTest):
  """Log-in flow for liveinternet.ru."""

  def Login(self):
    self.GoTo("http://liveinternet.ru/journals.php?s=&action1=login")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")


class Linkedin(WebsiteTest):
  """Log-in flow for www.linkedin.com."""

  def Login(self):
    self.GoTo("https://www.linkedin.com")
    self.FillUsernameInto("#session_key-login")
    self.FillPasswordInto("#session_password-login")
    self.Submit("#session_password-login")


class Mailru(WebsiteTest):
  """Log-in flow for mail.ru."""

  def Login(self):
    self.GoTo("https://mail.ru")
    self.FillUsernameInto("#mailbox__login")
    self.FillPasswordInto("#mailbox__password")
    self.Submit("#mailbox__password")
class Nytimes(WebsiteTest):
  """Log-in flow for myaccount.nytimes.com."""

  def Login(self):
    self.GoTo("https://myaccount.nytimes.com/auth/login")
    self.FillUsernameInto("#userid")
    self.FillPasswordInto("#password")
    self.Submit("#password")


class Odnoklassniki(WebsiteTest):
  """Log-in flow for ok.ru."""

  def Login(self):
    self.GoTo("https://ok.ru")
    self.FillUsernameInto("#field_email")
    self.FillPasswordInto("#field_password")
    self.Submit("#field_password")


class Pinterest(WebsiteTest):
  """Log-in flow for www.pinterest.com."""

  def Login(self):
    self.GoTo("https://www.pinterest.com/login/")
    self.FillUsernameInto("[name='username_or_email']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")


class Reddit(WebsiteTest):
  """Log-in flow for www.reddit.com (login opened via a click)."""

  def Login(self):
    self.GoTo("http://www.reddit.com")
    self.Click(".user .login-required")
    self.FillUsernameInto("#user_login")
    self.FillPasswordInto("#passwd_login")
    self.Wait(2)
    self.Submit("#passwd_login")


class Tumblr(WebsiteTest):
  """Log-in flow for www.tumblr.com."""

  def Login(self):
    self.GoTo("https://www.tumblr.com/login")
    self.FillUsernameInto("#signup_email")
    self.FillPasswordInto("#signup_password")
    self.Submit("#signup_password")
class Twitter(WebsiteTest):
  """Log-in flow for twitter.com."""

  def Login(self):
    # Bug fix: URL was "https:///twitter.com" (empty authority, path
    # "/twitter.com"); use a well-formed URL instead of relying on
    # browser leniency.
    self.GoTo("https://twitter.com")
    self.FillUsernameInto("#signin-email")
    self.FillPasswordInto("#signin-password")
    self.Submit("#signin-password")
class Vkontakte(WebsiteTest):
  """Log-in flow for vk.com."""

  def Login(self):
    # Bug fix: URL was "https:///vk.com" (empty authority, path
    # "/vk.com"); use a well-formed URL instead of relying on browser
    # leniency.
    self.GoTo("https://vk.com")
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='pass']")
    self.Submit("[name='pass']")
class Wikia(WebsiteTest):
  """Log-in flow for wikia.com (account menu opened via a click)."""

  def Login(self):
    # Idiom fix: dropped the stray trailing semicolons.
    self.GoTo("https://wikia.com")
    self.Click("#AccountNavigation")
    self.FillUsernameInto("#usernameInput")
    self.FillPasswordInto("#passwordInput")
    self.Submit("input.login-button")
class Wikipedia(WebsiteTest):
  """Log-in flow for en.wikipedia.org."""

  def Login(self):
    self.GoTo("https://en.wikipedia.org/w/index.php?title=Special:UserLogin")
    self.FillUsernameInto("#wpName1")
    self.FillPasswordInto("#wpPassword1")
    self.Submit("#wpPassword1")


class Wordpress(WebsiteTest):
  """Log-in flow for de.wordpress.com."""

  def Login(self):
    self.GoTo("https://de.wordpress.com/wp-login.php")
    self.FillUsernameInto("[name='log']")
    self.FillPasswordInto("[name='pwd']")
    self.Submit("[name='pwd']")


class Yahoo(WebsiteTest):
  """Log-in flow for login.yahoo.com."""

  def Login(self):
    self.GoTo("https://login.yahoo.com")
    self.FillUsernameInto("#username")
    self.FillPasswordInto("#passwd")
    self.Submit("#passwd")


class Yandex(WebsiteTest):
  """Log-in flow for mail.yandex.com."""

  def Login(self):
    self.GoTo("https://mail.yandex.com")
    self.FillUsernameInto("#b-mail-domik-username11")
    self.FillPasswordInto("#b-mail-domik-password11")
    self.Click(".b-mail-button__button")
# Fails due to test framework issue(?).
class Aliexpress(WebsiteTest):
  """Log-in flow for aliexpress.com (login form lives in an iframe)."""

  def Login(self):
    self.GoTo("https://login.aliexpress.com/buyer.htm?return=http%3A%2F%2Fwww.aliexpress.com%2F")
    self.WaitUntilDisplayed("iframe#alibaba-login-box")
    frame = self.driver.find_element_by_css_selector("iframe#alibaba-login-box")
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("#fm-login-id")
    self.FillPasswordInto("#fm-login-password")
    self.Click("#fm-login-submit")
# Fails to save password.
class Adobe(WebsiteTest):
  """Log-in flow for adobe.com via the Adobe IMS identity provider."""

  def Login(self):
    # The long query string encodes the IMS OAuth-style callback chain
    # back to www.adobe.com; kept verbatim.
    self.GoTo("https://adobeid-na1.services.adobe.com/renga-idprovider/pages/l"
              "ogin?callback=https%3A%2F%2Fims-na1.adobelogin.com%2Fims%2Fadob"
              "eid%2Fadobedotcom2%2FAdobeID%2Ftoken%3Fredirect_uri%3Dhttps%253"
              "A%252F%252Fwww.adobe.com%252F%2523from_ims%253Dtrue%2526old_has"
              "h%253D%2526client_id%253Dadobedotcom2%2526scope%253Dcreative_cl"
              "oud%25252CAdobeID%25252Copenid%25252Cgnav%25252Cread_organizati"
              "ons%25252Cadditional_info.projectedProductContext%2526api%253Da"
              "uthorize&client_id=adobedotcom2&scope=creative_cloud%2CAdobeID%"
              "2Copenid%2Cgnav%2Cread_organizations%2Cadditional_info.projecte"
              "dProductContext&display=web_v2&denied_callback=https%3A%2F%2Fim"
              "s-na1.adobelogin.com%2Fims%2Fdenied%2Fadobedotcom2%3Fredirect_u"
              "ri%3Dhttps%253A%252F%252Fwww.adobe.com%252F%2523from_ims%253Dtr"
              "ue%2526old_hash%253D%2526client_id%253Dadobedotcom2%2526scope%2"
              "53Dcreative_cloud%25252CAdobeID%25252Copenid%25252Cgnav%25252Cr"
              "ead_organizations%25252Cadditional_info.projectedProductContext"
              "%2526api%253Dauthorize%26response_type%3Dtoken&relay=afebfef8-e"
              "2b3-4c0e-9c94-07baf205bae8&locale=en_US&flow_type=token&dc=fals"
              "e&client_redirect=https%3A%2F%2Fims-na1.adobelogin.com%2Fims%2F"
              "redirect%2Fadobedotcom2%3Fclient_redirect%3Dhttps%253A%252F%252"
              "Fwww.adobe.com%252F%2523from_ims%253Dtrue%2526old_hash%253D%252"
              "6client_id%253Dadobedotcom2%2526scope%253Dcreative_cloud%25252C"
              "AdobeID%25252Copenid%25252Cgnav%25252Cread_organizations%25252C"
              "additional_info.projectedProductContext%2526api%253Dauthorize&i"
              "dp_flow_type=login")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("#sign_in")
# Bug not reproducible without test.
class Amazon(WebsiteTest):
  """Log-in flow for www.amazon.com."""

  def Login(self):
    self.GoTo(
        "https://www.amazon.com/ap/signin?openid.assoc_handle=usflex"
        "&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net"
        "%2Fauth%2F2.0")
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='password']")
    self.Click("#signInSubmit-input")


# Password not saved.
class Ask(WebsiteTest):
  """Log-in flow for www.ask.com (form revealed by repeated clicks)."""

  def Login(self):
    self.GoTo("http://www.ask.com/answers/browse?qsrc=321&q=&o=0&l=dir#")
    while not self.IsDisplayed("[name='username']"):
      self.Click("#a16CnbSignInText")
      self.Wait(1)
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Click(".signin_show.signin_submit")
# Password not saved.
class Baidu(WebsiteTest):
  """Log-in flow for passport.baidu.com."""

  def Login(self):
    self.GoTo("https://passport.baidu.com")
    self.FillUsernameInto("[name='userName']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='password']")


# Chrome crashes.
class Buzzfeed(WebsiteTest):
  """Log-in flow for www.buzzfeed.com."""

  def Login(self):
    self.GoTo("http://www.buzzfeed.com/signin")
    self.FillUsernameInto("#login-username")
    self.FillPasswordInto("#login-password")
    self.Submit("#login-password")


# http://crbug.com/368690
class Cnn(WebsiteTest):
  """Log-in flow for www.cnn.com (overlay-based login)."""

  def Login(self):
    self.GoTo("http://www.cnn.com")
    self.Wait(5)
    while not self.IsDisplayed(".cnnOvrlyBtn.cnnBtnLogIn"):
      self.ClickIfClickable("#hdr-auth .no-border.no-pad-right a")
      self.Wait(1)
    self.Click(".cnnOvrlyBtn.cnnBtnLogIn")
    self.FillUsernameInto("#cnnOverlayEmail1l")
    self.FillPasswordInto("#cnnOverlayPwd")
    self.Click(".cnnOvrlyBtn.cnnBtnLogIn")
    self.Click(".cnnOvrlyBtn.cnnBtnLogIn")
    self.Wait(5)


# Fails due to "Too many failed logins. Please wait a minute".
# http://crbug.com/466953
class Craigslist(WebsiteTest):
  """Log-in flow for accounts.craigslist.org."""

  def Login(self):
    self.GoTo("https://accounts.craigslist.org/login")
    self.FillUsernameInto("#inputEmailHandle")
    self.FillPasswordInto("#inputPassword")
    self.Submit("button")


# Crashes.
class Dailymotion(WebsiteTest):
  """Log-in flow for www.dailymotion.com."""

  def Login(self):
    self.GoTo("http://www.dailymotion.com/gb")
    self.Click(".sd_header__login span")
    self.FillUsernameInto("[name='username']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[name='save']")


# http://crbug.com/368690
class Ebay(WebsiteTest):
  """Log-in flow for signin.ebay.com."""

  def Login(self):
    self.GoTo("https://signin.ebay.com/")
    self.FillUsernameInto("[name='userid']")
    self.FillPasswordInto("[name='pass']")
    self.Submit("[name='pass']")


# Iframe, password saved but not autofilled.
class Espn(WebsiteTest):
  """Log-in flow for espn.go.com (login form in an overlay iframe)."""

  def Login(self):
    self.GoTo("http://espn.go.com/")
    while not self.IsDisplayed("#cboxLoadedContent iframe"):
      self.Click("#signin .cbOverlay")
      self.Wait(1)
    frame = self.driver.find_element_by_css_selector("#cboxLoadedContent "
                                                     "iframe")
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("#username")
    self.FillPasswordInto("#password")
    while self.IsDisplayed("#password"):
      self.ClickIfClickable("#submitBtn")
      self.Wait(1)
# Fails due to test framework issue.
class Flipkart(WebsiteTest):
  """Log-in flow for www.flipkart.com."""

  def Login(self):
    self.GoTo("http://www.flipkart.com/")
    self.Wait(2)
    self.Click(".header-links .js-login")
    self.FillUsernameInto("#login_email_id")
    self.FillPasswordInto("#login_password")
    self.Submit("#login_password")


# Iframe, password saved but not autofilled.
class Instagram(WebsiteTest):
  """Log-in flow for instagram.com (login form in an iframe)."""

  def Login(self):
    self.GoTo("https://instagram.com/accounts/login/")
    self.Wait(5)
    frame = self.driver.find_element_by_css_selector(".hiFrame")
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("#lfFieldInputUsername")
    self.FillPasswordInto("#lfFieldInputPassword")
    self.Submit(".lfSubmit")


# http://crbug.com/367768
class Live(WebsiteTest):
  """Log-in flow for login.live.com."""

  def Login(self):
    self.GoTo("https://login.live.com")
    self.FillUsernameInto("[name='login']")
    self.FillPasswordInto("[name='passwd']")
    self.Submit("[name='passwd']")


# http://crbug.com/368690
class One63(WebsiteTest):
  """Log-in flow for www.163.com (login revealed on hover)."""

  def Login(self):
    self.GoTo("http://www.163.com")
    self.HoverOver("#js_N_navHighlight")
    self.FillUsernameInto("#js_loginframe_username")
    self.FillPasswordInto(".ntes-loginframe-label-ipt[type='password']")
    self.Click(".ntes-loginframe-btn")


class StackExchange(WebsiteTest):
  """Log-in flow for stackexchange.com (login form in an iframe)."""

  def Login(self):
    self.GoTo("https://stackexchange.com/users/login#log-in")
    iframe_selector = "#affiliate-signin-iframe"
    self.WaitUntilDisplayed(iframe_selector)
    frame = self.driver.find_element_by_css_selector(iframe_selector)
    self.driver.switch_to_frame(frame)
    self.FillUsernameInto("[name='email']")
    self.FillPasswordInto("[name='password']")
    self.Submit("[value='Sign In']")


# http://crbug.com/368690
class Vube(WebsiteTest):
  """Log-in flow for vube.com (Angular login dialog)."""

  def Login(self):
    self.GoTo("https://vube.com")
    self.Click("[vube-login='']")
    self.FillUsernameInto("[ng-model='login.user']")
    self.FillPasswordInto("[ng-model='login.pass']")
    while (self.IsDisplayed("[ng-model='login.pass']")
           and not self.IsDisplayed(".prompt.alert")):
      self.ClickIfClickable("[ng-click='login()']")
      self.Wait(1)


# Password not saved.
class Ziddu(WebsiteTest):
  """Log-in flow for www.ziddu.com."""

  def Login(self):
    self.GoTo("http://www.ziddu.com/login.php")
    self.FillUsernameInto("#email")
    self.FillPasswordInto("#password")
    self.Click(".login input")
# Registry mapping test name -> WebsiteTest instance; RunTest looks tests
# up here by name.  Entries with a trailing comment are known to fail for
# the stated reason (see the matching class comments above).
all_tests = {
    "163": One63("163"), # http://crbug.com/368690
    "adobe": Adobe("adobe"), # Password saving not offered.
    "alexa": Alexa("alexa"),
    "aliexpress": Aliexpress("aliexpress"), # Fails due to test framework issue.
    "amazon": Amazon("amazon"), # Bug not reproducible without test.
    "ask": Ask("ask"), # Password not saved.
    "baidu": Baidu("baidu"), # Password not saved.
    "buzzfeed": Buzzfeed("buzzfeed"),
    "cnn": Cnn("cnn"), # http://crbug.com/368690
    "craigslist": Craigslist("craigslist"), # Too many failed logins per time.
    "dailymotion": Dailymotion("dailymotion"), # Crashes.
    "dropbox": Dropbox("dropbox"),
    "ebay": Ebay("ebay"), # http://crbug.com/368690
    "espn": Espn("espn"), # Iframe, password saved but not autofilled.
    "facebook": Facebook("facebook"),
    "flipkart": Flipkart("flipkart"), # Fails due to test framework issue.
    "github": Github("github"),
    "google": Google("google"),
    "imgur": Imgur("imgur"),
    "instagram": Instagram("instagram"), # Iframe, pw saved but not autofilled.
    "linkedin": Linkedin("linkedin"),
    "liveinternet": Liveinternet("liveinternet"),
    "live": Live("live", username_not_auto=True), # http://crbug.com/367768
    "mailru": Mailru("mailru"),
    "nytimes": Nytimes("nytimes"),
    "odnoklassniki": Odnoklassniki("odnoklassniki"),
    "pinterest": Pinterest("pinterest"),
    "reddit": Reddit("reddit", username_not_auto=True),
    "stackexchange": StackExchange("stackexchange"), # Iframe, not autofilled.
    "tumblr": Tumblr("tumblr", username_not_auto=True),
    "twitter": Twitter("twitter"),
    "vkontakte": Vkontakte("vkontakte"),
    "vube": Vube("vube"), # http://crbug.com/368690
    "wikia": Wikia("wikia"),
    "wikipedia": Wikipedia("wikipedia", username_not_auto=True),
    "wordpress": Wordpress("wordpress"),
    "yahoo": Yahoo("yahoo", username_not_auto=True),
    "yandex": Yandex("yandex"),
    "ziddu": Ziddu("ziddu"), # Password not saved.
    }
def SaveResults(environment_tests_results, environment_save_path):
"""Save the test results in an xml file.
Args:
environment_tests_results: A list of the TestResults that are going to be
saved.
environment_save_path: The file where the results are going to be saved.
If it's None, the results are not going to be stored.
Raises:
Exception: An exception is raised if the file is not found.
"""
if environment_save_path:
xml = "<result>"
for (name, test_type, success, failure_log) in environment_tests_results:
xml += (
"<test name='{0}' successful='{1}' type='{2}'>{3}</test>".format(
name, success, test_type, failure_log))
xml += "</result>"
with open(environment_save_path, "w") as save_file:
save_file.write(xml)
def RunTest(chrome_path, chromedriver_path, profile_path,
            environment_passwords_path, website_test_name, test_type):
    """Runs the test for the specified website.

    Args:
      chrome_path: The chrome binary file.
      chromedriver_path: The chromedriver binary file.
      profile_path: The chrome testing profile folder.
      environment_passwords_path: The usernames and passwords file.
      website_test_name: Name of the website to test (refer to keys in
          all_tests above).
      test_type: One of the WebsiteTest.TEST_TYPE_* constants selecting the
          test phase to run.

    Returns:
      The results of the test as list of TestResults.

    Raises:
      Exception: An exception is raised if one of the tests for the website
          fails, or if the website name is not known.
    """
    # Automatic saving is only enabled for the save-and-autofill phase.
    enable_automatic_password_saving = (
        test_type == WebsiteTest.TEST_TYPE_SAVE_AND_AUTOFILL)
    environment = Environment(chrome_path, chromedriver_path, profile_path,
                              environment_passwords_path,
                              enable_automatic_password_saving)
    if website_test_name in all_tests:
        environment.AddWebsiteTest(all_tests[website_test_name])
    else:
        raise Exception("Test name {} is unknown.".format(website_test_name))
    try:
        environment.RunTestsOnSites(test_type)
    finally:
        # Always tear the browser down, even when a test raises, so a failing
        # run does not leak a chrome/chromedriver process.
        environment.Quit()
    return environment.tests_results
def main():
    """Command-line entry point: run all three test phases for one website."""
    parser = argparse.ArgumentParser(
        description="Password Manager automated tests help.")
    parser.add_argument(
        "--chrome-path", action="store", dest="chrome_path",
        help="Set the chrome path (required).", required=True)
    parser.add_argument(
        "--chromedriver-path", action="store", dest="chromedriver_path",
        help="Set the chromedriver path (required).", required=True)
    parser.add_argument(
        "--profile-path", action="store", dest="profile_path",
        help="Set the profile path (required). You just need to choose a "
        "temporary empty folder. If the folder is not empty all its content "
        "is going to be removed.",
        required=True)
    parser.add_argument(
        "--passwords-path", action="store", dest="passwords_path",
        help="Set the usernames/passwords path (required).", required=True)
    parser.add_argument("--save-path", action="store", dest="save_path",
                        help="Write the results in a file.")
    parser.add_argument("test", help="Test to be run.")
    args = parser.parse_args()

    save_path = args.save_path if args.save_path else None

    # Run the three phases in order and accumulate their results.
    tests_results = []
    for phase in (WebsiteTest.TEST_TYPE_PROMPT_FAIL,
                  WebsiteTest.TEST_TYPE_PROMPT_SUCCESS,
                  WebsiteTest.TEST_TYPE_SAVE_AND_AUTOFILL):
        tests_results += RunTest(
            args.chrome_path, args.chromedriver_path, args.profile_path,
            args.passwords_path, args.test, phase)
    SaveResults(tests_results, save_path)
if __name__ == "__main__":
    main()
| |
"""
An HTTP trigger Azure Function that returns a SAS token for Azure Storage for the specified container and blob name.
You can also specify access permissions for the container/blob name and optionally its token time-to-live period.
The SAS token expires in an hour by default.
[HTTP Request body format]
HTTP Request body must include the following parameters:
{
'permission': '<Signed permission for shared access signature (Required)>',
'container': '<Container name to access (Required)>',
'blobname': '<Blob object name to access (Optional)>',
'ttl': '<Token time to live period in hours. 1hour by default (Optional)>'
}
The following values can be used for permissions:
"a" (Add), "r" (Read), "w" (Write), "d" (Delete), "l" (List)
Concatenate multiple permissions, such as "rwa" = Read, Write, Add
Sample Request Body
{
'permission': "rl",
'container': "functions",
'blobname': "yokawasa.png"
}
[Response body format]
HTTP response body format is:
{
'token': '<Shared Access Signature Token string>',
'url' : '<SAS resource URI>'
}
Sample Response Body
{"token": "sv=2018-03-28&ss=b&srt=o&sp=rl&se=2019-03-29T14%3A02%3A37Z&st=2019-03-29T11%3A57%3A37Z&spr=https&sig=Sh7RAa5MZBk7gfv0haCbEbllFXoiOWJDK9itzPeqURE%3D", "url": "https://azfuncv2linuxstore.blob.core.windows.net/functiontest/sample.jpg?sv=2018-03-28&ss=b&srt=o&sp=rl&se=2019-03-29T14%3A02%3A37Z&st=2019-03-29T11%3A57%3A37Z&spr=https&sig=Sh7RAa5MZBk7gfv0haCbEbllFXoiOWJDK9itzPeqURE%3D" }
"""
import os
import json
import base64
import hmac
import hashlib
import urllib.parse
from datetime import datetime, timedelta
import logging
import azure.functions as func
_ALLOWED_HTTP_METHOD = "POST"
_AZURE_STORAGE_API_VERSION = "2018-03-28"
_AZURE_STORAGE_CONN_STRING_ENV_NAME = "MyStorageConnectionString"
_SAS_TOKEN_DEFAULT_TTL = 1
connString = os.environ[_AZURE_STORAGE_CONN_STRING_ENV_NAME]
def write_http_response(status, body_dict):
    """Build the JSON-serialized HTTP response envelope.

    Args:
        status: HTTP status code to report.
        body_dict: dict serialized as the JSON response body.

    Returns:
        A JSON string with 'status', 'body' and 'headers' keys — the shape
        this function app returns to the Azure Functions worker.
    """
    return_dict = {
        "status": status,
        "body": json.dumps(body_dict),
        "headers": {
            "Content-Type": "application/json"
        }
    }
    return json.dumps(return_dict)
def generate_sas_token(storage_account, storage_key, permission, token_ttl, container_name, blob_name=None):
    """Generate an account SAS token and resource URL for Azure Blob Storage.

    Args:
        storage_account: Storage account name.
        storage_key: Base64-encoded account key used to sign the token.
        permission: Signed permission string, e.g. "rl".
        token_ttl: Token time-to-live in hours.
        container_name: Container to grant access to.
        blob_name: Optional blob within the container; when given the signed
            resource type is object-only and the URL points at the blob.

    Returns:
        dict with 'token' (the SAS query string) and 'url' (resource URI with
        the token appended).
    """
    sp = permission
    # Set start time to five minutes ago to avoid clock skew.
    st = (datetime.utcnow() - timedelta(minutes=5)).strftime("%Y-%m-%dT%H:%M:%SZ")
    se = (datetime.utcnow() + timedelta(hours=token_ttl)).strftime("%Y-%m-%dT%H:%M:%SZ")
    # 'o' (object) when a specific blob is targeted, otherwise container+object.
    srt = 'o' if blob_name else 'co'
    # String-to-sign: field order is mandated by the storage service.
    string_to_sign = "{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}\n{8}\n".format(
        storage_account,            # 0. account name
        sp,                         # 1. signed permission (sp)
        'b',                        # 2. signed service (ss) - blob
        srt,                        # 3. signed resource type (srt)
        st,                         # 4. signed start time (st)
        se,                         # 5. signed expire time (se)
        '',                         # 6. signed ip (unrestricted)
        'https',                    # 7. signed protocol
        _AZURE_STORAGE_API_VERSION) # 8. signed version
    # HMAC-SHA256 over the string-to-sign, keyed with the decoded account key.
    # ('digest' rather than 'hash' to avoid shadowing the builtin.)
    digest = hmac.new(
        base64.b64decode(storage_key),
        string_to_sign.encode(encoding='utf-8'),
        hashlib.sha256
    ).digest()
    # Decode to str so the query dict holds text; urlencode percent-escapes
    # bytes and str to the identical result for base64 (ASCII) data.
    sig = base64.b64encode(digest).decode('ascii')
    querystring = {
        'sv': _AZURE_STORAGE_API_VERSION,
        'ss': 'b',
        'srt': srt,
        'sp': sp,
        'se': se,
        'st': st,
        'spr': 'https',
        'sig': sig,
    }
    sastoken = urllib.parse.urlencode(querystring)
    if blob_name:
        sas_url = "https://{0}.blob.core.windows.net/{1}/{2}?{3}".format(
            storage_account,
            container_name,
            blob_name,
            sastoken)
    else:
        sas_url = "https://{0}.blob.core.windows.net/{1}?{2}".format(
            storage_account,
            container_name,
            sastoken)
    return {
        'token': sastoken,
        'url': sas_url
    }
def main(req: func.HttpRequest) -> str:
    """HTTP trigger entry point: validate the request and return a SAS token.

    Expects a POST whose JSON body carries 'permission' and 'container'
    (required) plus optional 'blobname' and 'ttl'. Returns the JSON response
    envelope produced by write_http_response().
    """
    logging.info('Python HTTP trigger function processed a request.')

    # Parse the account name and key out of the connection string.
    storage_account = None
    storage_key = None
    for part in connString.split(';'):
        kv = part.split('=', 1)
        if len(kv) != 2:
            continue
        if kv[0] == 'AccountName':
            storage_account = kv[1]
        if kv[0] == 'AccountKey':
            storage_key = kv[1]
    if not storage_account or not storage_key:
        return write_http_response(
            400,
            { 'message': 'Function configuration error: NO Azure Storage connection string found!' }
        )

    # Check HTTP method
    if req.method.lower() != _ALLOWED_HTTP_METHOD.lower():
        return write_http_response(
            405,
            { 'message': 'Only POST HTTP Method is allowed' }
        )

    # Parse the JSON body; an empty or malformed body raises ValueError.
    try:
        req_body = req.get_json()
    except ValueError:
        return write_http_response(
            400,
            { 'message': 'Invalid HTTP request body' }
        )

    # Required parameters
    if not req_body.get('permission') or not req_body.get('container'):
        return write_http_response(
            400,
            { 'message': 'Permission and container parameters must be included in HTTP request body' }
        )
    permission = req_body.get('permission')
    container_name = req_body.get('container')
    blob_name = req_body.get('blobname')

    # Optional ttl: must be a positive integer. A non-numeric value previously
    # escaped as an unhandled ValueError (HTTP 500); report 400 instead.
    token_ttl = _SAS_TOKEN_DEFAULT_TTL
    if req_body.get('ttl'):
        try:
            token_ttl = int(req_body.get('ttl'))
        except (TypeError, ValueError):
            token_ttl = 0   # force the validation error below
        if token_ttl < 1:
            return write_http_response(
                400,
                { 'message': 'Token ttl must be digit and more than 0' }
            )

    # Generate SAS Token
    token_dict = generate_sas_token(
        storage_account,
        storage_key,
        permission,
        token_ttl,
        container_name,
        blob_name
    )
    # NOTE(review): this logs the full SAS token (a credential) at info level —
    # consider redacting the signature in production logs.
    logging.info("Generated Token token=>{} url=>{}".format(token_dict['token'], token_dict['url']))

    # Write HTTP Response
    return write_http_response(200, token_dict)
| |
"""
Network Utilities
(from web.py)
"""
# Public API of this module: address/port validators, URL and HTML quoting
# helpers, and HTTP date formatting/parsing (all doctest-covered).
__all__ = [
    "validipaddr", "validip6addr", "validipport", "validip", "validaddr",
    "urlquote",
    "httpdate", "parsehttpdate",
    "htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
import re
import socket
def validip6addr(address):
    """
    Returns True if `address` is a valid IPv6 address.

        >>> validip6addr('::')
        True
        >>> validip6addr('aaaa:bbbb:cccc:dddd::1')
        True
        >>> validip6addr('1:2:3:4:5:6:7:8:9:10')
        False
        >>> validip6addr('12:10')
        False
    """
    # Delegate validation to the OS parser; an unparsable address raises.
    try:
        socket.inet_pton(socket.AF_INET6, address)
        return True
    except socket.error:
        return False
def validipaddr(address):
    """
    Returns True if `address` is a valid IPv4 address.

        >>> validipaddr('192.168.1.1')
        True
        >>> validipaddr('192.168.1.800')
        False
        >>> validipaddr('192.168.1')
        False
    """
    octets = address.split('.')
    # Exactly four dot-separated parts, each an integer in [0, 255].
    if len(octets) != 4:
        return False
    try:
        return all(0 <= int(octet) <= 255 for octet in octets)
    except ValueError:
        # A non-numeric octet is simply not a valid address.
        return False
def validipport(port):
    """
    Returns True if `port` is a valid IPv4 port.

        >>> validipport('9000')
        True
        >>> validipport('foo')
        False
        >>> validipport('1000000')
        False
    """
    try:
        # Ports are 16-bit unsigned integers.
        return 0 <= int(port) <= 65535
    except ValueError:
        return False
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
    """
    Returns `(ip_address, port)` from string `ip_addr_port`

        >>> validip('1.2.3.4')
        ('1.2.3.4', 8080)
        >>> validip('80')
        ('0.0.0.0', 80)
        >>> validip('192.168.0.1:85')
        ('192.168.0.1', 85)
        >>> validip('::')
        ('::', 8080)
        >>> validip('[::]:88')
        ('::', 88)
        >>> validip('[::1]:80')
        ('::1', 80)

    Raises ValueError when the input is not a valid address and/or port.
    """
    addr = defaultaddr
    port = defaultport

    # Matt Boswell's code to check for ipv6 first
    match = re.search(r'^\[([^]]+)\](?::(\d+))?$', ip)  # check for [ipv6]:port
    if match:
        if validip6addr(match.group(1)):
            if match.group(2):
                if validipport(match.group(2)):
                    return (match.group(1), int(match.group(2)))
            else:
                return (match.group(1), port)
    else:
        if validip6addr(ip):
            return (ip, port)
    # end ipv6 code

    ip = ip.split(":", 1)
    if len(ip) == 1:
        # Bare address, bare port, or empty string (keep both defaults).
        if not ip[0]:
            pass
        elif validipaddr(ip[0]):
            addr = ip[0]
        elif validipport(ip[0]):
            port = int(ip[0])
        else:
            raise ValueError(':'.join(ip) + ' is not a valid IP address/port')
    elif len(ip) == 2:
        addr, port = ip
        # BUGFIX: the original condition lacked parentheses and only raised
        # when the address was invalid AND the port was valid; an invalid
        # port then crashed in int() below. Both parts must validate.
        if not (validipaddr(addr) and validipport(port)):
            raise ValueError(':'.join(ip) + ' is not a valid IP address/port')
        port = int(port)
    else:
        raise ValueError(':'.join(ip) + ' is not a valid IP address/port')
    return (addr, port)
def validaddr(string_):
    """
    Returns either (ip_address, port) or "/path/to/socket" from string_

        >>> validaddr('/path/to/socket')
        '/path/to/socket'
        >>> validaddr('8000')
        ('0.0.0.0', 8000)
        >>> validaddr('127.0.0.1')
        ('127.0.0.1', 8080)
        >>> validaddr('127.0.0.1:8000')
        ('127.0.0.1', 8000)
        >>> validip('[::1]:80')
        ('::1', 80)
        >>> validaddr('fff')
        Traceback (most recent call last):
            ...
        ValueError: fff is not a valid IP address/port
    """
    # A slash anywhere marks a unix-domain socket path; otherwise parse as
    # host and/or port.
    return string_ if '/' in string_ else validip(string_)
def urlquote(val):
    """
    Quotes a string for use in a URL.

        >>> urlquote('://?f=1&j=1')
        '%3A//%3Ff%3D1%26j%3D1'
        >>> urlquote(None)
        ''
        >>> urlquote(u'\u203d')
        '%E2%80%BD'
    """
    if val is None:
        return ''
    # unicode must be UTF-8 encoded before quoting; anything else is
    # stringified first (Python 2 semantics).
    encoded = val.encode('utf-8') if isinstance(val, unicode) else str(val)
    return urllib.quote(encoded)
def httpdate(date_obj):
    """
    Formats a datetime object for use in HTTP headers.

        >>> import datetime
        >>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
        'Thu, 01 Jan 1970 01:01:01 GMT'
    """
    # RFC 1123 format; HTTP dates are always expressed in GMT.
    http_format = "%a, %d %b %Y %H:%M:%S GMT"
    return date_obj.strftime(http_format)
def parsehttpdate(string_):
    """
    Parses an HTTP date into a datetime object.

        >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
        datetime.datetime(1970, 1, 1, 1, 1, 1)
    """
    try:
        parsed = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
    except ValueError:
        # Unparsable dates map to None rather than raising.
        return None
    return datetime.datetime(*parsed[:6])
def htmlquote(text):
    r"""
    Encodes `text` for raw use in HTML.

        >>> htmlquote(u"<'&\">")
        u'&lt;&#39;&amp;&quot;&gt;'
    """
    # Restore the entity replacements (the originals were mangled into no-ops
    # by HTML-unescaping of the source). '&' must be escaped first so the
    # entities added below are not themselves re-escaped.
    text = text.replace(u"&", u"&amp;")  # Must be done first!
    text = text.replace(u"<", u"&lt;")
    text = text.replace(u">", u"&gt;")
    text = text.replace(u"'", u"&#39;")
    text = text.replace(u'"', u"&quot;")
    return text
def htmlunquote(text):
    r"""
    Decodes `text` that's HTML quoted.

        >>> htmlunquote(u'&lt;&#39;&amp;&quot;&gt;')
        u'<\'&">'
    """
    # Restore the entity replacements (the originals were mangled into no-ops
    # by HTML-unescaping of the source). '&amp;' must be decoded last so a
    # freshly produced '&' is not re-interpreted as an entity prefix.
    text = text.replace(u"&quot;", u'"')
    text = text.replace(u"&#39;", u"'")
    text = text.replace(u"&gt;", u">")
    text = text.replace(u"&lt;", u"<")
    text = text.replace(u"&amp;", u"&")  # Must be done last!
    return text
def websafe(val):
    r"""Converts `val` so that it is safe for use in Unicode HTML.

        >>> websafe("<'&\">")
        u'&lt;&#39;&amp;&quot;&gt;'
        >>> websafe(None)
        u''
        >>> websafe(u'\u203d')
        u'\u203d'
        >>> websafe('\xe2\x80\xbd')
        u'\u203d'
    """
    # None maps to the empty unicode string.
    if val is None:
        return u''
    # Byte strings are assumed to be UTF-8 encoded (Python 2 semantics).
    elif isinstance(val, str):
        val = val.decode('utf-8')
    # Any other object is coerced through unicode() before quoting.
    elif not isinstance(val, unicode):
        val = unicode(val)
    return htmlquote(val)
# Run the embedded doctests when this module is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Michael Hogg
# This file is part of pyvct - See LICENSE.txt for information on usage and redistribution
import os
from abaqus import session
from abaqusConstants import ELEMENT_NODAL
from cythonMods import createElementMap
import elementTypes as et
import copy
from odbAccess import OdbMeshElementType
import numpy as np
# ~~~~~~~~~~
def convert3Dto1Dindex(i,j,k,NX,NY,NZ):
    """Converts 3D array index (i,j,k) to the equivalent flat 1D index.

    The grid is NX x NY x NZ with i varying fastest (NZ is unused but kept
    for a symmetric signature with convert1Dto3Dindex)."""
    return i + NX*(j + NY*k)
# ~~~~~~~~~~
def convert1Dto3Dindex(index,NX,NY,NZ):
    """Converts 1D array index to 3D array index [i,j,k].

    Inverse of convert3Dto1Dindex for an NX x NY x NZ grid. Uses floor
    division (//) so the arithmetic stays integral under Python 3 as well
    as Python 2 ('/' on ints would yield floats in Python 3)."""
    k = index // (NX*NY)
    j = (index - k*NX*NY) // NX
    i = index - k*NX*NY - j*NX
    return [i,j,k]
# ~~~~~~~~~~
def transformPoint(TM,point):
    """Apply the 4x4 homogeneous transform TM to a 3D point; returns xyz."""
    homogeneous = np.append(point, 1.0)
    return np.dot(TM, homogeneous)[:3]
# ~~~~~~~~~~
def createTransformationMatrix(Ma,Mb,Vab,rel='a'):
    """
    Creates a transformation matrix that can be used to transform a point from csys a to csys b.
    Ma  = 3x3 matrix containing unit vectors of orthogonal coordinate directions for csys a
    Mb  = 3x3 matrix containing unit vectors of orthogonal coordinate directions for csys b
    Vab = 3x1 vector from origin of csys a to csys b
    rel = 'a' or 'b' = Character to indicate if Vab is relative to csys a or csys b

    Returns a 4x4 homogeneous transform, or None if `rel` is invalid.
    """
    if rel != 'a' and rel != 'b':
        return None
    a1, a2, a3 = Ma
    b1, b2, b3 = Mb
    # Rotation matrix: rows are the b axes expressed in the a frame.
    # np.float was removed from NumPy >= 1.24; builtin float is the exact
    # equivalent dtype on all versions.
    R = np.identity(4, float)
    R[0, 0:3] = [np.dot(b1, a1), np.dot(b1, a2), np.dot(b1, a3)]
    R[1, 0:3] = [np.dot(b2, a1), np.dot(b2, a2), np.dot(b2, a3)]
    R[2, 0:3] = [np.dot(b3, a1), np.dot(b3, a2), np.dot(b3, a3)]
    # If Vab is expressed in csys b, rotate it back into csys a first.
    if rel == 'b':
        Vab = np.append(Vab, 1.0)
        Vab = np.dot(R.T, Vab)[0:3]
    # Translation matrix
    T = np.identity(4, float)
    T[0:3, 3] = -Vab
    # Combined transformation (rotate after translating)
    return np.dot(R, T)
# ~~~~~~~~~~
def getTMfromCsys(odb,csysName):
    """Return the global->local 4x4 transformation matrix for the named
    datum coordinate system, or None for 'GLOBAL' or an unknown csys name."""
    if csysName == 'GLOBAL':
        return None
    # Parse coordinate system name: strip any trailing "(...)" qualifier
    csysName = csysName.split(r'(')[0].strip()
    # Get ABAQUS datumCsys
    lcsys = None
    # Check odb csyses
    if csysName in odb.rootAssembly.datumCsyses.keys():
        lcsys = odb.rootAssembly.datumCsyses[csysName]
    # Check scratch odb csyses (these take precedence if both exist)
    if odb.path in session.scratchOdbs.keys():
        if csysName in session.scratchOdbs[odb.path].rootAssembly.datumCsyses.keys():
            lcsys = session.scratchOdbs[odb.path].rootAssembly.datumCsyses[csysName]
    if lcsys is None:  # identity test instead of '== None'
        return None
    # Global coordinate system
    Og = np.zeros(3)
    Mg = np.identity(3)
    # Local coordinate system
    Ol = lcsys.origin
    Ml = np.zeros((3,3))
    Ml[0] = lcsys.xAxis/np.linalg.norm(lcsys.xAxis) # NOTE: This should already be a unit vector
    Ml[1] = lcsys.yAxis/np.linalg.norm(lcsys.yAxis) # Shouldn't need to normalise
    Ml[2] = lcsys.zAxis/np.linalg.norm(lcsys.zAxis)
    # Create transformation matrix
    Vgl = Ol-Og
    TM = createTransformationMatrix(Mg,Ml,Vgl,rel='a')
    return TM
# ~~~~~~~~~~
def parseRegionSetName(regionSetName):
    """Split 'region.setName' into (region, setName); a bare set name is
    attributed to the 'Assembly' region."""
    if '.' not in regionSetName:
        return 'Assembly', regionSetName
    region, setName = regionSetName.split('.')
    return region, setName
# ~~~~~~~~~~
def getElements(odb,regionSetName):
    """Get the elements for the requested region/set.

    Returns (partInfo, setRegion, elements) where partInfo maps instance name
    -> element type -> count, or None (with a console message) if any element
    type in the set is not supported."""
    # Get region set and elements
    region,setName = parseRegionSetName(regionSetName)
    if region=='Assembly':
        # NOTE(review): assembly sets are indexed by the full 'region.set'
        # string rather than setName — confirm this matches the odb naming.
        setRegion = odb.rootAssembly.elementSets[regionSetName]
        if isinstance(setRegion.elements[0], OdbMeshElementType):
            elements = setRegion.elements
        else:
            # Flatten a sequence of mesh element arrays into a single list
            elements = []
            for meshElemArray in setRegion.elements:
                for e in meshElemArray:
                    elements.append(e)
    else:
        if setName=='ALL':
            setRegion = odb.rootAssembly.instances[region]
            elements = setRegion.elements
        else:
            setRegion = odb.rootAssembly.instances[region].elementSets[setName]
            elements = setRegion.elements
    # Get part information: (1) instance names, (2) element types and (3) number of each element type
    partInfo = {}
    for e in elements:
        if e.instanceName not in partInfo: partInfo[e.instanceName] = {}
        if e.type not in partInfo[e.instanceName]: partInfo[e.instanceName][e.type] = 0
        partInfo[e.instanceName][e.type] += 1
    # Collect the unique element types across all part instances
    eTypes = []
    for instTypes in partInfo.values():
        for eType in instTypes.keys():
            if eType not in eTypes:
                eTypes.append(eType)
    # Check that all element types are supported
    usTypes = []
    for eType in eTypes:
        if eType not in et.seTypes:
            usTypes.append(str(eType))
    if len(usTypes)>0:
        if len(usTypes)==1: strvars = ('',usTypes[0],regionSetName,'is')
        else: strvars = ('s',', '.join(usTypes),regionSetName,'are')
        print('\nElement type%s %s in region %s %s not supported' % strvars)
        return None
    return partInfo, setRegion, elements
# ~~~~~~~~~~
def getPartData(odb,regionSetName,TM):
    """Get region data based on original (undeformed) coordinates.

    Builds per-instance element connectivity arrays and a node-label ->
    coordinate lookup for the requested set, optionally transforming all
    coordinates with TM (global -> local). Returns
    (regionSet, elemData, setNodeList, bbox) or None if getElements failed.
    """
    # Get elements and part info
    result = getElements(odb,regionSetName)
    if result==None: return None
    else:
        regionInfo, regionSet, elements = result
    numElems = len(elements)
    # One element-class instance per supported element type (for numNodes).
    ec = dict([(ename,eclass()) for ename,eclass in et.seTypes.items()])
    # Create empty dictionary,array to store element data:
    # instance -> type -> structured array of (label, connectivity).
    elemData = copy.deepcopy(regionInfo)
    for instName in elemData.keys():
        for k,v in elemData[instName].items():
            elemData[instName][k] = np.zeros(v,dtype=[('label','|i4'),('econn','|i4',(ec[k].numNodes,))])
    # Per-instance, per-type running index into the arrays above.
    eCount = dict([(k1,dict([k2,0] for k2 in regionInfo[k1].keys())) for k1 in regionInfo.keys()])
    setNodeLabs = dict([(k,{}) for k in regionInfo.keys()])
    # Create a list of element connectivities (list of nodes connected to each element)
    for e in xrange(numElems):
        elem = elements[e]
        eConn = elem.connectivity
        eInst = elem.instanceName
        eType = elem.type
        eIndex = eCount[eInst][eType]
        elemData[eInst][eType][eIndex] = (elem.label,eConn)
        eCount[eInst][eType] +=1
        # Record every node label touched by the set (used as a filter below).
        for n in eConn:
            setNodeLabs[eInst][n] = 1
    numSetNodes = np.sum([len(setNodeLabs[k]) for k in setNodeLabs.keys()])
    setNodes = np.zeros(numSetNodes,dtype=[('instName','|a80'),('label','|i4'),('coord','|f4',(3,))])
    nodeCount = 0
    # Gather the coordinates of only those instance nodes that are in the set.
    for instName in setNodeLabs.keys():
        inst = odb.rootAssembly.instances[instName]
        nodes = inst.nodes
        numNodes = len(nodes)
        for n in xrange(numNodes):
            node = nodes[n]
            label = node.label
            if label in setNodeLabs[instName]:
                setNodes[nodeCount] = (instName,label,node.coordinates)
                nodeCount+=1
    # Transform the coordinates from the global csys to the local csys
    if TM is not None:
        print 'TM is not None'
        for i in xrange(numSetNodes):
            setNodes['coord'][i] = transformPoint(TM,setNodes['coord'][i])
    # Get bounding box (per-axis min/max over all set nodes)
    low = np.min(setNodes['coord'],axis=0)
    upp = np.max(setNodes['coord'],axis=0)
    bbox = (low,upp)
    # Convert setNodes to a dictionary for fast indexing by node label
    setNodeList = dict([(k,{}) for k in regionInfo.keys()])
    for instName in setNodeList.keys():
        indx = np.where(setNodes['instName']==instName)
        setNodeList[instName] = dict(zip(setNodes[indx]['label'],setNodes[indx]['coord']))
    return regionSet,elemData,setNodeList,bbox
# ~~~~~~~~~~
def checkDependencies():
    """Check that the pydicom package (imported via its legacy 'dicom'
    namespace) is available; returns True when it is, else False."""
    try:
        from dicom.dataset import Dataset, FileDataset
    except ImportError:
        # Only an import failure means "missing"; a bare except would also
        # have swallowed unrelated errors raised during import.
        print('Error: Cannot load pydicom package')
        return False
    return True
# ~~~~~~~~~~
def createVirtualCT(odbName,bRegionSetName,BMDfoname,showImplant,iRegionSetName,
                    iDensity,stepNumber,csysName,sliceResolution,sliceSpacing,newSubDirName):
    """Creates a virtual CT stack from an ABAQUS odb file.

    The odb file should contain a step with a fieldoutput variable
    representing bone mineral density (BMD).

    Args:
        odbName: Key of the odb in session.odbs.
        bRegionSetName: Bone region/set name ('region.set' or bare set name).
        BMDfoname: Name of the BMD field output variable.
        showImplant: If True, superimpose the implant region at iDensity.
        iRegionSetName: Implant region/set name (used when showImplant).
        iDensity: Implant density; divided by 1000 below — presumably a
            unit conversion, TODO confirm.
        stepNumber: Analysis step number ("Step-<n>"); last frame is used.
        csysName: Coordinate system name, or 'GLOBAL'.
        sliceResolution: '256 x 256' or '512 x 512'.
        sliceSpacing: Slice spacing along z.
        newSubDirName: Sub-directory (under cwd) for the .dcm slices.
    """
    # User message
    print '\npyvCT: Create virtual CT plugin'
    # Check dependencies
    if not checkDependencies():
        print 'Error: Virtual CT not created\n'
        return
    # Process inputs
    sliceResolutions = {'256 x 256':(256,256), '512 x 512':(512,512)}
    stepNumber = int(stepNumber)
    sliceSpacing = float(sliceSpacing)
    # Set variables
    NX,NY = sliceResolutions[sliceResolution]
    iDensity /= 1000.
    odb = session.odbs[odbName]
    ec = dict([(ename,eclass()) for ename,eclass in et.seTypes.items()])
    # Get transformation matrix to convert from global to local coordinate system
    TM = getTMfromCsys(odb,csysName)
    print '\nCT reference frame will be relative to %s' % csysName
    # Get part data and create a bounding box. The bounding box should include the implant if specified
    bRegion,bElemData,bNodeList,bBBox = getPartData(odb,bRegionSetName,TM)
    if showImplant:
        iRegion,iElemData,iNodeList,iBBox = getPartData(odb,iRegionSetName,TM)
        bbLow = np.min((bBBox[0],iBBox[0]),axis=0)
        bbUpp = np.max((bBBox[1],iBBox[1]),axis=0)
    else:
        bbLow,bbUpp = bBBox
    # Define extents of CT stack: pad by 5% and force square x/y extents
    bbox = np.array([bbLow,bbUpp])
    bbCentre = bbox.mean(axis=0)
    bbSides = 1.05*(bbUpp - bbLow)
    bbSides[:2] = np.max(bbSides[:2])
    bbLow = bbCentre-0.5*bbSides
    bbUpp = bbCentre+0.5*bbSides
    lx,ly,lz = bbUpp - bbLow
    x0,y0,z0 = bbLow
    xN,yN,zN = bbUpp
    # Generate CT grid
    NZ = int(np.ceil(lz/sliceSpacing+1))
    x = np.linspace(x0,xN,NX)
    y = np.linspace(y0,yN,NY)
    z = np.linspace(z0,zN,NZ)
    # Get BMD values for all elements
    # Get frame (last frame of the requested step)
    stepName = "Step-%i" % (stepNumber)
    frame = odb.steps[stepName].frames[-1]
    # Get BMD data for bRegion in frame
    print 'Getting BMD values'
    # Initialise BMDvalues: instance -> element label -> element-class instance
    BMDvalues = dict([(k,{}) for k in bElemData.keys()])
    for instName,instData in bElemData.items():
        for etype,eData in instData.items():
            for i in xrange(eData.size):
                BMDvalues[instName][eData[i]['label']] = et.seTypes[etype]()
    # Get list of BMD element_nodal values for each node in bone region
    BMDfov = frame.fieldOutputs[BMDfoname].getSubset(region=bRegion, position=ELEMENT_NODAL).values
    BMDnv = {}
    for i in xrange(len(BMDfov)):
        val = BMDfov[i]
        instanceName = val.instance.name
        elemLabel = val.elementLabel  # NOTE(review): read but never used
        nodeLabel = val.nodeLabel
        if not BMDnv.has_key(instanceName): BMDnv[instanceName] = {}
        if not BMDnv[instanceName].has_key(nodeLabel): BMDnv[instanceName][nodeLabel] = []
        BMDnv[instanceName][nodeLabel].append(val.data)
    # Average BMD element_nodal values (one value per node)
    for instName in BMDnv.keys():
        for nl in BMDnv[instName].keys():
            BMDnv[instName][nl] = np.mean(BMDnv[instName][nl])
    # Add nodal BMD values to BMDvalues array
    for instName in bElemData.keys():
        for etype in bElemData[instName].keys():
            eData = bElemData[instName][etype]
            for i in xrange(eData.size):
                el = eData[i]['label']
                nc = eData[i]['econn']
                for indx in range(nc.size):
                    nl = nc[indx]
                    val = BMDnv[instName][nl]
                    BMDvalues[instName][el].setNodalValueByIndex(indx,val)
    # Create the element map for the bone and map values over to voxel array
    print 'Mapping BMD values'
    voxels = np.zeros((NX,NY,NZ),dtype=np.float32)
    for instName in bElemData.keys():
        for etype in bElemData[instName].keys():
            edata = bElemData[instName][etype]
            emap = createElementMap(bNodeList[instName],edata['label'],edata['econn'],ec[etype].numNodes,x,y,z)
            # Where an intersection was found between the grid point and the
            # element, interpolate BMD at the grid point's local coordinates
            indx = np.where(emap['cte']>0)[0]
            for gpi in indx:
                cte,g,h,r = emap[gpi]
                ipc = [g,h,r]
                i,j,k = convert1Dto3Dindex(gpi,NX,NY,NZ)
                voxels[i,j,k] = BMDvalues[instName][cte].interp(ipc)
    # Create element map for the implant, map to 3D space array and then add to voxels array
    if showImplant:
        print 'Adding implant'
        # Get a map for each instance and element type. Then combine maps together
        for instName in iElemData.keys():
            for etype in iElemData[instName].keys():
                edata = iElemData[instName][etype]
                emap = createElementMap(iNodeList[instName],edata['label'],edata['econn'],ec[etype].numNodes,x,y,z)
                # Where an intersection was found between the grid point and implant, add implant to voxel array
                indx = np.where(emap['cte']>0)[0]
                for gpi in indx:
                    i,j,k = convert1Dto3Dindex(gpi,NX,NY,NZ)
                    voxels[i,j,k] = iDensity
    # Get min/max range of voxel values
    vmin,vmax = [voxels.min(),voxels.max()]
    # Scale voxel values to maximum range
    # NOTE(review): scales to the 8-bit range (0-255) but stores as uint16 —
    # confirm whether the intent was 2**16-1 here.
    numbits = 8
    low,upp = 0, 2**numbits-1
    voxels = low + (voxels-vmin)/(vmax-vmin)*upp
    voxels = voxels.astype(np.uint16)
    # Write CT slices to new directory
    print 'Writing CT slice files'
    # Create a new sub-directory to keep CT slice files; append _1, _2, ...
    # if a directory of that name already exists.
    newSubDirPath = os.path.join(os.getcwd(),newSubDirName)
    if os.path.isdir(newSubDirPath):
        for i in range(1000):
            newSubDirPath = os.path.join(os.getcwd(),newSubDirName+'_%d'%(i+1))
            if not os.path.isdir(newSubDirPath): break
    os.mkdir(newSubDirPath)
    # Assume stack direction is z-direction. Need to reorder voxel array
    # Note: The array ds.PixelArray is indexed by [row,col], which is equivalent to [yi,xi]. Also,
    # because we are adding to CTvals by z slice, then the resulting index of CTvals is [zi,yi,xi].
    # Correct this to more typical index [xi,yi,zi] by swapping xi and zi e.g. zi,yi,xi -> xi,yi,zi
    voxels = voxels.swapaxes(0,2)
    voxels = voxels[:,::-1,:]
    # Setup basic metadata
    psx = lx/(NX-1)
    psy = ly/(NY-1)
    metaData = {}
    metaData['PixelSpacing'] = ['%.3f' % v for v in (psx,psy)]
    # Write CT slices: One per z-index, zero-padded to five digits
    for s in range(voxels.shape[0]):
        sn = ('%5d.dcm' % (s+1)).replace(' ','0')
        fn = os.path.join(newSubDirPath,sn)
        metaData['ImagePositionPatient'] = ['%.3f' % v for v in (0.0,0.0,z[s])]
        pixel_array = voxels[s]
        writeCTslice(pixel_array,fn,metaData)
    # User message
    print 'Virtual CT has been created in %s' % newSubDirPath
    print '\nFinished\n'
# ~~~~~~~~~~
def writeCTslice(pixel_array,filename,metaData):
    """Write a single CT slice to `filename` as a minimal DICOM file.

    Args:
        pixel_array: 2D numpy array of grey values for this slice.
        filename: Output path of the .dcm file.
        metaData: dict providing 'ImagePositionPatient' and 'PixelSpacing'.
    """
    from dicom.dataset import Dataset, FileDataset
    # Minimal (empty) file meta information header.
    # NOTE(review): under Python 3 / pydicom the preamble must be bytes
    # (b"\0"*128); the str literal below assumes Python 2.
    file_meta = Dataset()
    file_meta.MediaStorageSOPClassUID = ''
    file_meta.MediaStorageSOPInstanceUID = ''
    file_meta.ImplementationClassUID = ''
    ds = FileDataset(filename,{},file_meta = file_meta,preamble="\0"*128)
    ds.BitsAllocated = 16 # 16-bit grey-scale voxel values
    ds.SamplesPerPixel = 1 # 1 for grey scale, 4 for RGBA
    ds.PixelRepresentation = 0 # 0 for unsigned, 1 for signed
    ds.PhotometricInterpretation = 'MONOCHROME2' # 0 is black
    ds.ImagePositionPatient = metaData['ImagePositionPatient']
    # NOTE(review): Columns is taken from shape[0] and Rows from shape[1];
    # for a [row, col]-indexed array these look swapped — confirm against
    # the caller's axis ordering (slices are square by default, hiding it).
    ds.Columns = pixel_array.shape[0]
    ds.Rows = pixel_array.shape[1]
    ds.PixelSpacing = metaData['PixelSpacing']
    if pixel_array.dtype != np.uint16:
        pixel_array = pixel_array.astype(np.uint16)
    ds.PixelData = pixel_array.tostring()
    ds.save_as(filename)
    return
| |
#!/usr/bin/env python3
"""This module implements finding of Radio Frequency Interference for LOFAR data.
This module is strongly based on pyCRtools findrfi.py by Arthur Corstanje.
However, it has been heavily modified for use with LOFAR-LIM by Brian Hare
"""
from pickle import load
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import gaussian
from LoLIM.utilities import processed_data_dir
from LoLIM.signal_processing import half_hann_window, num_double_zeros
def median_sorted_by_power(psort):
    """Reorder `psort` so the median element comes first, fanning outward.

    Visits indices median, median+1, median-1, median+2, median-2, ... so
    the first element is the median of a (power-sorted) list and later
    elements move toward the extremes. For even lengths the lower of the
    two middle indices is used as the starting point.

    Args:
        psort: sequence to reorder (sorted by power by the caller).

    Returns:
        list with the elements of `psort` in the order described above.
    """
    length = len(psort)
    start = length // 2 - 1 if length % 2 == 0 else length // 2
    reordered = []
    offset = 0
    # Offset sequence: 0, +1, -1, +2, -2, ... (the original carried an
    # unreachable fourth branch that printed a joke message; removed).
    for _ in range(length):
        reordered.append(psort[start + offset])
        if offset == 0:
            offset = 1
        elif offset > 0:
            offset = -offset
        else:
            offset = -offset + 1
    return reordered
def FindRFI(TBB_in_file, block_size, initial_block, num_blocks, max_blocks=None, verbose=False, figure_location=None, lower_frequency=10E6, upper_frequency=90E6, num_dbl_z=100):
    """ use phase-variance to find RFI in data. TBB_in_file should be a MultiFile_Dal1, encompassing the data for one station. block_size should be around 65536 (2^16).
    num_blocks should be at least 20. Sometimes a block needs to be skipped, so max_blocks shows the maximum number of blocks used (after initial block) used to find num_blocks
    number of good blocks. initial block should be such that there is no lightning in the max_blocks number of blocks. If max_blocks is None (default), it is set to num_blocks
    figure_location should be a folder to save relevant figures, default is None (do not save figures). If set to "show", figures are displayed instead of saved.
    num_dbl_z is number of double zeros allowed in a block; if there are too many, then there could be data loss.

    returns a dictionary with the following key-value pairs:
        "ave_spectrum_magnitude": a numpy array that contains the average of the magnitude of the frequency spectrum
        "ave_spectrum_phase": a numpy array containing the average of the phase of the frequency spectrum
        "phase_variance": a numpy array containing the phase variance of each frequency channel
        "dirty_channels": an array of indices indicating the channels that are contaminated with RFI
    (plus "blocksize", "cleaned_spectrum_magnitude", "cleaned_power",
    "antenna_names", "timestamp", "antennas_good", and "frequency")

    Returns None (after printing an error) when fewer than two antennas have
    enough good blocks.
    """
    if max_blocks is None:
        max_blocks = num_blocks

    window_function = half_hann_window(block_size, 0.1)
    num_antennas = len(TBB_in_file.get_antenna_names())

    if figure_location is not None:
        max_over_blocks = np.zeros((num_antennas, max_blocks), dtype=np.double)

    #### step one: find which blocks are good, and find average power ####
    # reusable single-antenna time-series buffer
    oneAnt_data = np.empty( block_size, dtype=np.double )

    if verbose:
        print( 'finding good blocks' )

    blocks_good = np.zeros((num_antennas, max_blocks), dtype=bool)
    num_good_blocks = np.zeros(num_antennas, dtype=int)
    average_power = np.zeros(num_antennas, dtype=np.double)
    for block_i in range(max_blocks):
        block = block_i + initial_block

        for ant_i in range(num_antennas):
            oneAnt_data[:] = TBB_in_file.get_data( block_size*block, block_size, antenna_index=ant_i )

            if num_double_zeros( oneAnt_data ) < num_dbl_z: ## this antenna on this block is good
                blocks_good[ ant_i, block_i ] = True
                num_good_blocks[ ant_i ] += 1

                oneAnt_data *= window_function
                FFT_data = np.fft.fft( oneAnt_data )
                # |FFT|^2 accumulated in place to avoid extra allocations
                np.abs(FFT_data, out=FFT_data)
                magnitude = FFT_data
                magnitude *= magnitude
                average_power[ ant_i ] += np.real( np.sum( magnitude ) )

            if figure_location is not None:
                # BUGFIX: was np.max( oneAnt_data[ant_i] ), i.e. the "max" of a
                # single sample; the intent is the peak of the whole block.
                # (Note: if the block was good, the data is already windowed here.)
                max_over_blocks[ant_i, block_i] = np.max( oneAnt_data )

    # average only over antennas that had at least one good block
    average_power[num_good_blocks!=0] /= num_good_blocks[num_good_blocks!=0]

    #### now we try to find the best reference antenna. Require that the antenna allows for the
    #### maximum number of good antennas, and has the most *median* average received power.
    # np.int / np.complex are removed aliases (NumPy >= 1.24): use the builtins.
    allowed_num_antennas = np.empty( num_antennas, dtype=int) ## if ant_i is chosen as reference antenna, allowed_num_antennas[ant_i] is the number of antennas with num_blocks good blocks
    for ant_i in range(num_antennas): ### fill allowed_num_antennas
        blocks_can_use = np.where( blocks_good[ ant_i ] )[0]
        num_good_blocks_per_antenna = np.sum( blocks_good[:,blocks_can_use], axis=1 )
        allowed_num_antennas[ ant_i ] = np.sum( num_good_blocks_per_antenna >= num_blocks )

    max_allowed_antennas = np.max(allowed_num_antennas)

    if max_allowed_antennas < 2:
        print("ERROR: station", TBB_in_file.get_station_name(), "cannot find RFI")
        return

    ## pick ref antenna that allows max number of antennas, and has most median amount of power
    can_be_ref_antenna = (allowed_num_antennas == max_allowed_antennas )
    sorted_by_power = np.argsort( average_power )
    mps = median_sorted_by_power(sorted_by_power)
    for ant_i in mps:
        if can_be_ref_antenna[ant_i]:
            ref_antenna = ant_i
            break

    if verbose:
        print( 'Taking channel %d as reference antenna' % ref_antenna)

    ## define some helping variables ##
    good_blocks = np.where( blocks_good[ ref_antenna ] )[0]
    num_good_blocks = np.sum( blocks_good[:,good_blocks], axis=1 )
    antenna_is_good = num_good_blocks >= num_blocks
    # antennas without enough good blocks are excluded entirely
    blocks_good[np.logical_not(antenna_is_good) , : ] = False

    #### process data ####
    num_processed_blocks = np.zeros(num_antennas, dtype=int)
    frequencies = np.fft.fftfreq(block_size, 1.0/TBB_in_file.get_sample_frequency())
    # restrict to the positive-frequency half-band [lower_frequency, upper_frequency)
    lower_frequency_index = np.searchsorted(frequencies[:int(len(frequencies)/2)], lower_frequency)
    upper_frequency_index = np.searchsorted(frequencies[:int(len(frequencies)/2)], upper_frequency)

    phase_mean = np.zeros( (num_antennas, upper_frequency_index-lower_frequency_index), dtype=complex )
    spectrum_mean = np.zeros( (num_antennas, upper_frequency_index-lower_frequency_index), dtype=np.double)

    data = np.empty( (num_antennas, len(frequencies)), dtype=complex )
    temp_mag_spectrum = np.empty( (num_antennas, len(frequencies)), dtype=np.double)
    temp_phase_spectrum = np.empty( (num_antennas, len(frequencies)), dtype=complex)
    for block_i in good_blocks:
        block = block_i + initial_block
        if verbose:
            print( 'Doing block %d' % block )

        for ant_i in range(num_antennas):
            # the reference antenna is always processed (it normalizes the phases);
            # other antennas stop once they reach num_blocks
            if (num_processed_blocks[ant_i] == num_blocks and not ant_i==ref_antenna) or not blocks_good[ant_i, block_i]:
                continue

            oneAnt_data[:] = TBB_in_file.get_data( block_size*block, block_size, antenna_index=ant_i )
            ##window the data
            # Note: No hanning window if we want to measure power accurately from spectrum
            # in the same units as power from timeseries. Applying a window gives (at least) a scale factor
            # difference!
            # But no window makes the cleaning less effective... :(
            oneAnt_data *= window_function
            data[ant_i] = np.fft.fft( oneAnt_data )

        data /= block_size
        np.abs( data, out=temp_mag_spectrum )
        # unit-magnitude phasors, referenced to the phase of the reference antenna
        temp_phase_spectrum[:] = data
        temp_phase_spectrum /= (temp_mag_spectrum + 1.0E-15)
        temp_phase_spectrum[:,:] /= temp_phase_spectrum[ref_antenna,:]
        temp_mag_spectrum *= temp_mag_spectrum ## square

        for ant_i in range(num_antennas):
            if (num_processed_blocks[ant_i] == num_blocks and not ant_i==ref_antenna) or not blocks_good[ant_i, block_i]:
                continue

            phase_mean[ant_i,:] += temp_phase_spectrum[ant_i][lower_frequency_index:upper_frequency_index]
            spectrum_mean[ant_i,:] += temp_mag_spectrum[ant_i][lower_frequency_index:upper_frequency_index]
            num_processed_blocks[ant_i] += 1

        # stop as soon as every good antenna has accumulated num_blocks blocks
        if np.min(num_processed_blocks[antenna_is_good]) == num_blocks:
            break

    if verbose:
        print(num_blocks, "analyzed blocks", np.sum(antenna_is_good), "analyzed antennas out of", len(antenna_is_good))

    ## get only good antennas
    antenna_is_good[ref_antenna] = False ## we don't want to analyze the phase stability of the reference antenna

    ### get mean and phase stability ###
    spectrum_mean /= num_blocks

    # phase "stability": 1 - |mean phasor|; near 0 for phase-coherent (RFI) channels
    phase_stability = np.abs(phase_mean)
    phase_stability *= -1.0/num_blocks
    phase_stability += 1.0

    #### get median of stability by channel, across each antenna ###
    median_phase_spread_byChannel = np.median(phase_stability[antenna_is_good], axis=0)

    #### get median across all channels
    median_spread = np.median( median_phase_spread_byChannel )

    #### create a noise cutoff ###
    sorted_phase_spreads = np.sort( median_phase_spread_byChannel )
    N = len(median_phase_spread_byChannel)
    noise = sorted_phase_spreads[int(N*0.95)] - sorted_phase_spreads[int(N/2)]

    #### get channels contaminated by RFI, where phase stability is smaller than noise ###
    dirty_channels = np.where( median_phase_spread_byChannel < (median_spread-3*noise))[0]

    ### extend dirty channels by some size, in order to account for shoulders ####
    extend_dirty_channels = np.zeros(N, dtype=bool)
    half_flagwidth = int(block_size/8192)
    for i in dirty_channels:
        flag_min = i-half_flagwidth
        flag_max = i+half_flagwidth
        if flag_min < 0:
            flag_min = 0
        if flag_max >= N:
            flag_max = N-1
        extend_dirty_channels[flag_min:flag_max] = True
    dirty_channels = np.where( extend_dirty_channels )[0]

    antenna_is_good[ref_antenna] = True ## cause'.... ya know.... it is

    #### plot and return data ####
    frequencies = frequencies[lower_frequency_index:upper_frequency_index]
    if figure_location is not None:
        frequencies_MHZ = frequencies*1.0E-6

        plt.figure()
        plt.plot(frequencies_MHZ, median_phase_spread_byChannel)
        plt.axhline( median_spread-3*noise, color='r')
        plt.title("Phase spread vs frequency. Red horizontal line shows cuttoff.")
        plt.ylabel("Spread value")
        plt.xlabel("Frequency [MHz]")
        if figure_location == "show":
            plt.show()
        else:
            plt.savefig(figure_location+'/phase_spreads.png')
            plt.close()

        plt.figure()
        plt.plot(frequencies_MHZ, spectrum_mean[ ref_antenna ])
        plt.plot(frequencies_MHZ[dirty_channels], spectrum_mean[ ref_antenna ][dirty_channels], 'ro')
        plt.xlabel("Frequency [MHz]")
        plt.ylabel("magnitude")
        # NOTE(review): 'nonposy' was renamed 'nonpositive' in matplotlib >= 3.3
        plt.yscale('log', nonposy='clip')
        if figure_location == "show":
            plt.show()
        else:
            plt.savefig(figure_location+'/magnitude.png')
            plt.close()

        plt.figure()
        for maxes, ant_name in zip(max_over_blocks, TBB_in_file.get_antenna_names()):
            plt.plot(maxes, label=ant_name)
        plt.ylabel("maximum")
        plt.xlabel("block index")
        plt.legend()
        if figure_location == "show":
            plt.show()
        else:
            plt.savefig(figure_location+'/max_over_blocks.png')
            plt.close()

    output_dict = {}
    output_dict["ave_spectrum_magnitude"] = spectrum_mean
    output_dict["ave_spectrum_phase"] = np.angle(phase_mean, deg=False)
    output_dict["phase_variance"] = phase_stability
    output_dict["dirty_channels"] = dirty_channels + lower_frequency_index
    output_dict["blocksize"] = block_size

    cleaned_spectrum = np.array( spectrum_mean )
    cleaned_spectrum[:, dirty_channels] = 0.0
    output_dict["cleaned_spectrum_magnitude"] = cleaned_spectrum
    # factor 2: positive frequencies only, so double to account for the mirrored half
    output_dict["cleaned_power"] = 2*np.sum( cleaned_spectrum, axis=1 )
    print( 'cleaned power:', output_dict["cleaned_power"] )
    print('old power', 2*np.sum( spectrum_mean, axis=1 ) )
    print('num dirty channels:', len(dirty_channels))

    output_dict["antenna_names"] = TBB_in_file.get_antenna_names()
    output_dict["timestamp"] = TBB_in_file.get_timestamp()
    output_dict["antennas_good"] = antenna_is_good
    output_dict["frequency"] = frequencies

    return output_dict
class window_and_filter:
    """Half-hann windowing plus band-pass (and optional RFI) filtering of data blocks.

    The band-pass keeps only positive frequencies, so filtering also performs
    an implicit Hilbert transform: `filter` returns complex samples whose real
    part is the filtered signal and whose magnitude is its envelope.
    """

    def __init__(self, blocksize=None, find_RFI=None, timeID=None, sname=None, lower_filter=30.0E6, upper_filter=80.0E6, half_window_percent=0.1, time_per_sample=5.0E-9, filter_roll_width = 2.5E6):
        # find_RFI may be: None, a FindRFI result dict, or a path to a pickled
        # {station_name: result} mapping. When timeID is given, the path is
        # taken relative to that event's processed-data directory.
        self.lower_filter = lower_filter
        self.upper_filter = upper_filter

        if timeID is not None:
            if find_RFI is None:
                find_RFI = "/findRFI/findRFI_results"
            find_RFI = processed_data_dir(timeID) + find_RFI
        if isinstance(find_RFI, str): ## load findRFI data from file
            with open( find_RFI, 'rb' ) as fin:
                find_RFI = load(fin)[ sname ]

        self.RFI_data = find_RFI

        # the blocksize must agree with the one the FindRFI results were made with
        if self.RFI_data is not None:
            if blocksize is None:
                blocksize = self.RFI_data['blocksize']
            elif self.RFI_data['blocksize'] != blocksize:
                print("blocksize and findRFI blocksize must match")
                quit()
        elif blocksize is None:
            print("window and filter needs a blocksize")
            ## TODO: check block sizes are consistant
            quit()

        self.blocksize = blocksize
        self.half_window_percent = half_window_percent
        self.half_hann_window = half_hann_window(blocksize, half_window_percent)

        # build the (positive-frequency) band-pass mask
        FFT_frequencies = np.fft.fftfreq(blocksize, d=time_per_sample)
        self.bandpass_filter = np.zeros( len(FFT_frequencies), dtype=complex)
        self.bandpass_filter[ np.logical_and( FFT_frequencies>=lower_filter, FFT_frequencies<=upper_filter ) ] = 1.0
        # soften the band edges with a gaussian roll-off (in frequency bins)
        width = filter_roll_width/(FFT_frequencies[1]-FFT_frequencies[0])
        if width > 1:
            gaussian_weights = gaussian(len(FFT_frequencies), width )
            self.bandpass_filter = np.convolve(self.bandpass_filter, gaussian_weights, mode='same' )
            self.bandpass_filter /= np.max(self.bandpass_filter) ##convolution changes the peak value
        self.FFT_frequencies = FFT_frequencies

        ## completly reject low-frequency bits
        self.bandpass_filter[0] = 0.0
        self.bandpass_filter[1] = 0.0

        ##reject RFI
        if self.RFI_data is not None:
            self.bandpass_filter[ self.RFI_data["dirty_channels"] ] = 0.0

    def get_frequencies(self):
        """Return the FFT bin frequencies (Hz) for the configured blocksize."""
        return self.FFT_frequencies

    def get_frequency_response(self):
        """Return the complex band-pass mask applied to the spectrum."""
        return self.bandpass_filter

    def filter(self, data, additional_filter=None, whiten=False):
        """Window, band-pass filter, and inverse-FFT *data* (last axis = time).

        NOTE: windows *data* in place. Returns complex samples (implicit
        Hilbert transform). If whiten, the spectrum is normalized to unit
        magnitude before filtering.
        """
        data[...,:] *= self.half_hann_window
        FFT_data = np.fft.fft( data, axis=-1 )
        if whiten:
            FFT_data /= np.abs(FFT_data)
        FFT_data[...,:] *= self.bandpass_filter ## note that this implicitly makes a hilbert transform! (negative frequencies set to zero)
        # NOTE(review): truthiness of a numpy array raises ValueError — callers
        # presumably pass None or a scalar here; `is not None` would be safer.
        if additional_filter:
            FFT_data[...,:] *= additional_filter

        return np.fft.ifft(FFT_data, axis=-1)

    def filter_FFT(self, data, additional_filter=None):
        """Same as `filter`, but returns the filtered spectrum without the inverse FFT."""
        data[...,:] *= self.half_hann_window
        FFT_data = np.fft.fft( data, axis=-1 )
        FFT_data[...,:] *= self.bandpass_filter ## note that this implicitly makes a hilbert transform! (negative frequencies set to zero)
        if additional_filter:
            FFT_data[...,:] *= additional_filter

        return FFT_data
if __name__ == "__main__":
    ## demo/debug script: run FindRFI on one station, then filter and plot one block
    from IO.raw_tbb_IO import MultiFile_Dal1, filePaths_by_stationName

    timeID = "D20170929T202255.000Z"
    station = "CS002"
    antenna_id = 0

    ## these lines are anachronistic and should be fixed at some point
    from LoLIM import utilities
    utilities.default_raw_data_loc = "/exp_app2/appexp1/public/raw_data"
    utilities.default_processed_data_loc = "/home/brian/processed_files"

    block_size = 2**16
    block_number = 3600

    raw_fpaths = filePaths_by_stationName(timeID)
    data_file = MultiFile_Dal1(raw_fpaths[station])

    ### find the radio stations that make noise ####
    ### this searches the beginning of the data file, before the flash, for noise due to human radio stations ###
    initial_block = 1
    number_blocks = 20
    RFI = FindRFI(data_file, block_size, initial_block, number_blocks, max_blocks=100, verbose=True, figure_location=None) ##set figure location to some folder to see output plots

    # NOTE(review): this quit() makes everything below unreachable — apparently
    # left in deliberately while debugging; remove it to run the filtering demo.
    quit()

    RFI_filter = window_and_filter(block_size, RFI, lower_filter=30E6, upper_filter=80E6)
    # plt.plot( RFI_filter.bandpass_filter )
    # plt.show()

    ### now open one antenna of data
    data = np.empty((block_size), dtype=np.double)
    data[:] = data_file.get_data(block_size*block_number, block_size, antenna_index=antenna_id) ##get and store the data

    ##filter it
    filtered_data = RFI_filter.filter( data ) ## this works for multiple antennas as well, (like done at bottom of raw_tbb_IO.py)

    ## note that filtering the data turns it into complex numbers. The real component is the value of the signal, the absolute value is an envelope over the data
    plt.plot(np.abs(filtered_data), 'g', linewidth=3 )
    plt.plot(np.real(filtered_data),'r', linewidth=3)
    plt.show()
| |
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.compute.rpcapi
"""
import contextlib
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.objects import block_device as objects_block_dev
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class ComputeRpcAPITestCase(test.NoDBTestCase):
    """Tests for nova.compute.rpcapi.ComputeAPI.

    Each test drives one RPC-API method through _test_compute_api, which
    mocks the rpc client and asserts the method casts/calls with the
    expected version, target server, and arguments.
    """

    def setUp(self):
        """Build a fake flavor, instance, and volume BDM used by the tests."""
        super(ComputeRpcAPITestCase, self).setUp()
        self.context = context.get_admin_context()
        self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context)
        self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj)
        instance_attr = {'host': 'fake_host',
                         'instance_type_id': self.fake_flavor_obj['id'],
                         'instance_type': self.fake_flavor_obj}
        self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
                                                                **instance_attr)
        self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
        self.fake_volume_bdm = objects_block_dev.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {'source_type': 'volume', 'destination_type': 'volume',
                     'instance_uuid': self.fake_instance_obj.uuid,
                     'volume_id': 'fake-volume-id'}))

    def _test_compute_api(self, method, rpc_method,
                          expected_args=None, **kwargs):
        """Invoke *method* on a ComputeAPI with mocked client and verify the RPC.

        rpc_method is 'cast' or 'call'. kwargs are passed to the API method;
        special keys: 'version' (expected prepare() version), 'host_param'
        (value the server expects as 'host'), '_return_value' /
        'return_bdm_object' (forced mock return). expected_args overrides
        individual expected RPC arguments.
        """
        ctxt = context.RequestContext('fake_user', 'fake_project')

        rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)

        orig_prepare = rpcapi.client.prepare
        base_version = rpcapi.client.target.version
        expected_version = kwargs.pop('version', base_version)

        expected_kwargs = kwargs.copy()
        if expected_args:
            expected_kwargs.update(expected_args)
        if 'host_param' in expected_kwargs:
            expected_kwargs['host'] = expected_kwargs.pop('host_param')
        else:
            expected_kwargs.pop('host', None)

        # these API methods translate a 'call' into extra kwargs themselves
        cast_and_call = ['confirm_resize', 'stop_instance']
        if rpc_method == 'call' and method in cast_and_call:
            if method == 'confirm_resize':
                kwargs['cast'] = False
            else:
                kwargs['do_cast'] = False
        if 'host' in kwargs:
            host = kwargs['host']
        elif 'instances' in kwargs:
            host = kwargs['instances'][0]['host']
        else:
            host = kwargs['instance']['host']

        with contextlib.nested(
            mock.patch.object(rpcapi.client, rpc_method),
            mock.patch.object(rpcapi.client, 'prepare'),
            mock.patch.object(rpcapi.client, 'can_send_version'),
        ) as (
            rpc_mock, prepare_mock, csv_mock
        ):
            prepare_mock.return_value = rpcapi.client
            if '_return_value' in kwargs:
                rpc_mock.return_value = kwargs.pop('_return_value')
                del expected_kwargs['_return_value']
            elif 'return_bdm_object' in kwargs:
                del kwargs['return_bdm_object']
                rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
            elif rpc_method == 'call':
                rpc_mock.return_value = 'foo'
            else:
                rpc_mock.return_value = None
            # keep real version-negotiation behavior despite the mock
            csv_mock.side_effect = (
                lambda v: orig_prepare(version=v).can_send_version())

            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(retval, rpc_mock.return_value)

            prepare_mock.assert_called_once_with(version=expected_version,
                                                 server=host)
            rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)

    def test_add_aggregate_host(self):
        self._test_compute_api('add_aggregate_host', 'cast',
                aggregate={'id': 'fake_id'}, host_param='host', host='host',
                slave_info={})

    def test_add_fixed_ip_to_instance(self):
        self._test_compute_api('add_fixed_ip_to_instance', 'cast',
                instance=self.fake_instance_obj, network_id='id',
                version='4.0')

    def test_attach_interface(self):
        self._test_compute_api('attach_interface', 'call',
                instance=self.fake_instance_obj, network_id='id',
                port_id='id2', version='4.0', requested_ip='192.168.1.50')

    def test_attach_volume(self):
        self._test_compute_api('attach_volume', 'cast',
                instance=self.fake_instance_obj, bdm=self.fake_volume_bdm,
                version='4.0')

    def test_change_instance_metadata(self):
        self._test_compute_api('change_instance_metadata', 'cast',
                instance=self.fake_instance_obj, diff={}, version='4.0')

    def test_check_instance_shared_storage(self):
        self._test_compute_api('check_instance_shared_storage', 'call',
                instance=self.fake_instance_obj, data='foo',
                version='4.0')

    def test_confirm_resize_cast(self):
        self._test_compute_api('confirm_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                host='host', reservations=list('fake_res'))

    def test_confirm_resize_call(self):
        self._test_compute_api('confirm_resize', 'call',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                host='host', reservations=list('fake_res'))

    def test_detach_interface(self):
        self._test_compute_api('detach_interface', 'cast',
                version='4.0', instance=self.fake_instance_obj,
                port_id='fake_id')

    def test_detach_volume(self):
        self._test_compute_api('detach_volume', 'cast',
                instance=self.fake_instance_obj, volume_id='id',
                version='4.0')

    def test_finish_resize(self):
        self._test_compute_api('finish_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                image='image', disk_info='disk_info', host='host',
                reservations=list('fake_res'))

    def test_finish_revert_resize(self):
        self._test_compute_api('finish_revert_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                host='host', reservations=list('fake_res'))

    def test_get_console_output(self):
        self._test_compute_api('get_console_output', 'call',
                instance=self.fake_instance_obj, tail_length='tl',
                version='4.0')

    def test_get_console_pool_info(self):
        self._test_compute_api('get_console_pool_info', 'call',
                console_type='type', host='host')

    def test_get_console_topic(self):
        self._test_compute_api('get_console_topic', 'call', host='host')

    def test_get_diagnostics(self):
        self._test_compute_api('get_diagnostics', 'call',
                instance=self.fake_instance_obj, version='4.0')

    def test_get_instance_diagnostics(self):
        expected_args = {'instance': self.fake_instance}
        self._test_compute_api('get_instance_diagnostics', 'call',
                expected_args, instance=self.fake_instance_obj,
                version='4.0')

    def test_get_vnc_console(self):
        self._test_compute_api('get_vnc_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='4.0')

    def test_get_spice_console(self):
        self._test_compute_api('get_spice_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='4.0')

    def test_get_rdp_console(self):
        self._test_compute_api('get_rdp_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='4.0')

    def test_get_serial_console(self):
        self._test_compute_api('get_serial_console', 'call',
                instance=self.fake_instance_obj, console_type='serial',
                version='4.0')

    def test_validate_console_port(self):
        self._test_compute_api('validate_console_port', 'call',
                instance=self.fake_instance_obj, port="5900",
                console_type="novnc", version='4.0')

    def test_host_maintenance_mode(self):
        self._test_compute_api('host_maintenance_mode', 'call',
                host_param='param', mode='mode', host='host')

    def test_host_power_action(self):
        self._test_compute_api('host_power_action', 'call', action='action',
                host='host')

    def test_inject_network_info(self):
        self._test_compute_api('inject_network_info', 'cast',
                instance=self.fake_instance_obj)

    def test_live_migration(self):
        self._test_compute_api('live_migration', 'cast',
                instance=self.fake_instance_obj, dest='dest',
                block_migration='blockity_block', host='tsoh',
                migration='migration',
                migrate_data={}, version='4.2')

    def test_post_live_migration_at_destination(self):
        self._test_compute_api('post_live_migration_at_destination', 'cast',
                instance=self.fake_instance_obj,
                block_migration='block_migration', host='host', version='4.0')

    def test_pause_instance(self):
        self._test_compute_api('pause_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_soft_delete_instance(self):
        self._test_compute_api('soft_delete_instance', 'cast',
                instance=self.fake_instance_obj,
                reservations=['uuid1', 'uuid2'])

    def test_swap_volume(self):
        self._test_compute_api('swap_volume', 'cast',
                instance=self.fake_instance_obj, old_volume_id='oldid',
                new_volume_id='newid')

    def test_restore_instance(self):
        self._test_compute_api('restore_instance', 'cast',
                instance=self.fake_instance_obj, version='4.0')

    def test_pre_live_migration(self):
        self._test_compute_api('pre_live_migration', 'call',
                instance=self.fake_instance_obj,
                block_migration='block_migration', disk='disk', host='host',
                migrate_data=None, version='4.0')

    def test_prep_resize(self):
        self._test_compute_api('prep_resize', 'cast',
                instance=self.fake_instance_obj,
                instance_type=self.fake_flavor_obj,
                image='fake_image', host='host',
                reservations=list('fake_res'),
                request_spec='fake_spec',
                filter_properties={'fakeprop': 'fakeval'},
                node='node', clean_shutdown=True, version='4.1')
        # pinned to 4.0: the flavor must be backlevelled to a primitive
        self.flags(compute='4.0', group='upgrade_levels')
        expected_args = {'instance_type': self.fake_flavor}
        self._test_compute_api('prep_resize', 'cast', expected_args,
                instance=self.fake_instance_obj,
                instance_type=self.fake_flavor_obj,
                image='fake_image', host='host',
                reservations=list('fake_res'),
                request_spec='fake_spec',
                filter_properties={'fakeprop': 'fakeval'},
                node='node', clean_shutdown=True, version='4.0')

    def test_reboot_instance(self):
        self.maxDiff = None
        self._test_compute_api('reboot_instance', 'cast',
                instance=self.fake_instance_obj,
                block_device_info={},
                reboot_type='type')

    def test_rebuild_instance(self):
        self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
                injected_files='None', image_ref='None', orig_image_ref='None',
                bdms=[], instance=self.fake_instance_obj, host='new_host',
                orig_sys_metadata=None, recreate=True, on_shared_storage=True,
                preserve_ephemeral=True, version='4.0')

    def test_reserve_block_device_name(self):
        self._test_compute_api('reserve_block_device_name', 'call',
                instance=self.fake_instance_obj, device='device',
                volume_id='id', disk_bus='ide', device_type='cdrom',
                version='4.0',
                _return_value=objects_block_dev.BlockDeviceMapping())

    def test_refresh_provider_fw_rules(self):
        # BUGFIX: this method was named 'refresh_provider_fw_rules' (no
        # 'test_' prefix), so the test runner never collected or ran it.
        self._test_compute_api('refresh_provider_fw_rules', 'cast',
                host='host')

    def test_refresh_security_group_rules(self):
        self._test_compute_api('refresh_security_group_rules', 'cast',
                security_group_id='id', host='host', version='4.0')

    def test_refresh_security_group_members(self):
        self._test_compute_api('refresh_security_group_members', 'cast',
                security_group_id='id', host='host', version='4.0')

    def test_refresh_instance_security_rules(self):
        expected_args = {'instance': self.fake_instance}
        self._test_compute_api('refresh_instance_security_rules', 'cast',
                expected_args, host='fake_host',
                instance=self.fake_instance_obj, version='4.0')

    def test_remove_aggregate_host(self):
        self._test_compute_api('remove_aggregate_host', 'cast',
                aggregate={'id': 'fake_id'}, host_param='host', host='host',
                slave_info={})

    def test_remove_fixed_ip_from_instance(self):
        self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
                instance=self.fake_instance_obj, address='addr',
                version='4.0')

    def test_remove_volume_connection(self):
        self._test_compute_api('remove_volume_connection', 'call',
                instance=self.fake_instance_obj, volume_id='id', host='host',
                version='4.0')

    def test_rescue_instance(self):
        self._test_compute_api('rescue_instance', 'cast',
                instance=self.fake_instance_obj, rescue_password='pw',
                rescue_image_ref='fake_image_ref',
                clean_shutdown=True, version='4.0')

    def test_reset_network(self):
        self._test_compute_api('reset_network', 'cast',
                instance=self.fake_instance_obj)

    def test_resize_instance(self):
        self._test_compute_api('resize_instance', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                image='image', instance_type=self.fake_flavor_obj,
                reservations=list('fake_res'),
                clean_shutdown=True, version='4.1')
        # pinned to 4.0: the flavor must be backlevelled to a primitive
        self.flags(compute='4.0', group='upgrade_levels')
        expected_args = {'instance_type': self.fake_flavor}
        self._test_compute_api('resize_instance', 'cast', expected_args,
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                image='image', instance_type=self.fake_flavor_obj,
                reservations=list('fake_res'),
                clean_shutdown=True, version='4.0')

    def test_resume_instance(self):
        self._test_compute_api('resume_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_revert_resize(self):
        self._test_compute_api('revert_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                host='host', reservations=list('fake_res'))

    def test_set_admin_password(self):
        self._test_compute_api('set_admin_password', 'call',
                instance=self.fake_instance_obj, new_pass='pw',
                version='4.0')

    def test_set_host_enabled(self):
        self._test_compute_api('set_host_enabled', 'call',
                enabled='enabled', host='host')

    def test_get_host_uptime(self):
        self._test_compute_api('get_host_uptime', 'call', host='host')

    def test_backup_instance(self):
        self._test_compute_api('backup_instance', 'cast',
                instance=self.fake_instance_obj, image_id='id',
                backup_type='type', rotation='rotation')

    def test_snapshot_instance(self):
        self._test_compute_api('snapshot_instance', 'cast',
                instance=self.fake_instance_obj, image_id='id')

    def test_start_instance(self):
        self._test_compute_api('start_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_stop_instance_cast(self):
        self._test_compute_api('stop_instance', 'cast',
                instance=self.fake_instance_obj,
                clean_shutdown=True, version='4.0')

    def test_stop_instance_call(self):
        self._test_compute_api('stop_instance', 'call',
                instance=self.fake_instance_obj,
                clean_shutdown=True, version='4.0')

    def test_suspend_instance(self):
        self._test_compute_api('suspend_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_terminate_instance(self):
        self._test_compute_api('terminate_instance', 'cast',
                instance=self.fake_instance_obj, bdms=[],
                reservations=['uuid1', 'uuid2'], version='4.0')

    def test_unpause_instance(self):
        self._test_compute_api('unpause_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_unrescue_instance(self):
        self._test_compute_api('unrescue_instance', 'cast',
                instance=self.fake_instance_obj, version='4.0')

    def test_shelve_instance(self):
        self._test_compute_api('shelve_instance', 'cast',
                instance=self.fake_instance_obj, image_id='image_id',
                clean_shutdown=True, version='4.0')

    def test_shelve_offload_instance(self):
        self._test_compute_api('shelve_offload_instance', 'cast',
                instance=self.fake_instance_obj,
                clean_shutdown=True, version='4.0')

    def test_unshelve_instance(self):
        self._test_compute_api('unshelve_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                filter_properties={'fakeprop': 'fakeval'}, node='node',
                version='4.0')

    def test_volume_snapshot_create(self):
        self._test_compute_api('volume_snapshot_create', 'cast',
                instance=self.fake_instance_obj, volume_id='fake_id',
                create_info={}, version='4.0')

    def test_volume_snapshot_delete(self):
        self._test_compute_api('volume_snapshot_delete', 'cast',
                instance=self.fake_instance_obj, volume_id='fake_id',
                snapshot_id='fake_id2', delete_info={}, version='4.0')

    def test_external_instance_event(self):
        self._test_compute_api('external_instance_event', 'cast',
                instances=[self.fake_instance_obj],
                events=['event'],
                version='4.0')

    def test_build_and_run_instance(self):
        self._test_compute_api('build_and_run_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                request_spec={'request': 'spec'}, filter_properties=[],
                admin_password='passwd', injected_files=None,
                requested_networks=['network1'], security_groups=None,
                block_device_mapping=None, node='node', limits=[],
                version='4.0')

    def test_quiesce_instance(self):
        self._test_compute_api('quiesce_instance', 'call',
                instance=self.fake_instance_obj, version='4.0')

    def test_unquiesce_instance(self):
        self._test_compute_api('unquiesce_instance', 'cast',
                instance=self.fake_instance_obj, mapping=None, version='4.0')
| |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import csv
import os
import numpy as np
import PIL.Image
class ObjectType:
    """Integer class IDs (0..15) used in the ground-truth labels."""
    Dontcare = 0
    Car = 1
    Van = 2
    Truck = 3
    Bus = 4
    Pickup = 5
    VehicleWithTrailer = 6
    SpecialVehicle = 7
    Person = 8
    Person_fa = 9
    Person_unsure = 10
    People = 11
    Cyclist = 12
    Tram = 13
    Person_Sitting = 14
    Misc = 15

    def __init__(self):
        pass
class Bbox:
    """Axis-aligned 2D bounding box stored as left/top/right/bottom pixels."""

    def __init__(self, x_left=0, y_top=0, x_right=0, y_bottom=0):
        # attribute names (xl/yt/xr/yb) are part of the public interface
        self.xl = x_left
        self.yt = y_top
        self.xr = x_right
        self.yb = y_bottom

    def width(self):
        """Horizontal extent of the box."""
        return self.xr - self.xl

    def height(self):
        """Vertical extent of the box."""
        return self.yb - self.yt

    def area(self):
        """Box area, i.e. width times height."""
        return self.width() * self.height()

    def get_array(self):
        """Return the box as a [left, top, right, bottom] list."""
        return [self.xl, self.yt, self.xr, self.yb]
class GroundTruthObj:
    """ This class is the data ground-truth

        #Values    Name      Description
        ----------------------------------------------------------------------------
        1    type         Class ID
        1    truncated    Float from 0 (non-truncated) to 1 (truncated), where
                          truncated refers to the object leaving image boundaries.
                          -1 corresponds to a don't care region.
        1    occluded     Integer (-1,0,1,2) indicating occlusion state:
                          -1 = unknown, 0 = fully visible,
                          1 = partly occluded, 2 = largely occluded
        1    alpha        Observation angle of object, ranging [-pi..pi]
        4    bbox         2D bounding box of object in the image (0-based index):
                          contains left, top, right, bottom pixel coordinates
        3    dimensions   3D object dimensions: height, width, length (in meters)
        3    location     3D object location x,y,z in camera coordinates (in meters)
        1    rotation_y   Rotation ry around Y-axis in camera coordinates [-pi..pi]
        1    score        Only for results: Float, indicating confidence in
                          detection, needed for p/r curves, higher is better.

        Here, 'DontCare' labels denote regions in which objects have not been labeled,
        for example because they have been too far away from the laser scanner.
    """

    # default class mappings: lower-cased label string -> ObjectType class ID
    OBJECT_TYPES = {
        'bus': ObjectType.Bus,
        'car': ObjectType.Car,
        'cyclist': ObjectType.Cyclist,
        'pedestrian': ObjectType.Person,
        'people': ObjectType.People,
        'person': ObjectType.Person,
        'person_sitting': ObjectType.Person_Sitting,
        'person-fa': ObjectType.Person_fa,
        'person?': ObjectType.Person_unsure,
        'pickup': ObjectType.Pickup,
        'misc': ObjectType.Misc,
        'special-vehicle': ObjectType.SpecialVehicle,
        'tram': ObjectType.Tram,
        'truck': ObjectType.Truck,
        'van': ObjectType.Van,
        'vehicle-with-trailer': ObjectType.VehicleWithTrailer}

    def __init__(self):
        self.stype = ''                     # raw label string from the file
        self.truncated = 0
        self.occlusion = 0
        self.angle = 0                      # observation angle alpha
        self.height = 0                     # 3D dimensions (meters)
        self.width = 0
        self.length = 0
        self.locx = 0                       # 3D location in camera coordinates
        self.locy = 0
        self.locz = 0
        self.roty = 0                       # rotation around the Y axis
        self.bbox = Bbox()
        self.object = ObjectType.Dontcare   # resolved class ID, see set_type()

    @classmethod
    def lmdb_format_length(cls):
        """
        width of an LMDB datafield returned by the gt_to_lmdb_format function.
        :return:
        """
        return 16

    def gt_to_lmdb_format(self):
        """
        For storage of a bbox ground truth object into a float32 LMDB.
        Sort-by attribute is always the last value in the array.
        """
        result = [
            # bbox in x,y,w,h format:
            self.bbox.xl,
            self.bbox.yt,
            self.bbox.xr - self.bbox.xl,
            self.bbox.yb - self.bbox.yt,
            # alpha angle:
            self.angle,
            # class number:
            self.object,
            0,
            # Y axis rotation:
            self.roty,
            # bounding box attributes:
            self.truncated,
            self.occlusion,
            # object dimensions:
            self.length,
            self.width,
            self.height,
            self.locx,
            self.locy,
            # depth (sort-by attribute):
            self.locz,
        ]
        # bug fix: compare with '==', not 'is' -- identity comparison of ints
        # only works by accident for CPython's small-integer cache
        assert(len(result) == self.lmdb_format_length())
        return result

    def set_type(self):
        """Resolve the raw label string to a class ID (Dontcare if unknown)."""
        self.object = self.OBJECT_TYPES.get(self.stype, ObjectType.Dontcare)
class GroundTruth:
    """
    Loads bbox ground-truth label files from a directory.

    Each label file is a delimiter-separated KITTI-style text file; the file
    name stem (an integer) keys the resulting per-image object lists.
    """

    def __init__(self,
                 label_dir,
                 label_ext='.txt',
                 label_delimiter=' ',
                 min_box_size=None,
                 class_mappings=None):
        """
        label_dir: directory containing the label files.
        label_ext: extension of label files.
        label_delimiter: field delimiter inside label files (default space).
        min_box_size: boxes smaller than this (either side) become Dontcare.
        class_mappings: optional replacement for GroundTruthObj.OBJECT_TYPES.
        """
        self.label_dir = label_dir
        self.label_ext = label_ext  # extension of label files
        self.label_delimiter = label_delimiter  # space is used as delimiter in label files
        self._objects_all = dict()  # positive bboxes across images
        self.min_box_size = min_box_size
        if class_mappings is not None:
            # NOTE: this rebinds the class-level mapping, affecting every
            # GroundTruthObj, not just objects loaded by this instance
            GroundTruthObj.OBJECT_TYPES = class_mappings

    def update_objects_all(self, _key, _bboxes):
        """Record the bbox list for one image key (empty list if none)."""
        if _bboxes:
            self._objects_all[_key] = _bboxes
        else:
            self._objects_all[_key] = []

    def load_gt_obj(self):
        """ load bbox ground truth from files either via the provided label directory or list of label files"""
        # bug fix: materialize the filter result -- on Python 3, filter()
        # returns an iterator and len() on it raises TypeError
        files = os.listdir(self.label_dir)
        files = list(filter(lambda x: x.endswith(self.label_ext), files))
        if len(files) == 0:
            raise RuntimeError('error: no label files found in %s' % self.label_dir)
        for label_file in files:
            objects_per_image = list()
            # NOTE(review): 'rb' is the Python 2 csv convention; on Python 3
            # csv.reader expects text mode -- confirm the target runtime.
            with open(os.path.join(self.label_dir, label_file), 'rb') as flabel:
                for row in csv.reader(flabel, delimiter=self.label_delimiter):
                    # load data; field order follows the KITTI label format
                    gt = GroundTruthObj()
                    gt.stype = row[0].lower()
                    gt.truncated = float(row[1])
                    gt.occlusion = int(row[2])
                    gt.angle = float(row[3])
                    gt.bbox.xl = float(row[4])
                    gt.bbox.yt = float(row[5])
                    gt.bbox.xr = float(row[6])
                    gt.bbox.yb = float(row[7])
                    gt.height = float(row[8])
                    gt.width = float(row[9])
                    gt.length = float(row[10])
                    gt.locx = float(row[11])
                    gt.locy = float(row[12])
                    gt.locz = float(row[13])
                    gt.roty = float(row[14])
                    gt.set_type()
                    box_dimensions = [gt.bbox.xr - gt.bbox.xl, gt.bbox.yb - gt.bbox.yt]
                    if self.min_box_size is not None:
                        if not all(x >= self.min_box_size for x in box_dimensions):
                            # object is smaller than threshold => set to "DontCare"
                            gt.stype = ''
                            gt.object = ObjectType.Dontcare
                    objects_per_image.append(gt)
                # the file name stem is the integer image key
                key = int(os.path.splitext(label_file)[0])
                self.update_objects_all(key, objects_per_image)

    @property
    def objects_all(self):
        """Mapping of image key -> list of GroundTruthObj."""
        return self._objects_all
# Helpers for packing bbox lists into fixed-size arrays and computing
# pixel overlap between two boxes.
def pad_bbox(arr, max_bboxes=64, bbox_width=16):
    """Pad a (num_boxes, bbox_width) array to a fixed (max_bboxes+1, bbox_width) array.

    Row 0 is a metadata row: [0][0] holds the number of boxes, [0][1] the
    bbox record width. Rows 1..num_boxes hold the data; the rest is zero.
    Raises ValueError when arr holds more than max_bboxes rows.
    """
    if arr.shape[0] > max_bboxes:
        # bug fix: the '%' operands must be a tuple -- the original applied
        # '%' to arr.shape[0] alone (TypeError: not enough arguments) and
        # passed max_bboxes as a stray second argument to ValueError
        raise ValueError(
            'Too many bounding boxes (%d > %d)' % (arr.shape[0], max_bboxes)
        )
    # fill remainder with zeroes:
    data = np.zeros((max_bboxes+1, bbox_width), dtype='float')
    # number of bounding boxes:
    data[0][0] = arr.shape[0]
    # width of a bounding box:
    data[0][1] = bbox_width
    # bounding box data. Merge nothing if no bounding boxes exist.
    if arr.shape[0] > 0:
        data[1:1 + arr.shape[0]] = arr
    return data
def bbox_to_array(arr, label=0, max_bboxes=64, bbox_width=16):
    """
    Converts a 1-dimensional bbox array to an image-like
    3-dimensional CHW array (a single channel prepended).
    """
    padded = pad_bbox(arr, max_bboxes, bbox_width)
    return padded[np.newaxis]
def bbox_overlap(abox, bbox):
    """Intersect two [x, y, w, h] boxes.

    Returns (overlap_pix, overlap_box): the overlap pixel count and the
    intersection rectangle in [x, y, w, h] form. Any extra (non-bbox)
    entries present in *bbox* are preserved in the returned box.
    """
    # corner coordinates of the first box (inclusive right/bottom edges)
    ax1, ay1 = abox[0], abox[1]
    ax2 = abox[0] + abox[2] - 1
    ay2 = abox[1] + abox[3] - 1
    # corner coordinates of the second box
    bx1, by1 = bbox[0], bbox[1]
    bx2 = bbox[0] + bbox[2] - 1
    by2 = bbox[1] + bbox[3] - 1
    # intersection rectangle corners
    ix1 = max(ax1, bx1)
    iy1 = max(ay1, by1)
    ix2 = min(ax2, bx2)
    iy2 = min(ay2, by2)
    # make sure we preserve any non-bbox components
    overlap_box = list(bbox)
    overlap_box[:4] = [ix1, iy1, ix2 - ix1 + 1, iy2 - iy1 + 1]
    overlap_pix = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    return overlap_pix, overlap_box
def pad_image(img, padding_image_height, padding_image_width):
    """
    pad a single image to the specified dimensions

    The source image is pasted into the top-left corner of a black canvas
    of size (padding_image_width, padding_image_height), same mode as img.
    Raises ValueError if the source exceeds the target in either dimension.
    """
    src_width, src_height = img.size
    if src_width > padding_image_width:
        raise ValueError("Source image width %d is greater than padding width %d" % (src_width, padding_image_width))
    if src_height > padding_image_height:
        raise ValueError("Source image height %d is greater than padding height %d" % (src_height, padding_image_height))
    canvas = PIL.Image.new(
        img.mode,
        (padding_image_width, padding_image_height),
        "black")
    canvas.paste(img, (0, 0))  # copy to top-left corner
    return canvas
def resize_bbox_list(bboxlist, rescale_x=1, rescale_y=1):
    """Scale a list of [x1, y1, w, h] boxes by the given per-axis factors.

    NOTE: the boxes are scaled in place -- the returned list contains the
    same (mutated) box objects that were passed in.
    """
    scaled = []
    factors = (rescale_x, rescale_y, rescale_x, rescale_y)
    for box in bboxlist:
        for idx, factor in enumerate(factors):
            box[idx] *= factor
        scaled.append(box)
    return scaled
| |
#
# Handler library for Linux IaaS
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JSON def:
HandlerEnvironment.json
[{
"name": "ExampleHandlerLinux",
"seqNo": "seqNo",
"version": "1.0",
"handlerEnvironment": {
"logFolder": "<your log folder location>",
"configFolder": "<your config folder location>",
"statusFolder": "<your status folder location>",
"heartbeatFile": "<your heartbeat file location>",
}
}]
Example ./config/1.settings
"{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings":
"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}"
Example HeartBeat
{
"version": 1.0,
"heartbeat" : {
"status": "ready",
"code": 0,
"Message": "Sample Handler running. Waiting for a new configuration from user."
}
}
Example Status Report:
[{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}]
"""
import os
import os.path
import sys
import re
import imp
import base64
import json
import tempfile
import time
from os.path import join
import Utils.WAAgentUtil
from Utils.WAAgentUtil import waagent
import logging
import logging.handlers
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
from common import CommonVariables
import platform
import subprocess
import datetime
import Utils.Status
from MachineIdentity import MachineIdentity
import ExtensionErrorCodeHelper
import traceback
DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ"
class HandlerContext:
    """Per-run state for the extension handler (name, version, paths,
    sequence number); fields beyond name/version are attached later by
    HandlerUtility.try_parse_context()."""

    def __init__(self, name):
        self._name = name
        self._version = '0.0'
class HandlerUtility:
    """Utility layer for Azure Linux VM extension handlers.

    Wraps logging, HandlerEnvironment.json / *.settings parsing, sequence
    number bookkeeping, status-file reporting and telemetry collection on
    top of the waagent helper library.
    """

    # telemetry is class-level so it is shared by all instances
    telemetry_data = {}
    serializable_telemetry_data = []
    # first non-success error code wins; see SetExtErrorCode()
    ExtErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success

    def __init__(self, log, error, short_name):
        """log/error: logging callables (used on Python 2); short_name:
        extension short name used for the HandlerContext."""
        self._log = log
        self._error = error
        self.log_message = ""          # accumulated in-memory log buffer
        self._short_name = short_name
        self.patching = None           # distro patching helper, set by caller
        self.storageDetailsObj = None
        self.partitioncount = 0
        self.logging_file = None       # set in try_parse_context()

    def _get_log_prefix(self):
        # e.g. "[ExtName-1.0]", prepended to every log/error line
        return '[%s-%s]' % (self._context._name, self._context._version)

    def _get_current_seq_no(self, config_folder):
        """Return the sequence number of the most recently modified
        *.settings file under config_folder, or -1 if none is found."""
        seq_no = -1
        cur_seq_no = -1
        freshest_time = None
        for subdir, dirs, files in os.walk(config_folder):
            for file in files:
                try:
                    if(file.endswith('.settings')):
                        cur_seq_no = int(os.path.basename(file).split('.')[0])
                        if(freshest_time == None):
                            freshest_time = os.path.getmtime(join(config_folder, file))
                            seq_no = cur_seq_no
                        else:
                            current_file_m_time = os.path.getmtime(join(config_folder, file))
                            if(current_file_m_time > freshest_time):
                                freshest_time = current_file_m_time
                                seq_no = cur_seq_no
                except ValueError:
                    # file name stem was not an integer -- skip it
                    continue
        return seq_no

    def get_last_seq(self):
        """Return the last processed sequence number from 'mrseq', or -1."""
        if(os.path.isfile('mrseq')):
            seq = waagent.GetFileContents('mrseq')
            if(seq):
                return int(seq)
        return -1

    def exit_if_same_seq(self):
        """Exit the process (code 0) when this settings sequence number was
        already processed."""
        current_seq = int(self._context._seq_no)
        last_seq = self.get_last_seq()
        if(current_seq == last_seq):
            self.log("the sequence number are same, so skip, current:" + str(current_seq) + "== last:" + str(last_seq))
            sys.exit(0)

    def log(self, message, level='Info'):
        """Log a message (file-based on Python 3, waagent logger on Python 2)
        and append it to the in-memory log_message buffer."""
        if sys.version_info > (3,):
            if self.logging_file is not None:
                self.log_py3(message)
            else:
                pass
                #self.log_to_file()
        else:
            self._log(self._get_log_prefix() + message)
        message = "{0} {1} {2} \n".format(str(datetime.datetime.now()), level, message)
        self.log_message = self.log_message + message

    def log_py3(self, msg):
        """Append a timestamped, prefixed line to the logging file
        (best-effort: IOError is swallowed)."""
        if type(msg) is not str:
            msg = str(msg, errors="backslashreplace")
        msg = str(datetime.datetime.now()) + " " + str(self._get_log_prefix()) + msg + "\n"
        try:
            with open(self.logging_file, "a+") as C:
                C.write(msg)
        except IOError:
            pass

    def error(self, message):
        """Log an error line via the error callable, with the handler prefix."""
        self._error(self._get_log_prefix() + message)

    def fetch_log_message(self):
        """Return the accumulated in-memory log buffer."""
        return self.log_message

    def _parse_config(self, ctxt):
        """Parse the *.settings JSON text; if protected settings plus a cert
        thumbprint are present, decrypt them with openssl via waagent and
        replace the encrypted blob with the decoded dict. Returns the parsed
        config (or None on JSON failure)."""
        config = None
        try:
            config = json.loads(ctxt)
        except:
            self.error('JSON exception decoding ' + ctxt)
        if config == None:
            self.error("JSON error processing settings file:" + ctxt)
        else:
            handlerSettings = config['runtimeSettings'][0]['handlerSettings']
            if 'protectedSettings' in handlerSettings and \
                    "protectedSettingsCertThumbprint" in handlerSettings and \
                    handlerSettings['protectedSettings'] is not None and \
                    handlerSettings["protectedSettingsCertThumbprint"] is not None:
                protectedSettings = handlerSettings['protectedSettings']
                thumb = handlerSettings['protectedSettingsCertThumbprint']
                # waagent stores the cert/key pair named by thumbprint
                cert = waagent.LibDir + '/' + thumb + '.crt'
                pkey = waagent.LibDir + '/' + thumb + '.prv'
                f = tempfile.NamedTemporaryFile(delete=False)
                f.close()
                waagent.SetFileContents(f.name, config['runtimeSettings'][0]['handlerSettings']['protectedSettings'])
                cleartxt = None
                # base64-decode then S/MIME-decrypt with the machine cert
                cleartxt = waagent.RunGetOutput(self.patching.base64_path + " -d " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1]
                jctxt = {}
                try:
                    jctxt = json.loads(cleartxt)
                except:
                    self.error('JSON exception decoding ' + cleartxt)
                handlerSettings['protectedSettings'] = jctxt
                self.log('Config decoded correctly.')
        return config

    def do_parse_context(self, operation):
        """Parse the handler context for *operation*; exit(0) when there is
        no new settings file to process."""
        self.operation = operation
        _context = self.try_parse_context()
        getWaagentPathUsed = Utils.WAAgentUtil.GetPathUsed()
        if(getWaagentPathUsed == 0):
            self.log("waagent old path is used")
        else:
            self.log("waagent new path is used")
        if not _context:
            self.log("maybe no new settings file found")
            sys.exit(0)
        return _context

    def try_parse_context(self):
        """Read HandlerEnvironment.json and the current *.settings file and
        populate self._context. Returns the context, or None on failure."""
        self._context = HandlerContext(self._short_name)
        handler_env = None
        config = None
        ctxt = None
        code = 0
        # get the HandlerEnvironment.json. According to the extension handler
        # spec, it is always in the ./ directory
        self.log('cwd is ' + os.path.realpath(os.path.curdir))
        handler_env_file = './HandlerEnvironment.json'
        if not os.path.isfile(handler_env_file):
            self.error("Unable to locate " + handler_env_file)
            return None
        ctxt = waagent.GetFileContents(handler_env_file)
        if ctxt == None:
            self.error("Unable to read " + handler_env_file)
        try:
            handler_env = json.loads(ctxt)
        except:
            pass
        if handler_env == None:
            self.log("JSON error processing " + handler_env_file)
            return None
        if type(handler_env) == list:
            # the spec wraps the environment in a one-element list
            handler_env = handler_env[0]
        self._context._name = handler_env['name']
        self._context._version = str(handler_env['version'])
        self._context._config_dir = handler_env['handlerEnvironment']['configFolder']
        self._context._log_dir = handler_env['handlerEnvironment']['logFolder']
        self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'], 'extension.log')
        self.logging_file = self._context._log_file
        self._context._shell_log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'], 'shell.log')
        self._change_log_file()
        self._context._status_dir = handler_env['handlerEnvironment']['statusFolder']
        self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile']
        self._context._seq_no = self._get_current_seq_no(self._context._config_dir)
        if self._context._seq_no < 0:
            self.error("Unable to locate a .settings file!")
            return None
        self._context._seq_no = str(self._context._seq_no)
        self.log('sequence number is ' + self._context._seq_no)
        self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status')
        self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings')
        self.log("setting file path is" + self._context._settings_file)
        ctxt = None
        ctxt = waagent.GetFileContents(self._context._settings_file)
        if ctxt == None:
            error_msg = 'Unable to read ' + self._context._settings_file + '. '
            self.error(error_msg)
            return None
        else:
            if(self.operation is not None and self.operation.lower() == "enable"):
                # we should keep the current status file
                self.backup_settings_status_file(self._context._seq_no)
        self._context._config = self._parse_config(ctxt)
        return self._context

    def _change_log_file(self):
        """Re-point the waagent logger at the extension's own log file."""
        self.log("Change log file to " + self._context._log_file)
        waagent.LoggerInit(self._context._log_file, '/dev/stdout')
        self._log = waagent.Log
        self._error = waagent.Error

    def save_seq(self):
        """Persist the current sequence number as the most recent one."""
        self.set_last_seq(self._context._seq_no)
        self.log("set most recent sequence number to " + self._context._seq_no)

    def set_last_seq(self, seq):
        waagent.SetFileContents('mrseq', str(seq))

    def get_value_from_configfile(self, key):
        """Read *key* from the [SnapshotThread] section of
        /etc/azure/vmbackup.conf, or None when missing (best-effort)."""
        value = None
        configfile = '/etc/azure/vmbackup.conf'
        try:
            if os.path.exists(configfile):
                config = ConfigParsers.ConfigParser()
                config.read(configfile)
                if config.has_option('SnapshotThread', key):
                    value = config.get('SnapshotThread', key)
                else:
                    self.log("Config File doesn't have the key :" + key, 'Info')
        except Exception as e:
            errorMsg = " Unable to get config file.key is " + key + "with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.log(errorMsg, 'Warning')
        return value

    def set_value_to_configfile(self, key, value):
        """Write key=value into the [SnapshotThread] section of
        /etc/azure/vmbackup.conf (creating file/section as needed).
        Returns *value* unchanged."""
        configfile = '/etc/azure/vmbackup.conf'
        try:
            self.log('setting doseq flag in config file', 'Info')
            if not os.path.exists(os.path.dirname(configfile)):
                os.makedirs(os.path.dirname(configfile))
            config = ConfigParsers.RawConfigParser()
            if os.path.exists(configfile):
                config.read(configfile)
                if config.has_section('SnapshotThread'):
                    if config.has_option('SnapshotThread', key):
                        config.remove_option('SnapshotThread', key)
                else:
                    config.add_section('SnapshotThread')
            else:
                config.add_section('SnapshotThread')
            config.set('SnapshotThread', key, value)
            with open(configfile, 'w') as config_file:
                config.write(config_file)
        except Exception as e:
            errorMsg = " Unable to set config file.key is " + key + "with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.log(errorMsg, 'Warning')
        return value

    def get_machine_id(self):
        """Return a persistent unique machine id, creating and caching it
        under /etc/azure on first use."""
        machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B"
        machine_id = ""
        try:
            if not os.path.exists(os.path.dirname(machine_id_file)):
                os.makedirs(os.path.dirname(machine_id_file))
            if os.path.exists(machine_id_file):
                file_pointer = open(machine_id_file, "r")
                machine_id = file_pointer.readline()
                file_pointer.close()
            else:
                mi = MachineIdentity()
                machine_id = mi.stored_identity()[1:-1]
                file_pointer = open(machine_id_file, "w")
                file_pointer.write(machine_id)
                file_pointer.close()
        except Exception as e:
            errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            # bug fix: log() accepts (message, level) -- the extra positional
            # argument in the original raised TypeError inside this handler
            self.log(errMsg, 'Error')
        self.log("Unique Machine Id : {0}".format(machine_id))
        return machine_id

    def get_total_used_size(self):
        """Sum the 'used' KB of local filesystems from `df`, excluding
        network shares and gluster snapshot mounts. Returns
        (bytes_used, failed_flag)."""
        try:
            df = subprocess.Popen(["df", "-k", "--output=source,fstype,size,used,avail,pcent,target"], stdout=subprocess.PIPE)
            '''
            Sample output of the df command

            Filesystem     Type     1K-blocks    Used   Avail Use% Mounted on
            /dev/sda2      xfs       52155392 3487652 48667740   7% /
            devtmpfs       devtmpfs   7170976       0  7170976   0% /dev
            tmpfs          tmpfs      7180624       0  7180624   0% /dev/shm
            tmpfs          tmpfs      7180624  760496  6420128  11% /run
            tmpfs          tmpfs      7180624       0  7180624   0% /sys/fs/cgroup
            /dev/sda1      ext4        245679  151545    76931  67% /boot
            /dev/sdb1      ext4      28767204 2142240 25140628   8% /mnt/resource
            /dev/mapper/mygroup-thinv1 xfs 1041644 33520 1008124 4% /bricks/brick1
            /dev/mapper/mygroup-85197c258a54493da7880206251f5e37_0 xfs 1041644 33520 1008124 4% /run/gluster/snaps/85197c258a54493da7880206251f5e37/brick2
            /dev/mapper/mygroup2-thinv2 xfs 15717376 5276944 10440432 34% /tmp/test
            /dev/mapper/mygroup2-63a858543baf4e40a3480a38a2f232a0_0 xfs 15717376 5276944 10440432 34% /run/gluster/snaps/63a858543baf4e40a3480a38a2f232a0/brick2
            tmpfs          tmpfs      1436128       0  1436128   0% /run/user/1000
            //Centos72test/cifs_test cifs 52155392 4884620 47270772 10% /mnt/cifs_test2
            '''
            # give df up to 30 seconds to finish
            process_wait_time = 30
            while(process_wait_time > 0 and df.poll() is None):
                time.sleep(1)
                process_wait_time -= 1
            output = df.stdout.read()
            output = output.split("\n")
            total_used = 0
            total_used_network_shares = 0
            total_used_gluster = 0
            network_fs_types = []
            for i in range(1, len(output)-1):
                device, fstype, size, used, available, percent, mountpoint = output[i].split()
                self.log("Device name : {0} fstype : {1} size : {2} used space in KB : {3} available space : {4} mountpoint : {5}".format(device, fstype, size, used, available, mountpoint))
                if "fuse" in fstype.lower() or "nfs" in fstype.lower() or "cifs" in fstype.lower():
                    # network shares are tracked separately, not counted
                    if fstype not in network_fs_types:
                        network_fs_types.append(fstype)
                    self.log("Not Adding as network-drive, Device name : {0} used space in KB : {1} fstype : {2}".format(device, used, fstype))
                    total_used_network_shares = total_used_network_shares + int(used)
                elif (mountpoint.startswith('/run/gluster/snaps/')):
                    # gluster snapshot mounts duplicate the brick data
                    self.log("Not Adding Device name : {0} used space in KB : {1} mount point : {2}".format(device, used, mountpoint))
                    total_used_gluster = total_used_gluster + int(used)
                else:
                    self.log("Adding Device name : {0} used space in KB : {1} mount point : {2}".format(device, used, mountpoint))
                    total_used = total_used + int(used)  # return in KB
            if not len(network_fs_types) == 0:
                HandlerUtility.add_to_telemetery_data("networkFSTypeInDf", str(network_fs_types))
                HandlerUtility.add_to_telemetery_data("totalUsedNetworkShare", str(total_used_network_shares))
                self.log("Total used space in Bytes of network shares : {0}".format(total_used_network_shares * 1024))
            if total_used_gluster != 0:
                HandlerUtility.add_to_telemetery_data("glusterFSSize", str(total_used_gluster))
            self.log("Total used space in Bytes : {0}".format(total_used * 1024))
            return total_used * 1024, False  # Converting into Bytes
        except Exception as e:
            errMsg = 'Unable to fetch total used space with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg)
            return 0, True

    def get_storage_details(self, total_size, failure_flag):
        """Build (and cache) the StorageDetails status object."""
        self.storageDetailsObj = Utils.Status.StorageDetails(self.partitioncount, total_size, False, failure_flag)
        self.log("partition count : {0}, total used size : {1}, is storage space present : {2}, is size computation failed : {3}".format(self.storageDetailsObj.partitionCount, self.storageDetailsObj.totalUsedSizeInBytes, self.storageDetailsObj.isStoragespacePresent, self.storageDetailsObj.isSizeComputationFailed))
        return self.storageDetailsObj

    def SetExtErrorCode(self, extErrorCode):
        """Record the first non-success extension error code (later calls
        while an error is already set are ignored)."""
        if self.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success:
            self.ExtErrorCode = extErrorCode

    def do_status_json(self, operation, status, sub_status, status_code, message, telemetrydata, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj, total_size, failure_flag):
        """Assemble the TopLevelStatus object for a status report."""
        tstamp = time.strftime(DateTimeFormat, time.gmtime())
        formattedMessage = Utils.Status.FormattedMessage("en-US", message)
        stat_obj = Utils.Status.StatusObj(self._context._name, operation, status, sub_status, status_code, formattedMessage, telemetrydata, self.get_storage_details(total_size, failure_flag), self.get_machine_id(), taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj)
        top_stat_obj = Utils.Status.TopLevelStatus(self._context._version, tstamp, stat_obj)
        return top_stat_obj

    def get_extension_version(self):
        """Derive the extension version from the install directory name
        (…/<name>-<version>); 'Unknown' on failure."""
        try:
            cur_dir = os.getcwd()
            cur_extension = cur_dir.split("/")[-1]
            extension_version = cur_extension.split("-")[-1]
            return extension_version
        except Exception as e:
            errMsg = 'Failed to retrieve the Extension version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg)
            extension_version = "Unknown"
            return extension_version

    def get_wala_version(self):
        """Parse the waagent version from /var/log/waagent.log, falling back
        to `waagent -version`; 'Unknown' on failure."""
        try:
            file_pointer = open('/var/log/waagent.log', 'r')
            waagent_version = ''
            for line in file_pointer:
                if 'Azure Linux Agent Version' in line:
                    waagent_version = line.split(':')[-1]
            if waagent_version[:-1] == "":  # for removing the trailing '\n' character
                waagent_version = self.get_wala_version_from_command()
                return waagent_version
            else:
                waagent_version = waagent_version[:-1].split("-")[-1]  # getting only version number
                return waagent_version
        except Exception as e:
            errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg)
            waagent_version = "Unknown"
            return waagent_version

    def get_wala_version_from_command(self):
        """Ask the agent binary for its version via `waagent -version`."""
        try:
            cur_dir = os.getcwd()
            os.chdir("..")
            p = subprocess.Popen(['/usr/sbin/waagent', '-version'], stdout=subprocess.PIPE)
            # give the command up to 30 seconds to finish
            process_wait_time = 30
            while(process_wait_time > 0 and p.poll() is None):
                time.sleep(1)
                process_wait_time -= 1
            out = p.stdout.read()
            out = str(out)
            out = out.split(" ")
            waagent = out[0]
            waagent_version = waagent.split("-")[-1]  # getting only version number
            os.chdir(cur_dir)
            return waagent_version
        except Exception as e:
            errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg)
            waagent_version = "Unknown"
            return waagent_version

    def get_dist_info(self):
        """Return (distro-version, kernel release); ("Unkonwn","Unkonwn")
        on failure."""
        try:
            if 'FreeBSD' in platform.system():
                release = re.sub('\-.*\Z', '', str(platform.release()))
                return "FreeBSD", release
            if 'linux_distribution' in dir(platform):
                distinfo = list(platform.linux_distribution(full_distribution_name=0))
                # remove trailing whitespace in distro name
                distinfo[0] = distinfo[0].strip()
                return distinfo[0] + "-" + distinfo[1], platform.release()
            else:
                distinfo = platform.dist()
                return distinfo[0] + "-" + distinfo[1], platform.release()
        except Exception as e:
            errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg)
            return "Unkonwn", "Unkonwn"

    def substat_new_entry(self, sub_status, code, name, status, formattedmessage):
        """Append a SubstatusObj to sub_status and return the list."""
        sub_status_obj = Utils.Status.SubstatusObj(code, name, status, formattedmessage)
        sub_status.append(sub_status_obj)
        return sub_status

    def timedelta_total_seconds(self, delta):
        """timedelta.total_seconds() with a fallback for very old Pythons
        that lack the method (fallback ignores microseconds)."""
        if not hasattr(datetime.timedelta, 'total_seconds'):
            return delta.days * 86400 + delta.seconds
        else:
            return delta.total_seconds()

    @staticmethod
    def add_to_telemetery_data(key, value):
        HandlerUtility.telemetry_data[key] = value

    def add_telemetry_data(self):
        """Collect agent/extension/OS version info into the telemetry dict."""
        os_version, kernel_version = self.get_dist_info()
        HandlerUtility.add_to_telemetery_data("guestAgentVersion", self.get_wala_version())
        HandlerUtility.add_to_telemetery_data("extensionVersion", self.get_extension_version())
        HandlerUtility.add_to_telemetery_data("osVersion", os_version)
        HandlerUtility.add_to_telemetery_data("kernelVersion", kernel_version)

    def convert_telemetery_data_to_bcm_serializable_format(self):
        """Re-shape telemetry_data into a list of {"Key":…, "Value":…}
        dicts as expected by the backup service."""
        HandlerUtility.serializable_telemetry_data = []
        for k, v in HandlerUtility.telemetry_data.items():
            each_telemetry_data = {}
            each_telemetry_data["Value"] = v
            each_telemetry_data["Key"] = k
            HandlerUtility.serializable_telemetry_data.append(each_telemetry_data)

    def do_status_report(self, operation, status, status_code, message, taskId=None, commandStartTimeUTCTicks=None, snapshot_info=None, total_size=0, failure_flag=True):
        """Build the status report. Returns (stat_rept, stat_rept_file):
        the full JSON status (with telemetry) embedded as a sub-status, and
        the JSON text destined for the seq-no .status file."""
        self.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
        sub_stat = []
        stat_rept = []
        self.add_telemetry_data()
        vm_health_obj = Utils.Status.VmHealthInfoObj(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode], int(self.ExtErrorCode))
        self.convert_telemetery_data_to_bcm_serializable_format()
        stat_rept = self.do_status_json(operation, status, sub_stat, status_code, message, HandlerUtility.serializable_telemetry_data, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj, total_size, failure_flag)
        time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
        time_span = self.timedelta_total_seconds(time_delta) * 1000
        # placeholder GUID is swapped for the WCF-style \/Date(ms)\/ literal
        # after JSON encoding (json.dumps would escape the backslashes)
        date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0'
        stat_rept.timestampUTC = date_place_holder
        date_string = r'\/Date(' + str((int)(time_span)) + r')\/'
        stat_rept = "[" + json.dumps(stat_rept, cls=ComplexEncoder) + "]"
        stat_rept = stat_rept.replace(date_place_holder, date_string)
        # Add Status as sub-status for Status to be written on Status-File
        sub_stat = self.substat_new_entry(sub_stat, '0', stat_rept, 'success', None)
        if self.get_public_settings()[CommonVariables.vmType].lower() == CommonVariables.VmTypeV2.lower() and CommonVariables.isTerminalStatus(status):
            status = CommonVariables.status_success
        stat_rept_file = self.do_status_json(operation, status, sub_stat, status_code, message, None, taskId, commandStartTimeUTCTicks, None, None, total_size, failure_flag)
        stat_rept_file = "[" + json.dumps(stat_rept_file, cls=ComplexEncoder) + "]"
        # rename all other status files, or the WALA would report the wrong
        # status file.
        # because the wala choose the status file with the highest sequence
        # number to report.
        return stat_rept, stat_rept_file

    def write_to_status_file(self, stat_rept_file):
        """Write the status report text to the current .status file
        (best-effort; failures are only logged)."""
        try:
            if self._context._status_file:
                with open(self._context._status_file, 'w+') as f:
                    f.write(stat_rept_file)
        except Exception as e:
            errMsg = 'Status file creation failed with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.log(errMsg)

    def is_status_file_exists(self):
        """True when the current .status file exists (False on any error)."""
        try:
            if os.path.exists(self._context._status_file):
                return True
            else:
                return False
        except Exception as e:
            self.log("exception is getting status file" + traceback.format_exc())
            return False

    def backup_settings_status_file(self, _seq_no):
        """Rename stale .settings/.status files ('.'->'_') so WALA reports
        only the current sequence number's status."""
        self.log("current seq no is " + _seq_no)
        for subdir, dirs, files in os.walk(self._context._config_dir):
            for file in files:
                try:
                    if(file.endswith('.settings') and file != (_seq_no + ".settings")):
                        new_file_name = file.replace(".", "_")
                        os.rename(join(self._context._config_dir, file), join(self._context._config_dir, new_file_name))
                except Exception as e:
                    self.log("failed to rename the status file.")
        for subdir, dirs, files in os.walk(self._context._status_dir):
            for file in files:
                try:
                    if(file.endswith('.status') and file != (_seq_no + ".status")):
                        new_file_name = file.replace(".", "_")
                        os.rename(join(self._context._status_dir, file), join(self._context._status_dir, new_file_name))
                except Exception as e:
                    self.log("failed to rename the status file.")

    def do_exit(self, exit_code, operation, status, code, message):
        """Report final status (best-effort) and terminate with exit_code."""
        try:
            HandlerUtility.add_to_telemetery_data("extErrorCode", str(ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode]))
            self.do_status_report(operation, status, code, message)
        except Exception as e:
            self.log("Can't update status: " + str(e))
        sys.exit(exit_code)

    def get_handler_settings(self):
        return self._context._config['runtimeSettings'][0]['handlerSettings']

    def get_protected_settings(self):
        return self.get_handler_settings().get('protectedSettings')

    def get_public_settings(self):
        return self.get_handler_settings().get('publicSettings')

    def is_prev_in_transition(self):
        """True when the previous (renamed) status file reports a
        'Transition' state."""
        curr_seq = self.get_last_seq()
        last_seq = curr_seq - 1
        if last_seq >= 0:
            self.log("previous status and path: " + str(last_seq) + " " + str(self._context._status_dir))
            status_file_prev = os.path.join(self._context._status_dir, str(last_seq) + '_status')
            if os.path.isfile(status_file_prev) and os.access(status_file_prev, os.R_OK):
                searchfile = open(status_file_prev, "r")
                for line in searchfile:
                    if "Transition" in line:
                        self.log("transitioning found in the previous status file")
                        searchfile.close()
                        return True
                searchfile.close()
        return False

    def get_prev_log(self):
        """Return (at most) the last 300 lines of the extension log."""
        with open(self._context._log_file, "r") as f:
            lines = f.readlines()
            if(len(lines) > 300):
                lines = lines[-300:]
                return ''.join(str(x) for x in lines)
            else:
                return ''.join(str(x) for x in lines)

    def get_shell_script_log(self):
        """Return (at most) the last 10 lines of the shell log, or "" when
        the file cannot be read."""
        lines = ""
        try:
            with open(self._context._shell_log_file, "r") as f:
                lines = f.readlines()
                if(len(lines) > 10):
                    lines = lines[-10:]
                return ''.join(str(x) for x in lines)
        except Exception as e:
            self.log("Can't receive shell log file: " + str(e))
        return lines
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that serializes arbitrary objects: prefers the object's
    convertToDictionary() hook when present, otherwise falls back to its
    __dict__."""

    def default(self, obj):
        convert = getattr(obj, 'convertToDictionary', None)
        if convert is not None:
            return convert()
        return obj.__dict__
| |
#!/usr/bin/env python
# coding:utf-8
"""
GAEProxyHandler is the handler of http proxy port. default to 8087
if HTTP request:
do_METHOD()
elif HTTPS request:
do_CONNECT()
What is Direct mode:
if user access google site like www.google.com, client.google.com,
we don't need forward request to GAE server.
we can send the original request to google ip directly.
because most google ip act as general front server.
Youtube content server do not support direct mode.
look direct_handler.py for more detail.
What GAE mode:
Google App Engine support urlfetch for proxy.
every google account can apply 12 appid.
after deploy server code under gae_proxy/server/gae to GAE server, user can
use GAE server as http proxy.
Here is the global link view:
Browser => GAE_proxy => GAE server => target http/https server.
look gae_hander.py for more detail.
"""
import errno
import socket
import ssl
import urlparse
import re
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
from xlog import getLogger
xlog = getLogger("gae_proxy")
import simple_http_client
import simple_http_server
from cert_util import CertUtil
from config import config
import gae_handler
import direct_handler
from connect_control import touch_active
import web_control
class GAEProxyHandler(simple_http_server.HttpServerHandler):
    """Handler of the local HTTP proxy port (default 8087).

    Plain HTTP requests arrive via do_METHOD(); HTTPS CONNECT tunnels via
    do_CONNECT(). Each request is routed either through the GAE fetch
    service (do_AGENT -> gae_handler) or straight to a Google front IP
    (direct_handler), based on the host lists in config.
    """

    gae_support_methods = tuple(["GET", "POST", "HEAD", "PUT", "DELETE", "PATCH"])
    # GAE don't support command like OPTION
    bufsize = 256*1024
    max_retry = 3
    # Names/addresses identifying this machine; filled lazily by is_local().
    local_names = []

    def setup(self):
        # Route every verb through the single do_METHOD dispatcher.
        self.__class__.do_GET = self.__class__.do_METHOD
        self.__class__.do_PUT = self.__class__.do_METHOD
        self.__class__.do_POST = self.__class__.do_METHOD
        self.__class__.do_HEAD = self.__class__.do_METHOD
        self.__class__.do_DELETE = self.__class__.do_METHOD
        self.__class__.do_OPTIONS = self.__class__.do_METHOD
        # Canned reply for the proxy self-check URL (see do_METHOD and
        # do_CONNECT_AGENT): used by the web UI to detect that the browser's
        # proxy setting actually points at us.
        self.self_check_response_data = "HTTP/1.1 200 OK\r\n"\
            "Access-Control-Allow-Origin: *\r\n"\
            "Cache-Control: no-cache, no-store, must-revalidate\r\n"\
            "Pragma: no-cache\r\n"\
            "Expires: 0\r\n"\
            "Content-Type: text/plain\r\n"\
            "Content-Length: 2\r\n\r\nOK"

    def forward_local(self):
        """
        If browser send localhost:xxx request to GAE_proxy,
        we forward it to localhost.
        """
        host = self.headers.get('Host', '')
        host_ip, _, port = host.rpartition(':')
        http_client = simple_http_client.HTTP_client((host_ip, int(port)))

        request_headers = dict((k.title(), v) for k, v in self.headers.items())

        payload = b''
        if 'Content-Length' in request_headers:
            try:
                payload_len = int(request_headers.get('Content-Length', 0))
                payload = self.rfile.read(payload_len)
            except Exception as e:
                xlog.warn('forward_local read payload failed:%s', e)
                return

        self.parsed_url = urlparse.urlparse(self.path)
        if len(self.parsed_url[4]):
            # Re-attach the query string to the path.
            path = '?'.join([self.parsed_url[2], self.parsed_url[4]])
        else:
            path = self.parsed_url[2]

        content, status, response = http_client.request(self.command, path, request_headers, payload)
        if not status:
            xlog.warn("forward_local fail")
            return

        # Serialize the upstream response back to the browser verbatim.
        out_list = []
        out_list.append("HTTP/1.1 %d\r\n" % status)
        for key, value in response.getheaders():
            key = key.title()
            out_list.append("%s: %s\r\n" % (key, value))
        out_list.append("\r\n")
        out_list.append(content)

        self.wfile.write("".join(out_list))

    def send_method_allows(self, headers, payload):
        """Answer a CORS preflight (OPTIONS) request locally, echoing the
        requested headers/origin back to the browser."""
        xlog.debug("send method allow list for:%s %s", self.command, self.path)
        # Refer: https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests
        response = \
            "HTTP/1.1 200 OK\r\n"\
            "Access-Control-Allow-Credentials: true\r\n"\
            "Access-Control-Allow-Methods: GET, POST, HEAD, PUT, DELETE, PATCH\r\n"\
            "Access-Control-Max-Age: 1728000\r\n"\
            "Content-Length: 0\r\n"

        req_header = headers.get("Access-Control-Request-Headers", "")
        if req_header:
            response += "Access-Control-Allow-Headers: %s\r\n" % req_header

        origin = headers.get("Origin", "")
        if origin:
            response += "Access-Control-Allow-Origin: %s\r\n" % origin
        else:
            response += "Access-Control-Allow-Origin: *\r\n"

        response += "\r\n"

        self.wfile.write(response)

    def is_local(self, hosts):
        """Return True if any name in *hosts* looks like this machine
        (loopback/private prefixes or a cached local host name)."""
        if 0 == len(self.local_names):
            # Lazily build the class-level cache of local names.
            self.local_names.append('localhost')
            self.local_names.append(socket.gethostname().lower());
            try:
                # NOTE(review): gethostbyname_ex(...)[-1] is a *list* of IPs
                # appended as a single element; the string membership test
                # below can never match it -- confirm intended.
                self.local_names.append(socket.gethostbyname_ex(socket.gethostname())[-1])
            except socket.gaierror:
                # TODO Append local IP address to local_names
                pass

        for s in hosts:
            s = s.lower()
            if s.startswith('127.') \
                    or s.startswith('192.168.') \
                    or s.startswith('10.') \
                    or s.startswith('169.254.') \
                    or s in self.local_names:
                print s
                return True

        return False

    def do_METHOD(self):
        """Dispatch one plain-HTTP request: web UI, localhost forward,
        self-check URL, GAE fetch, or http->https redirect."""
        touch_active()
        # record active time.
        # backgroud thread will stop keep connection pool if no request for long time.

        host = self.headers.get('Host', '')
        host_ip, _, port = host.rpartition(':')

        if host_ip == "127.0.0.1" and port == str(config.LISTEN_PORT):
            # Request addressed to the proxy's own control/web-UI port.
            controller = web_control.ControlHandler(self.client_address, self.headers, self.command, self.path, self.rfile, self.wfile)
            if self.command == "GET":
                return controller.do_GET()
            elif self.command == "POST":
                return controller.do_POST()
            else:
                xlog.warn("method not defined: %s", self.command)
                return

        # Normalize self.path to an absolute URL and recover the host.
        if self.path[0] == '/' and host:
            self.path = 'http://%s%s' % (host, self.path)
        elif not host and '://' in self.path:
            host = urlparse.urlparse(self.path).netloc

        if self.is_local([host, host_ip]):
            xlog.info("Browse localhost by proxy")
            return self.forward_local()

        if self.path == "http://www.twitter.com/xxnet":
            xlog.debug("%s %s", self.command, self.path)
            # for web_ui status page
            # auto detect browser proxy setting is work
            return self.wfile.write(self.self_check_response_data)

        self.parsed_url = urlparse.urlparse(self.path)

        if host in config.HOSTS_GAE:
            return self.do_AGENT()

        # redirect http request to https request
        # avoid key word filter when pass through GFW
        if host in config.HOSTS_FWD or host in config.HOSTS_DIRECT:
            return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\nContent-Length: 0\r\n\r\n' % self.path.replace('http://', 'https://', 1)).encode())

        if host.endswith(config.HOSTS_GAE_ENDSWITH):
            return self.do_AGENT()

        if host.endswith(config.HOSTS_FWD_ENDSWITH) or host.endswith(config.HOSTS_DIRECT_ENDSWITH):
            return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\nContent-Length: 0\r\n\r\n' % self.path.replace('http://', 'https://', 1)).encode())

        # Default route: through GAE.
        return self.do_AGENT()

    # Called by do_METHOD and do_CONNECT_AGENT
    def do_AGENT(self):
        """Read the request body (Content-Length or chunked) and hand the
        request to gae_handler; OPTIONS preflights are answered locally."""
        def get_crlf(rfile):
            # Consume the CRLF that terminates each chunk body.
            crlf = rfile.readline(2)
            if crlf != "\r\n":
                xlog.warn("chunk header read fail crlf")

        request_headers = dict((k.title(), v) for k, v in self.headers.items())

        payload = b''
        if 'Content-Length' in request_headers:
            try:
                payload_len = int(request_headers.get('Content-Length', 0))
                #xlog.debug("payload_len:%d %s %s", payload_len, self.command, self.path)
                payload = self.rfile.read(payload_len)
            except NetWorkIOError as e:
                xlog.error('handle_method_urlfetch read payload failed:%s', e)
                return
        elif 'Transfer-Encoding' in request_headers:
            # chunked, used by facebook android client
            payload = ""
            while True:
                # Chunk header line: "<hex size>[;extensions]\r\n".
                chunk_size_str = self.rfile.readline(65537)
                chunk_size_list = chunk_size_str.split(";")
                chunk_size = int("0x"+chunk_size_list[0], 0)
                if len(chunk_size_list) > 1 and chunk_size_list[1] != "\r\n":
                    xlog.warn("chunk ext: %s", chunk_size_str)
                if chunk_size == 0:
                    # Last chunk: drain trailer lines until the blank line.
                    while True:
                        line = self.rfile.readline(65537)
                        if line == "\r\n":
                            break
                        else:
                            xlog.warn("entity header:%s", line)
                    break
                payload += self.rfile.read(chunk_size)
                get_crlf(self.rfile)

        if self.command == "OPTIONS":
            return self.send_method_allows(request_headers, payload)

        if self.command not in self.gae_support_methods:
            xlog.warn("Method %s not support in GAEProxy for %s", self.command, self.path)
            return self.wfile.write(('HTTP/1.1 404 Not Found\r\n\r\n').encode())

        xlog.debug("GAE %s %s", self.command, self.path)
        gae_handler.handler(self.command, self.path, request_headers, payload, self.wfile)

    def do_CONNECT(self):
        """Dispatch an HTTPS CONNECT to either the GAE or the direct tunnel."""
        if self.path != "https://www.twitter.com/xxnet":
            # Don't count the self-check URL as real user activity.
            touch_active()

        host, _, port = self.path.rpartition(':')

        if host in config.HOSTS_GAE:
            return self.do_CONNECT_AGENT()
        if host in config.HOSTS_DIRECT:
            return self.do_CONNECT_DIRECT()

        if host.endswith(config.HOSTS_GAE_ENDSWITH):
            return self.do_CONNECT_AGENT()
        if host.endswith(config.HOSTS_DIRECT_ENDSWITH):
            return self.do_CONNECT_DIRECT()

        return self.do_CONNECT_AGENT()

    def do_CONNECT_AGENT(self):
        """send fake cert to client"""
        # GAE supports the following HTTP methods: GET, POST, HEAD, PUT, DELETE, and PATCH
        host, _, port = self.path.rpartition(':')
        port = int(port)

        certfile = CertUtil.get_cert(host)
        # xlog.info('https GAE %s %s:%d ', self.command, host, port)
        self.__realconnection = None
        self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n')

        try:
            # MITM: present our own certificate for the target host so we can
            # read the inner request.
            ssl_sock = ssl.wrap_socket(self.connection, keyfile=certfile, certfile=certfile, server_side=True)
        except ssl.SSLError as e:
            xlog.info('ssl error: %s, create full domain cert for host:%s', e, host)
            # NOTE(review): the regenerated cert is never used -- the request
            # is dropped here rather than retried. Confirm intended.
            certfile = CertUtil.get_cert(host, full_name=True)
            return
        except Exception as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
                xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
            return

        # Swap the plain socket for the SSL one, keeping the originals so the
        # finally block below can shut the real connection down.
        self.__realconnection = self.connection
        self.__realwfile = self.wfile
        self.__realrfile = self.rfile
        self.connection = ssl_sock
        self.rfile = self.connection.makefile('rb', self.bufsize)
        self.wfile = self.connection.makefile('wb', 0)

        try:
            # Parse the request that was sent inside the tunnel.
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                # Over-long request line: reject with 414.
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                xlog.warn("read request line len:%d", len(self.raw_requestline))
                return
            if not self.raw_requestline:
                # xlog.warn("read request line empty")
                return
            if not self.parse_request():
                xlog.warn("parse request fail:%s", self.raw_requestline)
                return
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
                xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
                raise

        if self.path[0] == '/' and host:
            self.path = 'https://%s%s' % (self.headers['Host'], self.path)

        if self.path == "https://www.twitter.com/xxnet":
            # for web_ui status page
            # auto detect browser proxy setting is work
            xlog.debug("CONNECT %s %s", self.command, self.path)
            return self.wfile.write(self.self_check_response_data)

        try:
            if self.path[0] == '/' and host:
                self.path = 'http://%s%s' % (host, self.path)
            elif not host and '://' in self.path:
                host = urlparse.urlparse(self.path).netloc

            self.parsed_url = urlparse.urlparse(self.path)

            # Inner request is handled exactly like a plain proxied request.
            return self.do_AGENT()
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ETIMEDOUT, errno.EPIPE):
                raise
        finally:
            if self.__realconnection:
                try:
                    self.__realconnection.shutdown(socket.SHUT_WR)
                    self.__realconnection.close()
                except NetWorkIOError:
                    pass
                finally:
                    self.__realconnection = None

    def do_CONNECT_DIRECT(self):
        """deploy fake cert to client"""
        host, _, port = self.path.rpartition(':')
        port = int(port)
        if port != 443:
            xlog.warn("CONNECT %s port:%d not support", host, port)
            return

        certfile = CertUtil.get_cert(host)
        xlog.info('GAE %s %s:%d ', self.command, host, port)
        self.__realconnection = None
        self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n')

        try:
            ssl_sock = ssl.wrap_socket(self.connection, keyfile=certfile, certfile=certfile, server_side=True)
        except ssl.SSLError as e:
            xlog.info('ssl error: %s, create full domain cert for host:%s', e, host)
            # NOTE(review): as in do_CONNECT_AGENT, the regenerated cert is
            # never used before returning -- confirm intended.
            certfile = CertUtil.get_cert(host, full_name=True)
            return
        except Exception as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
                xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0])
            return

        # Swap in the SSL socket, keeping the real one for cleanup.
        self.__realconnection = self.connection
        self.__realwfile = self.wfile
        self.__realrfile = self.rfile
        self.connection = ssl_sock
        self.rfile = self.connection.makefile('rb', self.bufsize)
        self.wfile = self.connection.makefile('wb', 0)

        try:
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                return
            if not self.raw_requestline:
                self.close_connection = 1
                return
            if not self.parse_request():
                return
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
                raise

        if self.path[0] == '/' and host:
            self.path = 'https://%s%s' % (self.headers['Host'], self.path)

        xlog.debug('GAE CONNECT Direct %s %s', self.command, self.path)

        try:
            if self.path[0] == '/' and host:
                self.path = 'http://%s%s' % (host, self.path)
            elif not host and '://' in self.path:
                host = urlparse.urlparse(self.path).netloc

            self.parsed_url = urlparse.urlparse(self.path)
            if len(self.parsed_url[4]):
                # Re-attach the query string to the path.
                path = '?'.join([self.parsed_url[2], self.parsed_url[4]])
            else:
                path = self.parsed_url[2]

            request_headers = dict((k.title(), v) for k, v in self.headers.items())

            payload = b''
            if 'Content-Length' in request_headers:
                try:
                    payload_len = int(request_headers.get('Content-Length', 0))
                    #xlog.debug("payload_len:%d %s %s", payload_len, self.command, self.path)
                    payload = self.rfile.read(payload_len)
                except NetWorkIOError as e:
                    xlog.error('handle_method_urlfetch read payload failed:%s', e)
                    return

            # Send the inner request straight to a Google front server.
            direct_handler.handler(self.command, host, path, request_headers, payload, self.wfile)
        except NetWorkIOError as e:
            if e.args[0] not in (errno.ECONNABORTED, errno.ETIMEDOUT, errno.EPIPE):
                raise
        finally:
            if self.__realconnection:
                try:
                    self.__realconnection.shutdown(socket.SHUT_WR)
                    self.__realconnection.close()
                except NetWorkIOError:
                    pass
                finally:
                    self.__realconnection = None
| |
import argparse
import json
import os
import readchar
import sys
import time
import readline
from subprocess import call, Popen, PIPE
from clint import resources
from clint.textui import puts, colored
targets = []
config_name = ''
# Time to sleep between transitions
TRANSITION_DELAY_TIME = 0.5
NUMBER_ENTRY_EXPIRE_TIME = 0.75
def main():
    """Entry point: parse CLI args, ensure a config file exists, run the menu."""
    global config_name

    # Check arguments
    parser = argparse.ArgumentParser(
        prog='sshmenu',
        description='A convenient tool for bookmarking hosts and connecting to them via ssh.')
    parser.add_argument('-c', '--configname', default='config', help='Specify an alternate configuration name.')
    args = parser.parse_args()

    # Get config name
    config_name = '%s.json' % args.configname

    # First parameter is 'company' name, hence duplicate arguments
    resources.init('sshmenu', 'sshmenu')

    # If the config file doesn't exist, create an example config
    if resources.user.read(config_name) is None:
        ssh_example = {
            'host': 'user@example-machine.local',
            'friendly': 'This is an example target',
            'options': []
        }
        mosh_example = {
            'command': 'mosh',
            'host': 'user@example-machine.local',
            'friendly': 'This is an example target using mosh',
            'options': []
        }
        example_config = {'targets': [ssh_example, mosh_example]}
        resources.user.write(config_name, json.dumps(example_config, indent=4))

    update_targets()
    display_menu()
def get_terminal_height():
    """Return the terminal height in rows, as reported by `tput lines`."""
    process = Popen(['tput', 'lines'], stdout=PIPE)
    stdout_data, _ = process.communicate()
    return int(stdout_data)
def display_help():
    """Clear the screen, show the key-binding help, and wait for any key."""
    call(['clear'])
    puts(colored.cyan('Available commands (press any key to exit)'))
    help_lines = [
        ' enter - Connect to your selection',
        ' crtl+c | q - Quit sshmenu',
        ' k (up) - Move your selection up',
        ' j (down) - Move your selection down',
        ' h - Show help menu',
        ' c - Create new connection',
        ' d - Delete connection',
        ' e - Edit connection',
        ' + (plus) - Move connection up',
        ' - (minus) - Move connection down',
    ]
    for help_line in help_lines:
        puts(help_line)

    # Hang until we get a keypress
    readchar.readkey()
def connection_create():
    """Prompt for a new connection's details, append it to the config, save.

    An empty hostname aborts without touching the config.
    """
    global config_name

    call(['clear'])
    puts(colored.cyan('Create new connection entry'))
    puts('')

    host = input('Hostname (user@machine): ')
    # BUGFIX: was `host is ''` -- identity comparison against a str literal is
    # interpreter-dependent and raises SyntaxWarning on CPython 3.8+.
    if host == '':
        puts('')
        puts('Nothing done')
        time.sleep(TRANSITION_DELAY_TIME)
        return

    friendly = input('Description []: ')
    command = input('Command [ssh]: ')
    options = input('Command Options []: ')

    # Set the defaults if our input was empty
    command = 'ssh' if command == '' else command
    options = [] if options == '' else options.split()

    # Append the new target to the config
    config = json.loads(resources.user.read(config_name))
    config['targets'].append({'command': command, 'host': host, 'friendly': friendly, 'options': options})

    # Save the new config
    resources.user.write(config_name, json.dumps(config, indent=4))
    update_targets()

    puts('')
    puts('New connection added')
    time.sleep(TRANSITION_DELAY_TIME)
def connection_edit(selected_target):
    """Interactively edit the target at *selected_target* and save the config."""
    global targets, config_name

    call(['clear'])
    puts(colored.cyan('Editing connection %s' % targets[selected_target]['host']))
    puts('')

    target = targets[selected_target]

    # Re-prompt until a non-empty hostname is supplied.
    while True:
        host = input_prefill('Hostname: ', target['host'])
        # BUGFIX: was `host is not ''` -- identity comparison against a str
        # literal; compare by value instead.
        if host != '':
            break

    friendly = input_prefill('Description: ', target['friendly'])
    command = input_prefill('Command [ssh]: ', 'ssh' if not target.get('command') else target['command'])
    options = input_prefill('Options []: ', ' '.join(target['options']))

    # Set the defaults if our input was empty
    command = 'ssh' if command == '' else command
    options = [] if options == '' else options.split()

    # Delete the old entry insert the edited one in its place
    config = json.loads(resources.user.read(config_name))
    del config['targets'][selected_target]
    config['targets'].insert(selected_target,
                             {'command': command, 'host': host, 'friendly': friendly, 'options': options})
    resources.user.write(config_name, json.dumps(config, indent=4))
    update_targets()

    puts('')
    puts('Changes saved')
    time.sleep(TRANSITION_DELAY_TIME)
def connection_delete(selected_target):
    """Ask for confirmation, then remove the selected target from the config."""
    global targets, config_name

    call(['clear'])
    puts(colored.red('Delete connection entry for %s' % targets[selected_target]['host']))
    puts('')

    while True:
        response = input('Are you sure you want to delete this connection [yes|NO]: ').lower()
        if response in ('no', 'n', ''):
            puts('')
            puts('Nothing done')
            break
        if response == 'yes':
            config = json.loads(resources.user.read(config_name))
            del config['targets'][selected_target]
            resources.user.write(config_name, json.dumps(config, indent=4))
            update_targets()
            puts('')
            puts('Connection deleted')
            break

    time.sleep(TRANSITION_DELAY_TIME)
def connection_move_up(selected_target):
    """Move the selected target one slot earlier in the config and persist."""
    global config_name

    config = json.loads(resources.user.read(config_name))
    entry = config['targets'].pop(selected_target)
    config['targets'].insert(selected_target - 1, entry)
    resources.user.write(config_name, json.dumps(config, indent=4))
    update_targets()
def connection_move_down(selected_target):
    """Move the selected target one slot later in the config and persist."""
    global config_name

    config = json.loads(resources.user.read(config_name))
    entry = config['targets'].pop(selected_target)
    config['targets'].insert(selected_target + 1, entry)
    resources.user.write(config_name, json.dumps(config, indent=4))
    update_targets()
def update_targets():
    """Reload the global targets list from the configuration file.

    Leaves the current list untouched when the config has no 'targets' key.
    """
    global targets, config_name

    config = json.loads(resources.user.read(config_name))
    targets = config.get('targets', targets)
def display_menu():
    """Main interactive loop: draw the target list and react to keypresses.

    Handles scrolling when the list is taller than the terminal, multi-digit
    numeric selection with an expiry window, and dispatch to the
    create/edit/delete/move helpers. Never returns except via exit(0) or
    os.execvp on connect.
    """
    global targets

    # Save current cursor position so we can overwrite on list updates
    call(['tput', 'clear', 'sc'])

    # Keep track of currently selected target
    selected_target = 0

    # Support input of long numbers
    number_buffer = []

    # Store time of last number that was entered
    time_last_digit_pressed = round(time.time())

    # Get initial terminal height
    terminal_height = get_terminal_height()

    # Set initial visible target range.
    # Subtract 2 because one line is used by the instructions,
    # and one line is always empty at the bottom.
    visible_target_range = range(terminal_height - 2)

    while True:
        # Return to the saved cursor position
        call(['tput', 'clear', 'rc'])

        # We need at least one target for our UI to make sense
        num_targets = len(targets)
        if num_targets <= 0:
            puts(colored.red('Whoops, you don\'t have any connections defined in your config!'))
            puts('')
            puts('Press "c" to create a new connection')
        else:
            puts(colored.cyan('Select a target (press "h" for help)'))

            # Determine the longest host
            longest_host = -1
            longest_line = -1
            for index, target in enumerate(targets):
                length = len(target['host'])

                # Check host length
                if length > longest_host:
                    longest_host = length

            # Generate description and check line length
            for index, target in enumerate(targets):
                desc = target['host'].ljust(longest_host) + ' | ' + target['friendly']
                target['desc'] = desc
                line_length = len(desc)
                if line_length > longest_line:
                    longest_line = line_length

            # Recalculate visible targets based on selected_target
            if selected_target > max(visible_target_range):
                visible_start = selected_target - terminal_height + 3
                visible_end = selected_target + 1
                visible_target_range = range(visible_start, visible_end)
            elif selected_target < min(visible_target_range):
                visible_start = selected_target
                visible_end = selected_target + terminal_height - 2
                visible_target_range = range(visible_start, visible_end)

            # Make sure our selected target is not higher than possible
            # This can happen if you delete the last target
            selected_target = selected_target if selected_target < num_targets else 0

            # Used to pad out the line numbers so that we can keep everything aligned
            num_digits = len(str(num_targets))
            digits_format_specifier = '%' + str(num_digits) + 'd'

            # Print items
            for index, target in enumerate(targets):
                # Only print the items that are within the visible range.
                # Due to lines changing their position on the screen when scrolling,
                # we need to redraw the entire line + add padding to make sure all
                # traces of the previous line are erased.
                if index in visible_target_range:
                    line = (digits_format_specifier + '. %s ') % (index + 1, target['desc'].ljust(longest_line))
                    if index == selected_target:
                        puts(colored.green(' -> %s' % line))
                    else:
                        puts(colored.white(' %s' % line))

        # Hang until we get a keypress
        key = readchar.readkey()

        # BUGFIX: the UP/DOWN tests were `a or b and c`; `and` binds tighter,
        # so the guard applied only to the letter key. Parenthesized for the
        # intended reading.
        if (key == readchar.key.UP or key == 'k') and num_targets > 0:
            # Ensure the new selection would be valid & reset number input buffer
            if (selected_target - 1) >= 0:
                selected_target -= 1
            number_buffer = []
        elif (key == readchar.key.DOWN or key == 'j') and num_targets > 0:
            # Ensure the new selection would be valid & reset number input buffer
            if (selected_target + 1) <= (num_targets - 1):
                selected_target += 1
            number_buffer = []
        elif key == 'g':
            # Go to top & reset number input buffer
            selected_target = 0
            number_buffer = []
        elif key == 'G':
            # Go to bottom & reset number input buffer
            selected_target = num_targets - 1
            number_buffer = []
        # Check if key is a number
        elif key in map(lambda x: str(x), range(10)):
            current_time = time.time()
            # Digits typed within the expiry window accumulate into one number.
            if current_time - time_last_digit_pressed >= NUMBER_ENTRY_EXPIRE_TIME:
                number_buffer = []
            time_last_digit_pressed = current_time
            number_buffer += key
            new_selection = int(''.join(number_buffer))
            # If the new target is invalid, just keep the previously selected target instead
            if num_targets >= new_selection > 0:
                selected_target = new_selection - 1
        elif key == readchar.key.ENTER and num_targets > 0:
            # For cleanliness clear the screen
            call(['tput', 'clear'])
            target = targets[selected_target]

            # Check if there is a custom command for this target
            if 'command' in target.keys():
                command = target['command']
            else:
                command = 'ssh'

            # Arguments to the child process should start with the name of the command being run
            args = [command] + target.get('options', []) + [target['host']]
            try:
                # After this line, ssh will replace the python process
                os.execvp(command, args)
            except FileNotFoundError:
                sys.exit('Command not found: {commandname}'.format(commandname=command))
        elif key == 'h':
            display_help()
        elif key == 'c':
            connection_create()
        elif key == 'd' and num_targets > 0:
            connection_delete(selected_target)
        elif key == 'e' and num_targets > 0:
            connection_edit(selected_target)
        elif key == '-' and num_targets > 0:
            # BUGFIX: was `selected_target < num_targets`, which is always
            # true; pressing '-' on the last entry rotated the config and
            # pushed the selection out of range, snapping it back to the top
            # on the next redraw. Only move down when a slot below exists.
            if selected_target < num_targets - 1:
                connection_move_down(selected_target)
                selected_target += 1
        elif key == '+' and num_targets > 0:
            if selected_target > 0:
                connection_move_up(selected_target)
                selected_target -= 1
        elif key == readchar.key.CTRL_C or key == 'q':
            exit(0)
def input_prefill(prompt, text):
    """input() variant that pre-fills the edit line with *text* via readline."""
    def preload():
        readline.insert_text(text)
        readline.redisplay()

    readline.set_pre_input_hook(preload)
    entered = input(prompt)
    # Clear the hook so later plain input() calls start empty.
    readline.set_pre_input_hook()
    return entered
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Base class for all drawbot drivers
Author: tennessee
Created on: 2017-03-21
Copyright 2017, Tennessee Carmel-Veilleux.
"""
import threading
import Queue
import logging
import json
import numpy as np
from collections import deque, namedtuple
class DrawbotDriverException(IOError):
    """Raised when communication with a drawbot device fails."""
class DrawbotCommand(object):
    """Base type for commands placed on the driver's input queue."""
class DrawbotPenUp(DrawbotCommand):
    """Command: lift the pen to the given height (millimetres)."""

    def __init__(self, height_mm=10.0):
        self._height_mm = height_mm

    @property
    def height_mm(self):
        # Target pen height in millimetres.
        return self._height_mm
class DrawbotPenDown(DrawbotCommand):
    """Command: lower the pen to the given height (millimetres)."""

    def __init__(self, height_mm=0.0):
        self._height_mm = height_mm

    @property
    def height_mm(self):
        # Target pen height in millimetres.
        return self._height_mm
class DrawbotPenGoto(DrawbotCommand):
    """Command: move the pen to a single position."""

    def __init__(self, position, is_native):
        # Normalise to a float64 ndarray so downstream code can rely on
        # numpy semantics.
        self._position = np.asarray(position, dtype="float64")
        self._is_native = is_native

    @property
    def position(self):
        return self._position

    @property
    def is_native(self):
        # True when 'position' is already in native (joint) coordinates.
        return self._is_native
class DrawbotDrawPath(DrawbotCommand):
    """Command: draw a polyline given as an array of points."""

    def __init__(self, path_points, is_native=False):
        # Normalise to a float64 ndarray so downstream code can rely on
        # numpy semantics.
        self._path_points = np.asarray(path_points, dtype="float64")
        self._is_native = is_native

    @property
    def path_points(self):
        return self._path_points

    @property
    def is_native(self):
        # True when 'path_points' is already in native (joint) coordinates.
        return self._is_native
class DrawbotAbort(DrawbotCommand):
    """Command: abort the drawing program currently queued."""

    def __init__(self):
        pass
class DrawbotDriver(object):
    """Asynchronous drawbot driver.

    Public calls (pen_up/pen_down/goto/draw_path/abort_path) enqueue
    DrawbotCommand objects; a background thread expands them into a
    low-level drawing program and executes it one step per tick via the
    *_impl hooks that hardware-specific subclasses provide.
    """

    def __init__(self, drawbot_kinematics, *args, **kwargs):
        self._drawbot_kine = drawbot_kinematics
        self._connected = False
        # Hint for delay between point updates
        # NOTE(review): kwarg is named "point_delay_ms" but the value is
        # consumed as *seconds* (queue timeout in _process) -- confirm units.
        self._point_delay_sec = kwargs.get("point_delay_ms", 0.01)
        self._pen_diameter_mm = kwargs.get("pen_diameter_mm", 0.25)
        self._thread = threading.Thread(target=self._process, name=kwargs.get("thread_name", "drawbot_driver"))
        self._thread.daemon = kwargs.get("daemon", True)
        self._running = False
        self._queue = Queue.Queue()
        # Expanded low-level program: dicts executed one per tick by _process().
        self._drawing_prog = deque()
        self._logger = logging.getLogger("drawbot_driver")

    @property
    def connected(self):
        # NOTE(review): nothing in this class ever sets _connected to True --
        # confirm whether connect_impl() implementations are expected to.
        return self._connected

    @property
    def point_delay_sec(self):
        return self._point_delay_sec

    @point_delay_sec.setter
    def point_delay_sec(self, value):
        # NOTE(review): this dict is not a DrawbotCommand and _process() has
        # no branch for it, so the new delay is silently dropped -- confirm.
        self._queue.put({"cmd": "set_point_delay_sec", "value": value})

    @property
    def pen_diameter_mm(self):
        return self._pen_diameter_mm

    @property
    def kine(self):
        # Kinematics model used to convert workspace points to natives.
        return self._drawbot_kine

    def connect(self, **kwargs):
        # Run the hardware-specific connect, then start the worker thread.
        if self._connected:
            self._logger.warn("Already connected, ignoring!")
            return

        if not self._thread.is_alive():
            self._running = True
            self.connect_impl()
            self._thread.start()

    def disconnect(self):
        # NOTE(review): effectively a no-op -- disconnect_impl() is never
        # called and _running is not cleared. Confirm intended.
        if not self._connected:
            pass

    def abort_path(self):
        # Queue an abort of the current drawing program.
        self._queue.put(DrawbotAbort())

    def pen_up(self, height_mm=10.0):
        # Queue a pen lift to height_mm.
        self._queue.put(DrawbotPenUp(height_mm=height_mm))

    def pen_down(self, height_mm=0.0):
        # Queue a pen drop to height_mm.
        self._queue.put(DrawbotPenDown(height_mm=height_mm))

    def goto(self, position, is_native=False):
        # Queue a move to a single position (workspace or native coords).
        self._queue.put(DrawbotPenGoto(position, is_native=is_native))

    def draw_path(self, path_points, is_native=False):
        # Queue drawing of a polyline given as an array of points.
        self._queue.put(DrawbotDrawPath(path_points, is_native=is_native))

    def shutdown(self, timeout=1.0):
        # Ask the worker to exit (False is the shutdown sentinel) and join it.
        if self._thread.is_alive():
            self._queue.put(False)
            self._thread.join(timeout)
            if self._thread.is_alive():
                self._logger.error("Could not join robot driver thread to shutdown!")

    def _process(self):
        """Worker-thread main loop.

        Waits up to point_delay_sec for a new high-level command; on timeout
        it executes the next step of the queued drawing program instead, so
        command intake and drawing are interleaved on a single thread.
        """
        self._logger.info("Drawbot driver thread started")
        while self._running:
            cmd = None
            try:
                cmd = self._queue.get(block=True,timeout=self._point_delay_sec)
            except Queue.Empty:
                # No new command, run next robot command
                if len(self._drawing_prog) == 0:
                    continue

                drawing_cmd = self._drawing_prog.popleft()
                self._execute(drawing_cmd)

            # If no command to execute, go back to waiting
            if cmd is None:
                continue

            if cmd is False:
                # Shutdown requested
                self._running = False
                continue

            # Handle all commands
            if isinstance(cmd, DrawbotDrawPath):
                path = cmd.path_points
                is_native = cmd.is_native
                # Convert to natives
                if not is_native:
                    points, path = self._drawbot_kine.gen_path(path, self._pen_diameter_mm / 2.0)
                    is_native = True

                # Expand the path into one program step per point.
                for idx in xrange(path.shape[0]):
                    self._drawing_prog.append({"cmd": ("goto_native" if is_native else "goto_point"), "point": path[idx,:]})
            elif isinstance(cmd, DrawbotPenUp):
                self._drawing_prog.append({"cmd": "pen_up", "height_mm": cmd.height_mm})
            elif isinstance(cmd, DrawbotPenDown):
                self._drawing_prog.append({"cmd": "pen_down", "height_mm": cmd.height_mm})
            elif isinstance(cmd, DrawbotPenGoto):
                is_native = cmd.is_native
                # Convert to natives
                if not is_native:
                    try:
                        # Find IK
                        natives = self._drawbot_kine.inverse_kine(cmd.position)
                        is_native = True
                    except:
                        self._logger.exception("Could not go ot native!")
                        continue
                else:
                    natives = cmd.position

                self._drawing_prog.append(
                    {"cmd": ("goto_native" if is_native else "goto_point"), "point": natives})
            elif isinstance(cmd, DrawbotAbort):
                # NOTE(review): abort is *appended*, so it only runs after
                # steps already queued ahead of it -- confirm intended.
                self._drawing_prog.append({"cmd": "abort"})

        self._logger.info("Drawbot driver thread exited")
        return

    def _execute(self, drawing_cmd):
        """Execute one low-level drawing-program step."""
        if drawing_cmd["cmd"] == "goto_point":
            self._logger.info("goto_point: %s", drawing_cmd["point"])
        elif drawing_cmd["cmd"] == "goto_native":
            #sself._logger.info("goto_native: %s", drawing_cmd["point"])
            self.set_natives_impl(drawing_cmd["point"])
        elif drawing_cmd["cmd"] in ("pen_up", "pen_down"):
            self.set_pen_height_impl(height_mm=drawing_cmd["height_mm"])
        else:
            # NOTE(review): the "abort" step queued by _process lands here and
            # only logs a warning -- confirm abort semantics.
            self._logger.warn("Unknown command: %s", drawing_cmd)

    def connect_impl(self):
        # Hardware-specific connection; implemented by subclasses.
        raise NotImplementedError()

    def disconnect_impl(self):
        # Hardware-specific disconnect; implemented by subclasses.
        raise NotImplementedError()

    def set_pen_height_impl(self, height_mm):
        # Hardware-specific pen-height control; implemented by subclasses.
        raise NotImplementedError()

    def set_natives_impl(self, natives):
        # Hardware-specific native-coordinate move; implemented by subclasses.
        raise NotImplementedError()
class DrawbotKinematics(object):
"""
"""
def __init__(self, *args, **kwargs):
self.work_area_config = {}
pass
def get_kine_hash(self):
"""
Return a driver-specific hash given the configuration of the robot, which can be used to generate
a cache key.
:return: A driver-specific hash string or None if no hash can be computed
"""
return None
def load_work_area_config(self):
"""
Try to load work area config from cached file
:return: True if work area config was loaded, False otherwise
"""
hash = self.get_kine_hash()
if hash is None:
return False
try:
with open("%s_%s_cache.json" % (self.__class__.__name__, hash), "rb") as cache_file:
work_area_config = json.load(cache_file)
self.work_area_config.update(work_area_config)
return True
except IOError:
return False
def save_work_area_config(self):
"""
Try to save work area config into cache file
"""
hash = self.get_kine_hash()
with open("%s_%s_cache.json" % (self.__class__.__name__, hash), "wb+") as cache_file:
json.dump(self.work_area_config, cache_file, indent=2)
def respects_external_constraints(self, end_point, thetas):
return False
def forward_kine(self, thetas, **kwargs):
# Position of end-effector
end_points = np.zeros(thetas.shape)
# Other columns (e.g. phi1, phi2, cx, cy), stacked at the end of the end_points. Could be as many columns as you want
internal_stuff = np.zeros((thetas.shape[0], 4))
return np.column_stack((end_points, internal_stuff))
def inverse_kine(self, end_points, **kwargs):
# end_points: array of points of end effector (x, y) for which to find the natives
natives = np.zeros(end_points.shape)
# Other columns (e.g. phi1, phi2, cx, cy), stacked at the end of the end_points. Could be as many columns as you want
internal_stuff = np.zeros((end_points.shape[0], 4))
if self.respects_external_constraints(end_points, natives):
return np.column_stack((natives, internal_stuff))
else:
raise ValueError("No reverse kine solution found")
# Return an array of only the points that fit the IK of the robot
def trim_unfeasible(self, points):
feasible_points = []
for point in points:
try:
self.inverse_kine(point)
feasible_points.append(points)
except ValueError:
feasible_points.append(point)
return np.asarray(feasible_points)
def get_work_area(self, spatial_res_mm=0.5, theta_res_rad=0.05, **kwargs):
    """
    Stub work-area scan.

    :param spatial_res_mm: spatial sampling resolution in millimetres
    :param theta_res_rad: angular sampling resolution in radians
    :return: tuple (work_points, work_natives, work_path) where
        work_points are samples of the work area, work_natives are the
        thetas scanned to have been feasible, and work_path is a closed
        path around the work area. All empty in this base implementation.
    """
    work_points, work_natives, work_path = [], [], []
    return work_points, work_natives, work_path
def get_work_area_centroid(self):
    """Return the work-area centroid as a numpy array; falls back to the
    origin when the cached config has no "work_centroid" entry."""
    centroid = self.work_area_config.get("work_centroid", [0.0, 0.0])
    return np.asarray(centroid)
def draw_robot_preview(self, ax, show_robot=False, show_work_area=True, **kwargs):
    """
    Draw preview of robot and work area on matplotlib axis.

    Base implementation is a no-op; subclasses override it to do the
    actual drawing.

    :param ax: matplotlib axis to draw onto
    :param show_robot: whether to draw the robot itself
    :param show_work_area: whether to draw the work area
    """
    pass
| |
# Fix: import defaultdict from the public ``collections`` module rather
# than ``_collections`` (CPython's private C accelerator module, an
# implementation detail that IDE auto-import sometimes picks by mistake).
from collections import defaultdict
import datetime
from urllib import urlencode
import dateutil
from django.core.urlresolvers import reverse
import math
from django.db.models.aggregates import Max, Min, Avg, StdDev, Count
import numpy
import operator
import pytz
from corehq.apps.es.forms import FormES
from corehq.apps.reports import util
from corehq.apps.reports.filters.users import ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.standard import ProjectReportParametersMixin, \
    DatespanMixin, ProjectReport, DATE_FORMAT
from corehq.apps.reports.filters.forms import CompletionOrSubmissionTimeFilter, FormsByApplicationFilter, SingleFormByApplicationFilter, MISSING_APP_ID
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType, DataTablesColumnGroup
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.util import make_form_couch_key, friendly_timedelta, format_datatables_data
from corehq.apps.sofabed.models import FormData
from corehq.apps.users.models import CommCareUser
from corehq.elastic import es_query, ADD_TO_ES_FILTER
from corehq.pillows.mappings.case_mapping import CASE_INDEX
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX
from dimagi.utils.couch.database import get_db
from dimagi.utils.dates import DateSpan, today_or_tomorrow
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_datetime
from dimagi.utils.timezones import utils as tz_utils
from dimagi.utils.web import get_url_base
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
class WorkerMonitoringReportTableBase(GenericTabularReport, ProjectReport, ProjectReportParametersMixin):
    """Base class for worker-monitoring tabular reports. Renders user
    cells as HTML links to that user's Case List report."""
    exportable = True

    def get_raw_user_link(self, user):
        """Return an HTML anchor pointing at the Case List report,
        pre-filtered to *user*."""
        # Imported locally to avoid a circular import at module load time.
        from corehq.apps.reports.standard.cases.basic import CaseListReport
        base_url = "%s%s" % (get_url_base(),
                             CaseListReport.get_url(domain=self.domain))
        query_string = urlencode(EMWF.for_user(user.user_id))
        return '<a href="%(link)s?%(params)s">%(username)s</a>' % {
            'link': base_url,
            'params': query_string,
            'username': user.username_in_report,
        }

    def get_user_link(self, user):
        """Return a table cell: raw username as the sort key, the user
        link HTML as the display value."""
        return self.table_cell(user.raw_username, self.get_raw_user_link(user))
class MultiFormDrilldownMixin(object):
    """
    This is a useful mixin when you use FormsByApplicationFilter.
    """

    @property
    @memoized
    def all_relevant_forms(self):
        # Forms currently selected in the FormsByApplicationFilter.
        # Memoized because headers and rows consult it repeatedly.
        return FormsByApplicationFilter.get_value(self.request, self.domain)
class CompletionOrSubmissionTimeMixin(object):
    """
    Use this when you use CompletionOrSubmissionTimeFilter.
    """

    @property
    def by_submission_time(self):
        # True when the filter selects submission time rather than
        # form completion time.
        selected = CompletionOrSubmissionTimeFilter.get_value(self.request, self.domain)
        return selected == 'submission'
class CaseActivityReport(WorkerMonitoringReportTableBase):
    """
    todo move this to the cached version when ready

    Per-user case activity over several "landmark" windows. Example:

    User Last 30 Days Last 60 Days Last 90 Days Active Clients Inactive Clients
    danny 5 (25%) 10 (50%) 20 (100%) 17 6
    (name) (modified_since(x)/[active + closed_since(x)]) (open & modified_since(120)) (open & !modified_since(120))
    """
    name = ugettext_noop('Case Activity')
    slug = 'case_activity'
    fields = ['corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
              'corehq.apps.reports.filters.select.CaseTypeFilter']
    all_users = None
    display_data = ['percent']
    emailable = True
    description = ugettext_noop("Followup rates on active cases.")
    is_cacheable = True

    @property
    def special_notice(self):
        # Warn on domains with case sharing: per-user totals may
        # double-count shared cases.
        if self.domain_object.case_sharing_included():
            return _("This report currently does not support case sharing. "
                     "There might be inconsistencies in case totals if the user is part of a case sharing group. "
                     "We are working to correct this shortly.")

    class Row(object):
        # One report row: lazily queries case counts for a single user.
        def __init__(self, report, user):
            self.report = report
            self.user = user

        @memoized
        def active_count(self):
            """Open clients seen in the last 120 days"""
            return self.report.get_number_cases(
                user_id=self.user.user_id,
                modified_after=self.report.utc_now - self.report.milestone,
                modified_before=self.report.utc_now,
                closed=False,
            )

        @memoized
        def inactive_count(self):
            """Open clients not seen in the last 120 days"""
            return self.report.get_number_cases(
                user_id=self.user.user_id,
                modified_before=self.report.utc_now - self.report.milestone,
                closed=False,
            )

        def modified_count(self, startdate=None, enddate=None):
            # Cases (open or closed) modified in [startdate, enddate].
            enddate = enddate or self.report.utc_now
            return self.report.get_number_cases(
                user_id=self.user.user_id,
                modified_after=startdate,
                modified_before=enddate,
            )

        def closed_count(self, startdate=None, enddate=None):
            # Closed cases modified in [startdate, enddate].
            enddate = enddate or self.report.utc_now
            return self.report.get_number_cases(
                user_id=self.user.user_id,
                modified_after=startdate,
                modified_before=enddate,
                closed=True
            )

        def header(self):
            return self.report.get_user_link(self.user)

    class TotalRow(object):
        # Aggregate row: sums each metric over a list of Rows.
        def __init__(self, rows, header):
            self.rows = rows
            self._header = header

        def active_count(self):
            return sum([row.active_count() for row in self.rows])

        def inactive_count(self):
            return sum([row.inactive_count() for row in self.rows])

        def modified_count(self, startdate=None, enddate=None):
            return sum([row.modified_count(startdate, enddate) for row in self.rows])

        def closed_count(self, startdate=None, enddate=None):
            return sum([row.closed_count(startdate, enddate) for row in self.rows])

        def header(self):
            return self._header

    _default_landmarks = [30, 60, 90]

    @property
    @memoized
    def landmarks(self):
        # Day windows (as timedeltas) for the landmark column groups.
        # Only integer entries from the request are honored.
        landmarks_param = self.request_params.get('landmarks')
        landmarks_param = landmarks_param if isinstance(landmarks_param, list) else []
        landmarks_param = [param for param in landmarks_param if isinstance(param, int)]
        landmarks = landmarks_param if landmarks_param else self._default_landmarks
        return [datetime.timedelta(days=l) for l in landmarks]

    _default_milestone = 120

    @property
    @memoized
    def milestone(self):
        # Cut-off (timedelta) separating "active" from "inactive" cases.
        milestone_param = self.request_params.get('milestone')
        milestone_param = milestone_param if isinstance(milestone_param, int) else None
        milestone = milestone_param if milestone_param else self._default_milestone
        return datetime.timedelta(days=milestone)

    @property
    @memoized
    def utc_now(self):
        # NOTE(review): the from/to zone arguments look reversed here
        # (naive utcnow() paired with report tz -> UTC); confirm against
        # tz_utils.adjust_datetime_to_timezone's signature.
        return tz_utils.adjust_datetime_to_timezone(datetime.datetime.utcnow(), self.timezone.zone, pytz.utc.zone)

    @property
    def headers(self):
        # One column group per landmark window, plus overall
        # active/inactive case counts.
        columns = [DataTablesColumn(_("Users"))]
        for landmark in self.landmarks:
            num_cases = DataTablesColumn(_("# Modified or Closed"), sort_type=DTSortType.NUMERIC,
                help_text=_("The number of cases that have been modified between %d days ago and today.") % landmark.days
            )
            num_active = DataTablesColumn(_("# Active"), sort_type=DTSortType.NUMERIC,
                help_text=_("The number of cases created or modified in the last 120 days.")
            )
            num_closed = DataTablesColumn(_("# Closed"), sort_type=DTSortType.NUMERIC,
                help_text=_("The number of cases that have been closed between %d days ago and today.") % landmark.days
            )
            proportion = DataTablesColumn(_("Proportion"), sort_type=DTSortType.NUMERIC,
                help_text=_("The percentage of all recently active cases that were modified or closed in the last %d days.") % landmark.days
            )
            columns.append(DataTablesColumnGroup(_("Cases in Last %s Days") % landmark.days if landmark else _("Ever"),
                num_cases,
                num_active,
                num_closed,
                proportion
            ))
        columns.append(DataTablesColumn(_("# Active Cases"),
            sort_type=DTSortType.NUMERIC,
            help_text=_('Number of cases modified in the last %s days that are still open') % self.milestone.days))
        columns.append(DataTablesColumn(_("# Inactive Cases"),
            sort_type=DTSortType.NUMERIC,
            help_text=_("Number of cases that are open but haven't been touched in the last %s days") % self.milestone.days))
        return DataTablesHeader(*columns)

    @property
    def rows(self):
        users_data = EMWF.pull_users_and_groups(
            self.domain, self.request, True, True)
        rows = [self.Row(self, user) for user in users_data.combined_users]

        total_row = self.TotalRow(rows, _("All Users"))

        def format_row(row):
            cells = [row.header()]

            def add_numeric_cell(text, value=None):
                # Default the sort key to int(text) when text is numeric.
                if value is None:
                    try:
                        value = int(text)
                    except ValueError:
                        value = text
                cells.append(util.format_datatables_data(text=text, sort_key=value))

            for landmark in self.landmarks:
                value = row.modified_count(self.utc_now - landmark)
                active = row.active_count()
                closed = row.closed_count(self.utc_now - landmark)
                total = active + closed

                try:
                    p_val = float(value) * 100. / float(total)
                    proportion = '%.f%%' % p_val
                except ZeroDivisionError:
                    # No active or closed cases at all: show a placeholder.
                    p_val = None
                    proportion = '--'

                add_numeric_cell(value, value)
                add_numeric_cell(active, active)
                add_numeric_cell(closed, closed)
                add_numeric_cell(proportion, p_val)

            add_numeric_cell(row.active_count())
            add_numeric_cell(row.inactive_count())
            return cells

        self.total_row = format_row(total_row)
        # NOTE: under Python 2 map() returns a list, which the report
        # framework expects.
        return map(format_row, rows)

    def get_number_cases(self, user_id, modified_after=None, modified_before=None, closed=None):
        """Count a user's cases via the case/by_date_modified couch view."""
        # ``{}`` sorts after every string in couch key collation, so it
        # acts as an open-ended bound for the closed flag / case type / end.
        key = [self.domain, {} if closed is None else closed, self.case_type or {}, user_id]

        if modified_after is None:
            start = ""
        else:
            start = json_format_datetime(modified_after)

        if modified_before is None:
            end = {}
        else:
            end = json_format_datetime(modified_before)

        return get_db().view('case/by_date_modified',
            startkey=key + [start],
            endkey=key + [end],
            group=True,
            group_level=0,
            wrapper=lambda row: row['value']
        ).one() or 0
class SubmissionsByFormReport(WorkerMonitoringReportTableBase,
                              MultiFormDrilldownMixin, DatespanMixin):
    """Per-user submission counts, one column per selected form."""
    name = ugettext_noop("Submissions By Form")
    slug = "submissions_by_form"
    fields = [
        'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
        'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter'
    ]
    fix_left_col = True
    emailable = True
    is_cacheable = True
    description = ugettext_noop("Number of submissions by form.")

    @property
    def headers(self):
        # One column per selected form plus a totals column; a single
        # placeholder column when nothing matched the filter.
        headers = DataTablesHeader(DataTablesColumn(_("User"), span=3))
        if not self.all_relevant_forms:
            headers.add_column(
                DataTablesColumn(
                    _("No submissions were found for selected forms "
                      "within this date range."),
                    sortable=False
                )
            )
        else:
            for _form, info in self.all_relevant_forms.items():
                help_text = None
                if info['is_fuzzy']:
                    help_text = _("This column shows Fuzzy Submissions.")
                elif info['is_remote']:
                    help_text = _("These forms came from "
                                  "a Remote CommCare HQ Application.")
                headers.add_column(
                    DataTablesColumn(
                        info['name'],
                        sort_type=DTSortType.NUMERIC,
                        help_text=help_text,
                    )
                )
            headers.add_column(
                DataTablesColumn(_("All Forms"), sort_type=DTSortType.NUMERIC)
            )
        return headers

    @property
    def rows(self):
        rows = []
        # One slot per form plus one for the per-user total.
        totals = [0] * (len(self.all_relevant_forms) + 1)
        users_data = EMWF.pull_users_and_groups(
            self.domain, self.request, True, True)
        for user in users_data.combined_users:
            row = []
            if self.all_relevant_forms:
                for form in self.all_relevant_forms.values():
                    row.append(
                        self._get_num_submissions(
                            user.user_id, form['xmlns'], form['app_id'])
                    )
                row_sum = sum(row)
                row = (
                    [self.get_user_link(user)] +
                    [self.table_cell(row_data) for row_data in row] +
                    [self.table_cell(row_sum, "<strong>%s</strong>" % row_sum)]
                )
                # Running column totals over the numeric cells (skip the
                # user-link cell at index 0).
                totals = [totals[i] + col.get('sort_key')
                          for i, col in enumerate(row[1:])]
                rows.append(row)
            else:
                rows.append([self.get_user_link(user), '--'])
        if self.all_relevant_forms:
            self.total_row = [_("All Users")] + totals
        return rows

    def _get_num_submissions(self, user_id, xmlns, app_id):
        # Reduced couch view lookup: submission count for one
        # (user, form, app) triple within the datespan.
        key = make_form_couch_key(self.domain, user_id=user_id, xmlns=xmlns,
                                  app_id=app_id)
        data = get_db().view(
            'reports_forms/all_forms',
            reduce=True,
            startkey=key + [self.datespan.startdate_param_utc],
            endkey=key + [self.datespan.enddate_param_utc],
        ).first()
        return data['value'] if data else 0

    @memoized
    def forms_per_user(self, app_id, xmlns):
        # todo: this seems to not work properly
        # needs extensive QA before being used
        query = (FormES()
                 .domain(self.domain)
                 .xmlns(xmlns)
                 .submitted(gt=self.datespan.startdate_utc,
                            lte=self.datespan.enddate_utc)
                 .size(0)
                 .user_facet())
        if app_id and app_id != MISSING_APP_ID:
            query = query.app(app_id)
        res = query.run()
        return res.facets.user.counts_by_term()
class DailyFormStatsReport(WorkerMonitoringReportTableBase, CompletionOrSubmissionTimeMixin, DatespanMixin):
    """Submissions per user per calendar day, with ajax pagination and
    column-based sorting backed by sofabed (FormData)."""
    slug = "daily_form_stats"
    name = ugettext_noop("Daily Form Activity")
    fields = [
        'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
        'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter',
    ]
    description = ugettext_noop("Number of submissions per day.")
    fix_left_col = False
    emailable = True
    is_cacheable = False
    ajax_pagination = True

    @property
    @memoized
    def dates(self):
        # Every calendar day in the selected datespan, inclusive.
        date_list = [self.datespan.startdate]
        while date_list[-1] < self.datespan.enddate:
            date_list.append(date_list[-1] + datetime.timedelta(days=1))
        return date_list

    @property
    def headers(self):
        # Username | one column per day | Total.
        headers = DataTablesHeader(DataTablesColumn(_("Username"), span=3))
        for d in self.dates:
            headers.add_column(DataTablesColumn(d.strftime(DATE_FORMAT), sort_type=DTSortType.NUMERIC))
        headers.add_column(DataTablesColumn(_("Total"), sort_type=DTSortType.NUMERIC))
        return headers

    @property
    def date_field(self):
        # Sofabed column to filter/group on, per the time filter choice.
        return 'received_on' if self.by_submission_time else 'time_end'

    @property
    def startdate(self):
        return self.datespan.startdate_utc if self.by_submission_time else self.datespan.startdate

    @property
    def enddate(self):
        return self.datespan.enddate_utc if self.by_submission_time else self.datespan.enddate_adjusted

    def date_filter(self, start, end):
        # ORM kwargs restricting the chosen date field to [start, end].
        return {'%s__range' % self.date_field: (start, end)}

    @property
    def shared_pagination_GET_params(self):
        # Filter state that must survive DataTables ajax page requests.
        params = [
            dict(
                name=EMWF.slug,
                value=EMWF.get_value(self.request, self.domain)),
            dict(
                name=CompletionOrSubmissionTimeFilter.slug,
                value=CompletionOrSubmissionTimeFilter.get_value(self.request, self.domain)),
            dict(name='startdate', value=self.datespan.startdate_display),
            dict(name='enddate', value=self.datespan.enddate_display),
        ]
        return params

    @property
    def total_records(self):
        return len(self.all_users)

    @property
    @memoized
    def all_users(self):
        # All matching users from ES, as report-user dicts sorted by
        # display name.
        fields = ['_id', 'username', 'first_name', 'last_name', 'doc_type', 'is_active', 'email']
        users = EMWF.user_es_query(self.domain, self.request).fields(fields)\
            .run().hits
        # NOTE: relies on Python 2 map() returning a list.
        users = map(util._report_user_dict, users)
        return sorted(users, key=lambda u: u['username_in_report'])

    def paginate_list(self, data_list):
        # Apply the DataTables page window, if any.
        if self.pagination:
            start = self.pagination.start
            end = start + self.pagination.count
            return data_list[start:end]
        else:
            return data_list

    def users_by_username(self, order):
        users = self.all_users
        if order == "desc":
            # NOTE(review): reverses the memoized list in place, so the
            # cached all_users ordering is mutated -- confirm intended.
            users.reverse()
        return self.paginate_list(users)

    def users_by_range(self, start, end, order):
        # Count submissions per user within [start, end], then sort
        # users by that count.
        results = FormData.objects \
            .filter(doc_type='XFormInstance') \
            .filter(**self.date_filter(start, end)) \
            .values('user_id') \
            .annotate(Count('user_id'))
        count_dict = dict((result['user_id'], result['user_id__count']) for result in results)
        return self.users_sorted_by_count(count_dict, order)

    def users_sorted_by_count(self, count_dict, order):
        # Split all_users into those in count_dict and those not.
        # Sort the former by count and return
        users_with_forms = []
        users_without_forms = []
        for user in self.all_users:
            u_id = user['user_id']
            if u_id in count_dict:
                users_with_forms.append((count_dict[u_id], user))
            else:
                users_without_forms.append(user)
        if order == "asc":
            users_with_forms.sort()
            sorted_users = users_without_forms
            sorted_users += map(lambda u: u[1], users_with_forms)
        else:
            users_with_forms.sort(reverse=True)
            sorted_users = map(lambda u: u[1], users_with_forms)
            sorted_users += users_without_forms
        return self.paginate_list(sorted_users)

    @property
    def column_count(self):
        # Username + one per day + Total.
        return len(self.dates) + 2

    @property
    def rows(self):
        # Sort by the DataTables-selected column: a day column, the
        # totals column, or the username column (default).
        self.sort_col = self.request_params.get('iSortCol_0', 0)
        totals_col = self.column_count - 1
        order = self.request_params.get('sSortDir_0')

        if self.sort_col == totals_col:
            users = self.users_by_range(self.startdate, self.enddate, order)
        elif 0 < self.sort_col < totals_col:
            start = self.dates[self.sort_col-1]
            end = start + datetime.timedelta(days=1)
            users = self.users_by_range(start, end, order)
        else:
            users = self.users_by_username(order)

        rows = [self.get_row(user) for user in users]
        self.total_row = self.get_row()
        return rows

    @property
    def get_all_rows(self):
        # Unpaginated variant used by exports.
        rows = [self.get_row(user) for user in self.all_users]
        self.total_row = self.get_row()
        return rows

    def get_row(self, user=None):
        """
        Assemble a row for a given user.
        If no user is passed, assemble a totals row.
        """
        values = ['date']
        results = FormData.objects \
            .filter(doc_type='XFormInstance') \
            .filter(**self.date_filter(self.startdate, self.enddate))

        if user:
            results = results.filter(user_id=user.user_id)
            values.append('user_id')
        else:
            user_ids = [user_a.user_id for user_a in self.all_users]
            results = results.filter(user_id__in=user_ids)

        # Group by calendar day in the report timezone (raw SQL date cast).
        results = results.extra({'date': "date(%s AT TIME ZONE '%s')" % (self.date_field, self.timezone)}) \
            .values(*values) \
            .annotate(Count(self.date_field))

        count_field = '%s__count' % self.date_field
        counts_by_date = dict((result['date'].isoformat(), result[count_field]) for result in results)
        date_cols = [
            counts_by_date.get(date.strftime(DATE_FORMAT), 0)
            for date in self.dates
        ]
        first_col = self.get_raw_user_link(user) if user else _("Total")
        return [first_col] + date_cols + [sum(date_cols)]
class FormCompletionTimeReport(WorkerMonitoringReportTableBase, DatespanMixin,
                               CompletionOrSubmissionTimeMixin):
    """Duration statistics (avg/stddev/min/max/count) for one selected
    form, per user, from sofabed."""
    name = ugettext_noop("Form Completion Time")
    slug = "completion_times"
    fields = ['corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
              'corehq.apps.reports.filters.forms.SingleFormByApplicationFilter',
              'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
              'corehq.apps.reports.filters.dates.DatespanFilter']
    description = ugettext_noop("Statistics on time spent on a particular form.")
    is_cacheable = True

    def get_user_link(self, user):
        # Unlike the base class, link to Submit History pre-filtered to
        # the currently selected form and date range.
        params = {
            "form_unknown": self.request.GET.get("form_unknown", ''),
            "form_unknown_xmlns": self.request.GET.get("form_unknown_xmlns", ''),
            "form_status": self.request.GET.get("form_status", ''),
            "form_app_id": self.request.GET.get("form_app_id", ''),
            "form_module": self.request.GET.get("form_module", ''),
            "form_xmlns": self.request.GET.get("form_xmlns", ''),
            "startdate": self.request.GET.get("startdate", ''),
            "enddate": self.request.GET.get("enddate", '')
        }
        params.update(EMWF.for_user(user.user_id))
        # Imported locally to avoid a circular import at module load time.
        from corehq.apps.reports.standard.inspect import SubmitHistory
        user_link_template = '<a href="%(link)s">%(username)s</a>'
        base_link = "%s%s" % (get_url_base(), SubmitHistory.get_url(domain=self.domain))
        link = "{baselink}?{params}".format(baselink=base_link, params=urlencode(params))
        user_link = user_link_template % {
            'link': link,
            'username': user.username_in_report,
        }
        return self.table_cell(user.raw_username, user_link)

    @property
    @memoized
    def selected_xmlns(self):
        # Dict with at least 'xmlns' and 'app_id' for the selected form.
        return SingleFormByApplicationFilter.get_value(self.request, self.domain)

    @property
    def headers(self):
        if self.selected_xmlns['xmlns'] is None:
            return DataTablesHeader(DataTablesColumn(_("No Form Selected"), sortable=False))
        return DataTablesHeader(DataTablesColumn(_("User")),
            DataTablesColumn(_("Average"), sort_type=DTSortType.NUMERIC),
            DataTablesColumn(_("Std. Dev."), sort_type=DTSortType.NUMERIC),
            DataTablesColumn(_("Shortest"), sort_type=DTSortType.NUMERIC),
            DataTablesColumn(_("Longest"), sort_type=DTSortType.NUMERIC),
            DataTablesColumn(_("No. of Forms"), sort_type=DTSortType.NUMERIC))

    @property
    def rows(self):
        rows = []
        if self.selected_xmlns['xmlns'] is None:
            rows.append([_("You must select a specific form to view data.")])
            return rows

        def to_duration(val_in_s):
            # Seconds (from the DB aggregate) -> timedelta.
            assert val_in_s is not None
            return datetime.timedelta(seconds=val_in_s)

        def to_minutes(val_in_s):
            # Human-friendly duration, or a placeholder for missing data.
            if val_in_s is None:
                return "--"
            return friendly_timedelta(to_duration(val_in_s))

        def to_minutes_raw(val_in_s):
            """
            return a timestamp like 66:12:24 (the first number is hours
            """
            if val_in_s is None:
                return '--'
            td = to_duration(val_in_s)
            hours, remainder = divmod(td.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            return '{h}:{m}:{s}'.format(
                h=(td.days * 24) + hours,
                m=minutes,
                s=seconds,
            )

        def _fmt(pretty_fn, val):
            return format_datatables_data(pretty_fn(val), val)

        def _fmt_ts(timestamp):
            # Cell with pretty text, raw sort key, and H:M:S export value.
            return format_datatables_data(to_minutes(timestamp), timestamp, to_minutes_raw(timestamp))

        def get_data(users, group_by_user=True):
            # Aggregate form durations from sofabed; per-user rows when
            # group_by_user, otherwise a single overall aggregate dict.
            query = FormData.objects \
                .filter(doc_type='XFormInstance') \
                .filter(xmlns=self.selected_xmlns['xmlns'])

            date_field = 'received_on' if self.by_submission_time else 'time_end'
            date_filter = {
                '{}__range'.format(date_field): (self.datespan.startdate_utc, self.datespan.enddate_utc)
            }
            query = query.filter(**date_filter)

            if users:
                query = query.filter(user_id__in=users)

            if self.selected_xmlns['app_id'] is not None:
                query = query.filter(app_id=self.selected_xmlns['app_id'])

            if group_by_user:
                query = query.values('user_id')
                return query.annotate(Max('duration')) \
                    .annotate(Min('duration')) \
                    .annotate(Avg('duration')) \
                    .annotate(StdDev('duration')) \
                    .annotate(Count('duration'))
            else:
                return query.aggregate(
                    Max('duration'),
                    Min('duration'),
                    Avg('duration'),
                    StdDev('duration'),
                    Count('duration')
                )

        users_data = EMWF.pull_users_and_groups(
            self.domain, self.request, True, True)
        user_ids = [user.user_id for user in users_data.combined_users]

        data_map = dict([(row['user_id'], row) for row in get_data(user_ids)])

        for user in users_data.combined_users:
            # Users with no submissions get an empty stats dict, which
            # renders as "--" cells below.
            stats = data_map.get(user.user_id, {})
            rows.append([self.get_user_link(user),
                _fmt_ts(stats.get('duration__avg')),
                _fmt_ts(stats.get('duration__stddev')),
                _fmt_ts(stats.get("duration__min")),
                _fmt_ts(stats.get("duration__max")),
                _fmt(lambda x: x, stats.get("duration__count", 0)),
            ])

        total_data = get_data(user_ids, group_by_user=False)
        self.total_row = ["All Users",
            _fmt_ts(total_data.get('duration__avg')),
            _fmt_ts(total_data.get('duration__stddev')),
            _fmt_ts(total_data.get('duration__min')),
            _fmt_ts(total_data.get('duration__max')),
            total_data.get('duration__count', 0)]
        return rows
class FormCompletionVsSubmissionTrendsReport(WorkerMonitoringReportTableBase, MultiFormDrilldownMixin, DatespanMixin):
    """Lag between form completion on the device and successful
    submission to the server, per form instance."""
    name = ugettext_noop("Form Completion vs. Submission Trends")
    slug = "completion_vs_submission"
    is_cacheable = True
    description = ugettext_noop("Time lag between when forms were completed and when forms were successfully "
                                "sent to CommCare HQ.")
    fields = ['corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
              'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
              'corehq.apps.reports.filters.dates.DatespanFilter']

    @property
    def headers(self):
        return DataTablesHeader(DataTablesColumn(_("User")),
            DataTablesColumn(_("Completion Time")),
            DataTablesColumn(_("Submission Time")),
            DataTablesColumn(_("Form Name")),
            DataTablesColumn(_("View"), sortable=False),
            DataTablesColumn(_("Difference"), sort_type=DTSortType.NUMERIC)
        )

    @property
    def rows(self):
        rows = []
        total = 0
        total_seconds = 0
        if self.all_relevant_forms:
            users_data = EMWF.pull_users_and_groups(
                self.domain, self.request, True, True)

            # Build a raw-SQL tuple-IN clause matching any selected
            # (app_id, xmlns) pair.
            placeholders = []
            params = []
            user_map = {user.user_id: user
                        for user in users_data.combined_users if user.user_id}
            form_map = {}
            for form in self.all_relevant_forms.values():
                placeholders.append('(%s,%s)')
                params.extend([form['app_id'], form['xmlns']])
                form_map[form['xmlns']] = form
            where = '(app_id, xmlns) in (%s)' % (','.join(placeholders))
            results = FormData.objects \
                .filter(doc_type='XFormInstance') \
                .filter(received_on__range=(self.datespan.startdate_utc, self.datespan.enddate_utc)) \
                .filter(user_id__in=user_map.keys()) \
                .values('instance_id', 'user_id', 'time_end', 'received_on', 'xmlns')\
                .extra(
                    where=[where], params=params
                )
            for row in results:
                # Completion time is recorded naive in the device's local
                # time; localize it to the report timezone (DST-aware).
                completion_time = row['time_end'].replace(tzinfo=None)
                completion_dst = False if self.timezone == pytz.utc else\
                    tz_utils.is_timezone_in_dst(self.timezone, completion_time)
                completion_time = self.timezone.localize(completion_time, is_dst=completion_dst)
                # Submission time is stored UTC; convert to report tz.
                submission_time = row['received_on'].replace(tzinfo=pytz.utc)
                submission_time = tz_utils.adjust_datetime_to_timezone(submission_time, pytz.utc.zone, self.timezone.zone)
                td = submission_time-completion_time
                td_total = (td.seconds + td.days * 24 * 3600)
                rows.append([
                    self.get_user_link(user_map.get(row['user_id'])),
                    self._format_date(completion_time),
                    self._format_date(submission_time),
                    form_map[row['xmlns']]['name'],
                    self._view_form_link(row['instance_id']),
                    self.table_cell(td_total, self._format_td_status(td))
                ])
                # Negative lags ("submitted before completed") are
                # excluded from the average.
                if td_total >= 0:
                    total_seconds += td_total
                    total += 1
        else:
            rows.append(['No Submissions Available for this Date Range'] + ['--']*5)

        # NOTE: int(total_seconds/total) relies on Python 2 integer
        # division when both operands are ints.
        self.total_row = [_("Average"), "-", "-", "-", "-", self._format_td_status(int(total_seconds/total), False) if total > 0 else "--"]
        return rows

    def _format_date(self, date, d_format="%d %b %Y, %H:%M:%S"):
        # Cell sorted by the datetime itself, displayed with tz name.
        return self.table_cell(
            date,
            "%s (%s)" % (date.strftime(d_format), date.tzinfo._tzname)
        )

    def _format_td_status(self, td, use_label=True):
        """Render a timedelta (or a seconds int) as a human-readable lag
        string, optionally wrapped in a Bootstrap severity label."""
        status = list()
        template = '<span class="label %(klass)s">%(status)s</span>'
        klass = ""
        if isinstance(td, int):
            td = datetime.timedelta(seconds=td)
        if isinstance(td, datetime.timedelta):
            hours = td.seconds//3600
            minutes = (td.seconds//60)%60
            vals = [td.days, hours, minutes, (td.seconds - hours*3600 - minutes*60)]
            names = [_("day"), _("hour"), _("minute"), _("second")]
            status = ["%s %s%s" % (val, names[i], "s" if val != 1 else "") for (i, val) in enumerate(vals) if val > 0]

            # Severity coloring by lag size.
            if td.days > 1:
                klass = "label-important"
            elif td.days == 1:
                klass = "label-warning"
            elif hours > 5:
                klass = "label-info"
            if not status:
                status.append("same")
            elif td.days < 0:
                # Negative timedelta: clock skew or genuinely strange data.
                if abs(td).seconds > 15*60:
                    status = [_("submitted before completed [strange]")]
                    klass = "label-inverse"
                else:
                    status = [_("same")]

        if use_label:
            return template % dict(status=", ".join(status), klass=klass)
        else:
            return ", ".join(status)

    def _view_form_link(self, instance_id):
        return '<a class="btn" href="%s">View Form</a>' % reverse('render_form_data', args=[self.domain, instance_id])
class WorkerMonitoringChartBase(ProjectReport, ProjectReportParametersMixin):
    """Base class for chart-style (non-tabular) worker monitoring reports."""
    fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
              'corehq.apps.reports.filters.users.SelectMobileWorkerFilter']
    flush_layout = True
    # Rendered asynchronously into the basic report shell.
    report_template_path = "reports/async/basic.html"
class WorkerActivityTimes(WorkerMonitoringChartBase,
                          MultiFormDrilldownMixin, CompletionOrSubmissionTimeMixin, DatespanMixin):
    """GitHub-style punchcard of when forms are completed/submitted."""
    name = ugettext_noop("Worker Activity Times")
    slug = "worker_activity_times"
    is_cacheable = True
    description = ugettext_noop("Graphical representation of when forms are "
                                "completed or submitted.")
    fields = [
        'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
        'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
        'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter']
    report_partial_path = "reports/partials/punchcard.html"

    @property
    @memoized
    def activity_times(self):
        # (weekday, hour) pairs, one per matching form submission, used
        # to build the punchcard. One couch view hit per (user, form).
        all_times = []
        users_data = EMWF.pull_users_and_groups(
            self.domain, self.request, True, True)
        for user in users_data.combined_users:
            for form, info in self.all_relevant_forms.items():
                key = make_form_couch_key(
                    self.domain,
                    user_id=user.user_id,
                    xmlns=info['xmlns'],
                    app_id=info['app_id'],
                    by_submission_time=self.by_submission_time,
                )
                data = get_db().view("reports_forms/all_forms",
                    reduce=False,
                    startkey=key+[self.datespan.startdate_param_utc],
                    endkey=key+[self.datespan.enddate_param_utc],
                ).all()
                all_times.extend([dateutil.parser.parse(d['key'][-1]) for d in data])
        if self.by_submission_time:
            # completion time is assumed to be in the phone's timezone until we can send proper timezone info
            all_times = [tz_utils.adjust_datetime_to_timezone(t, pytz.utc.zone, self.timezone.zone) for t in all_times]
        return [(t.weekday(), t.hour) for t in all_times]

    @property
    def report_context(self):
        # Histogram of (weekday, hour) -> submission count.
        chart_data = defaultdict(int)
        for time in self.activity_times:
            chart_data[time] += 1
        return dict(
            chart_url=self.generate_chart(chart_data),
            no_data=not self.activity_times,
            timezone=self.timezone,
        )

    @classmethod
    def generate_chart(cls, data, width=950, height=300):
        """
        Gets a github style punchcard chart.

        :param data: mapping (weekday, hour) -> count (e.g. a defaultdict)
        Hat tip: http://github.com/dustin/bindir/blob/master/gitaggregates.py
        """
        no_data = not data
        try:
            from pygooglechart import ScatterChart
        except ImportError:
            raise Exception("WorkerActivityTimes requires pygooglechart.")
        chart = ScatterChart(width, height, x_range=(-1, 24), y_range=(-1, 7))

        # X series: hours 0..23 repeated for each of the 8 plotted rows.
        chart.add_data([(h % 24) for h in range(24 * 8)])

        # Y series: row index, 24 points per row.
        d=[]
        for i in range(8):
            d.extend([i] * 24)
        chart.add_data(d)

        # mapping between numbers 0..6 and its day of the week label
        day_names = "Mon Tue Wed Thu Fri Sat Sun".split(" ")
        # the order, bottom-to-top, in which the days should appear
        # i.e. Sun, Sat, Fri, Thu, etc
        days = (6, 5, 4, 3, 2, 1, 0)

        # Marker sizes: one count per (day, hour), plus a padding row.
        sizes=[]
        for d in days:
            sizes.extend([data[(d, h)] for h in range(24)])
        sizes.extend([0] * 24)
        if no_data:
            # fill in a line out of view so that chart.get_url() doesn't crash
            sizes.extend([1] * 24)
        chart.add_data(sizes)

        chart.set_axis_labels('x', [''] + [str(h) for h in range(24)] + [''])
        chart.set_axis_labels('y', [''] + [day_names[n] for n in days] + [''])

        chart.add_marker(1, 1.0, 'o', '333333', 25)
        return chart.get_url() + '&chds=-1,24,-1,7,0,20'
class WorkerActivityReport(WorkerMonitoringReportTableBase, DatespanMixin):
    """Summary of form and case activity per user or per group."""
    slug = 'worker_activity'
    name = ugettext_noop("Worker Activity")
    description = ugettext_noop("Summary of form and case activity by user or group.")
    section_name = ugettext_noop("Project Reports")
    # How many duration intervals we go back to calculate averages.
    num_avg_intervals = 3
    is_cacheable = True
    fields = [
        'corehq.apps.reports.dont_use.fields.MultiSelectGroupField',
        'corehq.apps.reports.dont_use.fields.UserOrGroupField',
        'corehq.apps.reports.filters.select.MultiCaseTypeFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter',
    ]
    fix_left_col = True
    emailable = True
@property
@memoized
def case_types_filter(self):
    # ES "terms" filter for the selected case types; empty dict when the
    # user picked none.
    # NOTE(review): relies on Python 2 filter() returning a list; under
    # Python 3 a filter object is always truthy, which would break the
    # emptiness check below.
    case_types = filter(None, self.request.GET.getlist('case_type'))
    if case_types:
        return {"terms": {"type.exact": case_types}}
    return {}
@property
def view_by(self):
    # Report granularity from the query string: 'groups' for per-group
    # rows, otherwise None for per-user rows.
    return self.request.GET.get('view_by')
@property
def headers(self):
    # Column layout depends on the view: per-group rows swap the user
    # column and the last-submission column for group equivalents.
    CASE_TYPE_MSG = "The case type filter doesn't affect this column."
    by_group = self.view_by == 'groups'
    columns = [DataTablesColumn(_("Group"))] if by_group else [DataTablesColumn(_("User"))]
    columns.append(DataTablesColumnGroup(_("Form Data"),
        DataTablesColumn(_("# Forms Submitted"), sort_type=DTSortType.NUMERIC,
            help_text=_("Number of forms submitted in chosen date range. %s" % CASE_TYPE_MSG)),
        DataTablesColumn(_("Avg # Forms Submitted"), sort_type=DTSortType.NUMERIC,
            help_text=_("Average number of forms submitted in the last three date ranges of the same length. %s" % CASE_TYPE_MSG)),
        DataTablesColumn(_("Last Form Submission"),
            help_text=_("Date of last form submission in time period. Total row displays proportion of users submitting forms in date range")) \
        if not by_group else DataTablesColumn(_("# Active Users"), sort_type=DTSortType.NUMERIC,
            help_text=_("Proportion of users in group who submitted forms in date range."))
    ))
    columns.append(DataTablesColumnGroup(_("Case Data"),
        DataTablesColumn(_("# Cases Created"), sort_type=DTSortType.NUMERIC,
            help_text=_("Number of cases created in the date range.")),
        DataTablesColumn(_("# Cases Closed"), sort_type=DTSortType.NUMERIC,
            help_text=_("Number of cases closed in the date range.")),
    ))
    columns.append(DataTablesColumnGroup(_("Case Activity"),
        DataTablesColumn(_("# Active Cases"), sort_type=DTSortType.NUMERIC,
            help_text=_("Number of cases owned by the user that were opened, modified or closed in date range. This includes case sharing cases.")),
        DataTablesColumn(_("# Total Cases"), sort_type=DTSortType.NUMERIC,
            help_text=_("Total number of cases owned by the user. This includes case sharing cases.")),
        DataTablesColumn(_("% Active Cases"), sort_type=DTSortType.NUMERIC,
            help_text=_("Percentage of cases owned by user that were active. This includes case sharing cases.")),
    ))
    return DataTablesHeader(*columns)
@property
def users_to_iterate(self):
    """Users the report runs over: every CommCareUser in the domain when
    the special '_all' group is selected, otherwise the combined users."""
    if '_all' not in self.group_ids:
        return self.combined_users
    domain_users = list(CommCareUser.by_domain(self.domain))
    return [util._report_user_dict(u) for u in domain_users]
def es_form_submissions(self, datespan=None, dict_only=False):
    """Queries ES for form submissions in the datespan, faceted by
    form.meta.userID (per-user submission counts)."""
    datespan = datespan or self.datespan
    date_range = {
        "from": datespan.startdate_param,
        "to": datespan.enddate_param,
        "include_upper": True,
    }
    must_clauses = [
        {"match": {"domain.exact": self.domain}},
        {"range": {"form.meta.timeEnd": date_range}},
    ]
    q = {"query": {"bool": {"must": must_clauses}}}
    # Copy the shared form filters so we never mutate the module-level list.
    q["filter"] = {"and": ADD_TO_ES_FILTER["forms"][:]}
    return es_query(q=q, facets=['form.meta.userID'],
                    es_url=XFORM_INDEX + '/xform/_search',
                    size=1, dict_only=dict_only)
def es_last_submissions(self, datespan=None, dict_only=False):
    """
    Creates a dict of userid => date of last submission
    """
    datespan = datespan or self.datespan

    def es_q(user_id):
        # Fetch just the most recent form (size=1, sorted descending on
        # form.meta.timeEnd) submitted by user_id within the datespan.
        q = {"query": {
            "bool": {
                "must": [
                    {"match": {"domain.exact": self.domain}},
                    {"match": {"form.meta.userID": user_id}},
                    {"range": {
                        "form.meta.timeEnd": {
                            "from": datespan.startdate_param,
                            "to": datespan.enddate_param,
                            "include_upper": True}}}
                ]}},
            "sort": {"form.meta.timeEnd": {"order": "desc"}}}
        results = es_query(q=q, es_url=XFORM_INDEX + '/xform/_search', size=1, dict_only=dict_only)['hits']['hits']
        return results[0]['_source']['form']['meta']['timeEnd'] if results else None

    # Timestamp format ES returns for form.meta.timeEnd.
    DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    def convert_date(date):
        # Parse the ES timestamp down to a date; None passes through.
        return datetime.datetime.strptime(date, DATE_FORMAT).date() if date else None

    # NOTE(review): one ES round-trip per user -- O(len(users_to_iterate))
    # queries; fine for small user lists, costly for large domains.
    return dict([(u["user_id"], convert_date(es_q(u["user_id"]))) for u in self.users_to_iterate])
def es_case_queries(self, date_field, user_field='user_id', datespan=None, dict_only=False):
    """Counts cases whose date_field (e.g. 'opened_on', 'closed_on') falls
    in the datespan, faceted by user_field to give per-user counts."""
    datespan = datespan or self.datespan
    must_clauses = [
        {"match": {"domain.exact": self.domain}},
        {"range": {
            date_field: {
                "from": datespan.startdate_param,
                "to": datespan.enddate_param,
                "include_upper": True}}},
    ]
    # Restrict to the selected case types when a filter is active.
    if self.case_types_filter:
        must_clauses.append(self.case_types_filter)
    q = {"query": {"bool": {"must": must_clauses}}}
    return es_query(q=q, facets=[user_field],
                    es_url=CASE_INDEX + '/case/_search',
                    size=1, dict_only=dict_only)
def es_active_cases(self, datespan=None, dict_only=False):
    """Facets by owner_id the cases that have at least one action dated
    within the datespan.

    NOTE(review): the original docstring said "Open cases that haven't been
    modified within time range", but the nested query matches cases whose
    actions.date IS in range -- confirm which wording reflects intent.
    """
    datespan = datespan or self.datespan
    action_in_range = {
        "range": {
            "actions.date": {
                "from": datespan.startdate_param,
                "to": datespan.enddate_param,
                "include_upper": True}}}
    must_clauses = [
        {"match": {"domain.exact": self.domain}},
        {"nested": {"path": "actions", "query": action_in_range}},
    ]
    if self.case_types_filter:
        must_clauses.append(self.case_types_filter)
    q = {"query": {"bool": {"must": must_clauses}}}
    return es_query(q=q, facets=['owner_id'],
                    es_url=CASE_INDEX + '/case/_search',
                    size=1, dict_only=dict_only)
def es_total_cases(self, datespan=None, dict_only=False):
    """Facets by owner_id the cases open at some point during the datespan:
    opened on/before the end date and not closed before the start date."""
    datespan = datespan or self.datespan
    must_clauses = [
        {"match": {"domain.exact": self.domain}},
        {"range": {"opened_on": {"lte": datespan.enddate_param}}},
    ]
    if self.case_types_filter:
        must_clauses.append(self.case_types_filter)
    bool_clause = {
        "must": must_clauses,
        "must_not": {"range": {"closed_on": {"lt": datespan.startdate_param}}},
    }
    q = {"query": {"bool": bool_clause}}
    return es_query(q=q, facets=['owner_id'],
                    es_url=CASE_INDEX + '/case/_search',
                    size=1, dict_only=dict_only)
@property
def rows(self):
    """Builds the report rows -- one per group or per user depending on
    view_by -- from several ES facet queries, turning numeric cells into
    links to the submit-history / case-list reports. Also sets
    self.total_row as a side effect."""
    duration = (self.datespan.enddate - self.datespan.startdate) + datetime.timedelta(days=1)  # adjust bc inclusive
    # A preceding window num_avg_intervals times as long, used for the
    # "avg forms submitted" column.
    avg_datespan = DateSpan(self.datespan.startdate - (duration * self.num_avg_intervals),
                            self.datespan.startdate - datetime.timedelta(days=1))

    if avg_datespan.startdate.year < 1900:  # srftime() doesn't work for dates below 1900
        avg_datespan.startdate = datetime.datetime(1900, 1, 1)

    # Per-user form submission counts for the selected range...
    form_data = self.es_form_submissions()
    submissions_by_user = dict([(t["term"], t["count"]) for t in form_data["facets"]["form.meta.userID"]["terms"]])

    # ...and for the preceding averaging window.
    avg_form_data = self.es_form_submissions(datespan=avg_datespan)
    avg_submissions_by_user = dict([(t["term"], t["count"]) for t in avg_form_data["facets"]["form.meta.userID"]["terms"]])

    if self.view_by == 'groups':
        # Per-group count of members with at least one submission in range.
        active_users_by_group = dict([(g, len(filter(lambda u: submissions_by_user.get(u['user_id']), users)))
                                      for g, users in self.users_by_group.iteritems()])
    else:
        last_form_by_user = self.es_last_submissions()

    # Case counts faceted by the acting/owning user (terms lowercased for
    # case-insensitive lookup).
    case_creation_data = self.es_case_queries('opened_on', 'opened_by')
    creations_by_user = dict([(t["term"].lower(), t["count"])
                              for t in case_creation_data["facets"]["opened_by"]["terms"]])

    case_closure_data = self.es_case_queries('closed_on', 'closed_by')
    closures_by_user = dict([(t["term"].lower(), t["count"])
                             for t in case_closure_data["facets"]["closed_by"]["terms"]])

    active_case_data = self.es_active_cases()
    actives_by_owner = dict([(t["term"].lower(), t["count"])
                             for t in active_case_data["facets"]["owner_id"]["terms"]])

    total_case_data = self.es_total_cases()
    totals_by_owner = dict([(t["term"].lower(), t["count"])
                            for t in total_case_data["facets"]["owner_id"]["terms"]])

    def dates_for_linked_reports(case_list=False):
        # The case list treats the end date exclusively, so push it one day.
        start_date = self.datespan.startdate_param
        end_date = self.datespan.enddate if not case_list else self.datespan.enddate + datetime.timedelta(days=1)
        end_date = end_date.strftime(self.datespan.format)
        return start_date, end_date

    def submit_history_link(owner_id, val, type):
        """
        takes a row, and converts certain cells in the row to links that link to the submit history report
        """
        fs_url = reverse('project_report_dispatcher', args=(self.domain, 'submit_history'))
        if type == 'user':
            url_args = EMWF.for_user(owner_id)
        else:
            assert type == 'group'
            url_args = EMWF.for_reporting_group(owner_id)

        start_date, end_date = dates_for_linked_reports()
        url_args.update({
            "startdate": start_date,
            "enddate": end_date,
        })

        return util.numcell(u'<a href="{base}{report}?{params}" target="_blank">{display}</a>'.format(
            base=get_url_base(),
            report=fs_url,
            params=urlencode(url_args, True),
            display=val,
        ), val)

    def add_case_list_links(owner_id, row):
        """
        takes a row, and converts certain cells in the row to links that link to the case list page
        """
        cl_url = reverse('project_report_dispatcher', args=(self.domain, 'case_list'))
        url_args = EMWF.for_user(owner_id)

        start_date, end_date = dates_for_linked_reports(case_list=True)
        start_date_sub1 = self.datespan.startdate - datetime.timedelta(days=1)
        start_date_sub1 = start_date_sub1.strftime(self.datespan.format)

        # Cell index in the row -> case-list search query for that count.
        search_strings = {
            4: "opened_by: %s AND opened_on: [%s TO %s]" % (owner_id, start_date, end_date),  # cases created
            5: "closed_by: %s AND closed_on: [%s TO %s]" % (owner_id, start_date, end_date),  # cases closed
            7: "opened_on: [* TO %s] AND NOT closed_on: [* TO %s]" % (end_date, start_date_sub1),  # total cases
        }
        # The active-cases cell only links when the range ends today/tomorrow.
        if today_or_tomorrow(self.datespan.enddate):
            search_strings[6] = "modified_on: [%s TO %s]" % (start_date, end_date)  # active cases

        if self.case_type:
            for index, search_string in search_strings.items():
                search_strings[index] = search_string + " AND type.exact: %s" % self.case_type

        def create_case_url(index):
            """
            Given an index for a cell in a the row, creates the link to the case list page for that cell
            """
            url_params = {}
            url_params.update(url_args)
            url_params.update({"search_query": search_strings[index]})
            return util.numcell('<a href="%s?%s" target="_blank">%s</a>' % (cl_url, urlencode(url_params, True), row[index]), row[index])

        for i in search_strings:
            row[i] = create_case_url(i)
        return row

    def group_cell(group_id, group_name):
        """
        takes group info, and creates a cell that links to the user status report focused on the group
        """
        us_url = reverse('project_report_dispatcher', args=(self.domain, 'worker_activity'))
        start_date, end_date = dates_for_linked_reports()
        url_args = {
            "group": group_id,
            "startdate": start_date,
            "enddate": end_date,
        }
        return util.format_datatables_data(
            '<a href="%s?%s" target="_blank">%s</a>' % (us_url, urlencode(url_args, True), group_name),
            group_name
        )

    rows = []
    NO_FORMS_TEXT = _('None')
    if self.view_by == 'groups':
        for group, users in self.users_by_group.iteritems():
            # Group keys are encoded as "name|id".
            group_name, group_id = tuple(group.split('|'))
            if group_name == 'no_group':
                continue

            # Cases owned by case-sharing groups the members belong to count
            # toward the group's totals as well.
            case_sharing_groups = set(reduce(operator.add, [u['group_ids'] for u in users], []))
            active_cases = sum([int(actives_by_owner.get(u["user_id"].lower(), 0)) for u in users]) + \
                sum([int(actives_by_owner.get(g_id, 0)) for g_id in case_sharing_groups])
            total_cases = sum([int(totals_by_owner.get(u["user_id"].lower(), 0)) for u in users]) + \
                sum([int(totals_by_owner.get(g_id, 0)) for g_id in case_sharing_groups])
            active_users = int(active_users_by_group.get(group, 0))
            total_users = len(self.users_by_group.get(group, []))

            rows.append([
                group_cell(group_id, group_name),
                submit_history_link(group_id,
                    sum([int(submissions_by_user.get(user["user_id"], 0)) for user in users]),
                    type='group'),
                util.numcell(sum([int(avg_submissions_by_user.get(user["user_id"], 0)) for user in users]) / self.num_avg_intervals),
                # Sort key scales the active/total ratio to an int; -1 sorts
                # empty groups last.
                util.numcell("%s / %s" % (active_users, total_users),
                    int((float(active_users) / total_users) * 10000) if total_users else -1),
                util.numcell(sum([int(creations_by_user.get(user["user_id"].lower(), 0)) for user in users])),
                util.numcell(sum([int(closures_by_user.get(user["user_id"].lower(), 0)) for user in users])),
                util.numcell(active_cases),
                util.numcell(total_cases),
                util.numcell((float(active_cases) / total_cases) * 100 if total_cases else 'nan', convert='float'),
            ])
    else:
        for user in self.users_to_iterate:
            active_cases = int(actives_by_owner.get(user["user_id"].lower(), 0)) + \
                sum([int(actives_by_owner.get(group_id, 0)) for group_id in user["group_ids"]])
            total_cases = int(totals_by_owner.get(user["user_id"].lower(), 0)) + \
                sum([int(totals_by_owner.get(group_id, 0)) for group_id in user["group_ids"]])

            rows.append(add_case_list_links(user['user_id'], [
                user["username_in_report"],
                submit_history_link(user['user_id'],
                    submissions_by_user.get(user["user_id"], 0),
                    type='user'),
                util.numcell(int(avg_submissions_by_user.get(user["user_id"], 0)) / self.num_avg_intervals),
                last_form_by_user.get(user["user_id"]) or NO_FORMS_TEXT,
                int(creations_by_user.get(user["user_id"].lower(), 0)),
                int(closures_by_user.get(user["user_id"].lower(), 0)),
                # Raw int (linkable later) when range ends now-ish; otherwise
                # a formatted numcell.
                util.numcell(active_cases) if not today_or_tomorrow(self.datespan.enddate) else active_cases,
                total_cases,
                util.numcell((float(active_cases) / total_cases) * 100 if total_cases else 'nan', convert='float'),
            ]))

    self.total_row = [_("Total")]
    summing_cols = [1, 2, 4, 5, 6, 7]
    # Sum the numeric columns' sort keys; '---' where a total is meaningless.
    for col in range(1, len(self.headers)):
        if col in summing_cols:
            self.total_row.append(sum(filter(lambda x: not math.isnan(x), [row[col].get('sort_key', 0) for row in rows])))
        else:
            self.total_row.append('---')

    if self.view_by == 'groups':
        def parse(str):
            # Parse an "active / total" cell back into its two integers.
            num, denom = tuple(str.split('/'))
            num = int(num.strip())
            denom = int(denom.strip())
            return num, denom

        def add(result_tuple, str):
            num, denom = parse(str)
            return num + result_tuple[0], denom + result_tuple[1]

        # Sum "active / total" users across all group rows.
        self.total_row[3] = '%s / %s' % reduce(add, [row[3]["html"] for row in rows], (0, 0))
    else:
        # Proportion of users who submitted at least one form in range.
        num = len(filter(lambda row: row[3] != NO_FORMS_TEXT, rows))
        self.total_row[3] = '%s / %s' % (num, len(rows))

    return rows
| |
from unittest import mock
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo import search
from olympia.amo.tests import ESTestCaseWithAddons, TestCase
from olympia.tags.models import Tag
class TestESIndexing(ESTestCaseWithAddons):
    """Indexing checks; kept in their own class for data isolation."""

    def test_indexed_count(self):
        # Every addon created in setUpClass should have been indexed.
        indexed_count = Addon.search().filter(type=1, is_disabled=False).count()
        db_count = Addon.objects.filter(
            disabled_by_user=False,
            status__in=amo.VALID_ADDON_STATUSES).count()
        assert indexed_count == 4 == db_count

    def test_get_es_not_mocked(self):
        client = search.get_es()
        assert not issubclass(client.__class__, mock.Mock)
class TestNoESIndexing(TestCase):
    """With the plain TestCase, ES is mocked and nothing gets indexed."""

    def test_no_es(self):
        assert not getattr(self, 'es', False), (
            'TestCase should not have "es" attribute')

    def test_not_indexed(self):
        addon = Addon.objects.create(
            type=amo.ADDON_EXTENSION, status=amo.STATUS_APPROVED)
        # count() never reaches a real ES, so it returns a mock.
        result = Addon.search().filter(id__in=addon.id).count()
        assert issubclass(result.__class__, mock.Mock)

    def test_get_es_mocked(self):
        client = search.get_es()
        assert issubclass(client.__class__, mock.Mock)
class TestESWithoutMakingQueries(TestCase):
    """Tests for query-building methods that never hit ES directly, so they
    can use the faster TestCase class where ES is mocked.

    Fix: ``test_gte`` and ``test_lt`` previously asserted only
    ``len(filters)`` (truthiness), unlike their siblings which assert the
    exact count; they now assert ``== 2`` so an unexpected extra filter
    fails the test.
    """

    def test_clone(self):
        # Doing a filter creates a new ES object.
        qs = Addon.search()
        qs2 = qs.filter(type=1)
        assert 'bool' not in qs._build_query()['query']
        assert 'filter' in qs2._build_query()['query']['bool']

    def test_filter(self):
        qs = Addon.search().filter(type=1)
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'term': {'type': 1}}])

    def test_in_filter(self):
        qs = Addon.search().filter(type__in=[1, 2])
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'terms': {'type': [1, 2]}}])

    def test_and(self):
        qs = Addon.search().filter(type=1, category__in=[1, 2])
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [{'term': {'type': 1}}, {'terms': {'category': [1, 2]}}]
        assert len(filters) == 2
        assert {'term': {'type': 1}} in filters
        assert {'terms': {'category': [1, 2]}} in filters

    def test_query(self):
        qs = Addon.search().query(type=1)
        assert qs._build_query()['query'] == (
            {'term': {'type': 1}})

    def test_query_match(self):
        qs = Addon.search().query(name__match='woo woo')
        assert qs._build_query()['query'] == (
            {'match': {'name': 'woo woo'}})

    def test_query_multiple_and_range(self):
        qs = Addon.search().query(type=1, status__gte=1)
        query = qs._build_query()['query']
        # Query:
        # {'bool': {'must': [{'term': {'type': 1}},
        #                    {'range': {'status': {'gte': 1}}}, ]}}
        assert list(query.keys()) == ['bool']
        assert list(query['bool'].keys()) == ['must']
        assert {'term': {'type': 1}} in query['bool']['must']
        assert {'range': {'status': {'gte': 1}}} in query['bool']['must']

    def test_query_fuzzy(self):
        fuzz = {'boost': 2, 'value': 'woo'}
        qs = Addon.search().query(type=1, status__fuzzy=fuzz)
        query = qs._build_query()['query']
        # Query:
        # {'bool': {'must': [{'fuzzy': {'status': fuzz}},
        #                    {'term': {'type': 1}}, ]}})
        assert list(query.keys()) == ['bool']
        assert list(query['bool'].keys()) == ['must']
        assert {'term': {'type': 1}} in query['bool']['must']
        assert {'fuzzy': {'status': fuzz}} in query['bool']['must']

    def test_order_by_desc(self):
        qs = Addon.search().order_by('-rating')
        assert qs._build_query()['sort'] == [{'rating': 'desc'}]

    def test_order_by_asc(self):
        qs = Addon.search().order_by('rating')
        assert qs._build_query()['sort'] == ['rating']

    def test_order_by_multiple(self):
        qs = Addon.search().order_by('-rating', 'id')
        assert qs._build_query()['sort'] == [{'rating': 'desc'}, 'id']

    def test_slice(self):
        qs = Addon.search()[5:12]
        assert qs._build_query()['from'] == 5
        assert qs._build_query()['size'] == 7

    def test_slice_stop(self):
        qs = Addon.search()[:6]
        assert qs._build_query()['size'] == 6

    def test_slice_stop_zero(self):
        qs = Addon.search()[:0]
        assert qs._build_query()['size'] == 0

    def test_gte(self):
        qs = Addon.search().filter(type__in=[1, 2], status__gte=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'terms': {'type': [1, 2]}},
        #     {'range': {'status': {'gte': 4}}},
        # ]
        assert len(filters) == 2
        assert {'terms': {'type': [1, 2]}} in filters
        assert {'range': {'status': {'gte': 4}}} in filters

    def test_lte(self):
        qs = Addon.search().filter(type__in=[1, 2], status__lte=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'terms': {'type': [1, 2]}},
        #     {'range': {'status': {'lte': 4}}},
        # ]
        assert len(filters) == 2
        assert {'terms': {'type': [1, 2]}} in filters
        assert {'range': {'status': {'lte': 4}}} in filters

    def test_gt(self):
        qs = Addon.search().filter(type__in=[1, 2], status__gt=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'terms': {'type': [1, 2]}},
        #     {'range': {'status': {'gt': 4}}},
        # ]
        assert len(filters) == 2
        assert {'terms': {'type': [1, 2]}} in filters
        assert {'range': {'status': {'gt': 4}}} in filters

    def test_lt(self):
        qs = Addon.search().filter(type__in=[1, 2], status__lt=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'range': {'status': {'lt': 4}}},
        #     {'terms': {'type': [1, 2]}},
        # ]
        assert len(filters) == 2
        assert {'range': {'status': {'lt': 4}}} in filters
        assert {'terms': {'type': [1, 2]}} in filters

    def test_lt2(self):
        qs = Addon.search().filter(status__lt=4)
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'range': {'status': {'lt': 4}}}])

    def test_range(self):
        qs = Addon.search().filter(date__range=('a', 'b'))
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'range': {'date': {'gte': 'a', 'lte': 'b'}}}])

    def test_prefix(self):
        qs = Addon.search().query(name__startswith='woo')
        assert qs._build_query()['query'] == (
            {'prefix': {'name': 'woo'}})

    def test_values(self):
        qs = Addon.search().values('name')
        assert qs._build_query()['_source'] == ['id', 'name']

    def test_values_dict(self):
        qs = Addon.search().values_dict('name')
        assert qs._build_query()['_source'] == ['id', 'name']

    def test_empty_values_dict(self):
        qs = Addon.search().values_dict()
        assert qs._build_query()['_source'] == ['id']

    def test_extra_values(self):
        qs = Addon.search().extra(values=['name'])
        assert qs._build_query()['_source'] == ['id', 'name']

        qs = Addon.search().values('status').extra(values=['name'])
        assert qs._build_query()['_source'] == ['id', 'status', 'name']

    def test_extra_values_dict(self):
        qs = Addon.search().extra(values_dict=['name'])
        assert qs._build_query()['_source'] == ['id', 'name']

        qs = Addon.search().values_dict('status').extra(values_dict=['name'])
        assert qs._build_query()['_source'] == ['id', 'status', 'name']

    def test_extra_order_by(self):
        qs = Addon.search().extra(order_by=['-rating'])
        assert qs._build_query()['sort'] == [{'rating': 'desc'}]

        qs = Addon.search().order_by('-id').extra(order_by=['-rating'])
        assert qs._build_query()['sort'] == [
            {'id': 'desc'}, {'rating': 'desc'}]

    def test_extra_query(self):
        qs = Addon.search().extra(query={'type': 1})
        assert qs._build_query()['query'] == (
            {'term': {'type': 1}})

        qs = Addon.search().filter(status=1).extra(query={'type': 1})
        filtered = qs._build_query()['query']['bool']
        assert filtered['must'] == (
            [{'term': {'type': 1}}])
        assert filtered['filter'] == [{'term': {'status': 1}}]

    def test_extra_filter(self):
        qs = Addon.search().extra(filter={'category__in': [1, 2]})
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'terms': {'category': [1, 2]}}])

        qs = (Addon.search().filter(type=1)
              .extra(filter={'category__in': [1, 2]}))
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [{'term': {'type': 1}}, {'terms': {'category': [1, 2]}}]
        assert len(filters) == 2
        assert {'term': {'type': 1}} in filters
        assert {'terms': {'category': [1, 2]}} in filters

    def test_source(self):
        qs = Addon.search().source('versions')
        assert qs._build_query()['_source'] == ['id', 'versions']
class TestES(ESTestCaseWithAddons):
    """Tests that run real queries against the test ES index populated by
    ESTestCaseWithAddons (self._addons)."""

    def test_getitem(self):
        addons = list(Addon.search())
        assert addons[0] == Addon.search()[0]

    def test_iter(self):
        qs = Addon.search().filter(type=1, is_disabled=False)
        assert len(qs) == len(list(qs))

    def test_count(self):
        assert Addon.search().count() == 6

    def test_count_uses_cached_results(self):
        # count() must read the cached result set instead of re-querying.
        qs = Addon.search()
        qs._results_cache = mock.Mock()
        qs._results_cache.count = mock.sentinel.count
        assert qs.count() == mock.sentinel.count

    def test_len(self):
        qs = Addon.search()
        qs._results_cache = [1]
        assert len(qs) == 1

    def test_values_result(self):
        addons = [{'id': a.id, 'slug': a.slug} for a in self._addons]
        qs = Addon.search().values_dict('slug').order_by('id')
        assert list(qs) == addons

    def test_values_dict_result(self):
        addons = [{'id': a.id, 'slug': a.slug} for a in self._addons]
        qs = Addon.search().values_dict('slug').order_by('id')
        assert list(qs) == list(addons)

    def test_empty_values_dict_result(self):
        # With no fields requested, only the id is returned.
        qs = Addon.search().values_dict()
        assert list(qs[0].keys()) == ['id']

    def test_object_result(self):
        qs = Addon.search().filter(id=self._addons[0].id)[:1]
        assert self._addons[:1] == list(qs)

    def test_object_result_slice(self):
        addon = self._addons[0]
        qs = Addon.search().filter(id=addon.id)
        assert addon == qs[0]

    def test_extra_bad_key(self):
        with self.assertRaises(AssertionError):
            Addon.search().extra(x=1)

    def test_aggregations(self):
        # Tag three addons 'sky', two 'earth', one 'ocean' and verify the
        # terms aggregation counts, ordered by doc_count.
        Tag(tag_text='sky').save_tag(self._addons[0])
        Tag(tag_text='sky').save_tag(self._addons[1])
        Tag(tag_text='sky').save_tag(self._addons[2])
        Tag(tag_text='earth').save_tag(self._addons[0])
        Tag(tag_text='earth').save_tag(self._addons[1])
        Tag(tag_text='ocean').save_tag(self._addons[0])
        self.reindex(Addon)

        qs = Addon.search().aggregate(tags={'terms': {'field': 'tags'}})
        results = list(qs)
        assert len(results) == 6
        assert qs.aggregations == {
            u'tags': [
                {u'doc_count': 3, u'key': u'sky'},
                {u'doc_count': 2, u'key': u'earth'},
                {u'doc_count': 1, u'key': u'ocean'}]}
| |
import csv
import os
import re
import sys
from national_voter_file.transformers.base import (DATA_DIR,
BasePreparer,
BaseTransformer)
import usaddress
__all__ = ['default_file', 'StatePreparer', 'StateTransformer']
default_file = 'ncvoter_StatewideSAMPLE.csv'
class StatePreparer(BasePreparer):
    """Preparer for the tab-separated North Carolina statewide voter file."""

    state_path = 'nc'
    state_name = 'NorthCarolina'
    sep = '\t'

    def __init__(self, input_path, *args, **kwargs):
        super(StatePreparer, self).__init__(input_path, *args, **kwargs)
        # Fall back to the NC transformer when none was supplied.
        if not self.transformer:
            self.transformer = StateTransformer()

    def process(self):
        """Yields raw rows from the input file, one dict per voter."""
        rows = self.dict_iterator(self.open(self.input_path))
        for raw_row in rows:
            yield raw_row
class StateTransformer(BaseTransformer):
    """Maps raw North Carolina voter-file columns onto the national
    voter-file schema defined by BaseTransformer."""

    # Raw NC dates look like 10/31/2016.
    date_format = '%m/%d/%Y'
    input_fields = None

    # NC-specific column typing: PRECINCT_SPLIT may be str or None.
    col_type_dict = BaseTransformer.col_type_dict.copy()
    col_type_dict['PRECINCT_SPLIT'] = set([str, type(None)])

    col_map = {
        'PRECINCT_SPLIT': None
    }

    # NC party codes -> national codes; blank values map to unaffiliated.
    north_carolina_party_map = {
        'DEM': 'DEM',
        'REP': 'REP',
        'LIB': 'LIB',
        'UNA': 'UN',
        ' ': 'UN',
        '': 'UN'
    }

    # There are #<street type> artifacts in street address. These
    # will be ignored until more information can clarify purpose.
    hashtag_patterns = ['#RD', '#ROAD', '#DR', '#DRIVE', '#LN', '#LANE']
    hashtag_patterns += ['#WAY', '#CIRCLE', '#CIR', '#SLIP', '#BLVD', '#MAIN']
    hashtag_patterns += ['#HILL', '#HIGHWAY']

    #### Contact methods #######################################################

    def extract_name(self, input_dict):
        """Returns the name columns (title, first/middle/last, suffix)."""
        return {
            'TITLE': input_dict['name_prefx_cd'],
            'FIRST_NAME': input_dict['first_name'],
            'MIDDLE_NAME': input_dict['middle_name'],
            'LAST_NAME': input_dict['last_name'],
            'NAME_SUFFIX': input_dict['name_suffix_lbl'],
        }

    def extract_email(self, input_dict):
        """Email is not present in the NC file."""
        return {'EMAIL': None}

    def extract_phone_number(self, input_dict):
        """Returns the phone number as given in the file."""
        # TODO: Add parenthesis to area code?
        return {'PHONE': input_dict['full_phone_number']}

    def extract_do_not_call_status(self, input_dict):
        """Do-not-call status is not present in the NC file."""
        return {'DO_NOT_CALL_STATUS': None}

    #### Demographics methods ##################################################

    def extract_gender(self, input_dict):
        """Returns gender_code, normalizing blank/empty values to None."""
        gender = input_dict['gender_code']
        if len(gender) == 0 or gender == ' ':
            gender = None
        return {'GENDER': gender}

    def extract_race(self, input_dict):
        """
        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'RACE'
        """
        race = input_dict['race_code']
        # Normalize blank/empty codes to None.
        if len(race) == 0 or race == ' ':
            race = None
        return {'RACE': race}

    def extract_birth_state(self, input_dict):
        """
        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'BIRTH_STATE'
        """
        return {'BIRTH_STATE': input_dict['birth_state']}

    def extract_birthdate(self, input_dict):
        """
        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'BIRTHDATE'
        """
        # TODO: Estimate from age/age range?
        return {'BIRTHDATE': None,
                'BIRTHDATE_IS_ESTIMATE': 'Yes'}

    def extract_language_choice(self, input_dict):
        """
        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'LANGUAGE_CHOICE'
        """
        return {'LANGUAGE_CHOICE': None}

    #### Address methods #######################################################

    def extract_registration_address(self, input_dict):
        """
        Relies on the usaddress package.

        Call the self.convert_usaddress_dict() method on the output of
        usaddress.tag. We provide example code in the method to make this clear.

        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'ADDRESS_NUMBER'
                'ADDRESS_NUMBER_PREFIX'
                'ADDRESS_NUMBER_SUFFIX'
                'BUILDING_NAME'
                'CORNER_OF'
                'INTERSECTION_SEPARATOR'
                'LANDMARK_NAME'
                'NOT_ADDRESS'
                'OCCUPANCY_TYPE'
                'OCCUPANCY_IDENTIFIER'
                'PLACE_NAME'
                'STATE_NAME'
                'STREET_NAME'
                'STREET_NAME_PRE_DIRECTIONAL'
                'STREET_NAME_PRE_MODIFIER'
                'STREET_NAME_PRE_TYPE'
                'STREET_NAME_POST_DIRECTIONAL'
                'STREET_NAME_POST_MODIFIER'
                'STREET_NAME_POST_TYPE'
                'SUBADDRESS_IDENTIFIER'
                'SUBADDRESS_TYPE'
                'USPS_BOX_GROUP_ID'
                'USPS_BOX_GROUP_TYPE'
                'USPS_BOX_ID'
                'USPS_BOX_TYPE'
                'ZIP_CODE'
        """
        # columns to create address, in order
        address_components = [
            'res_street_address',
        ]
        # create address string for usaddress.tag
        address_str = ' '.join([
            input_dict[x] for x in address_components if input_dict[x] is not None
        ])

        # res_street_address contains #<description> in some entries.
        # This is removed until we know we should not remove it.
        res_street_addr = input_dict['res_street_address']
        for pattern in self.hashtag_patterns:
            if pattern in res_street_addr:
                res_street_addr = ' '.join(res_street_addr.split(pattern))
            if pattern in address_str:
                address_str = ' '.join(address_str.split(pattern))

        # save the raw information too
        raw_dict = {
            'RAW_ADDR1': res_street_addr,
            'RAW_ADDR2': None,
            'RAW_CITY': input_dict['res_city_desc'],
            'RAW_ZIP': input_dict['zip_code']
        }

        # Blank state defaults to NC.
        state_name = input_dict['state_cd']
        if len(state_name.strip()) == 0:
            state_name = 'NC'

        # use the usaddress_tag method to handle errors
        usaddress_dict = self.usaddress_tag(address_str)[0]

        # use the convert_usaddress_dict to get correct column names
        # and fill in missing values
        if usaddress_dict:
            converted_addr = self.convert_usaddress_dict(usaddress_dict)
            converted_addr.update({
                'PLACE_NAME': raw_dict['RAW_CITY'],
                'STATE_NAME': state_name,
                'ZIP_CODE': raw_dict['RAW_ZIP'],
                'VALIDATION_STATUS': '2'
            })
            converted_addr.update(raw_dict)
        else:
            # Parsing failed: emit an empty address, keep the raw fields.
            converted_addr = self.constructEmptyResidentialAddress()
            converted_addr.update(raw_dict)
            converted_addr.update({
                'STATE_NAME': state_name,
                'VALIDATION_STATUS': '1'
            })

        return converted_addr

    def extract_county_code(self, input_dict):
        """Returns the NC county id for the voter."""
        return {'COUNTYCODE': input_dict['county_id']}

    def extract_mailing_address(self, input_dict):
        """
        Relies on the usaddress package.

        Inputs:
            input_dict: dictionary of form {colname: value} from raw data
        Outputs:
            Dictionary with following keys
                'MAIL_ADDRESS_LINE1'
                'MAIL_ADDRESS_LINE2'
                'MAIL_CITY'
                'MAIL_STATE'
                'MAIL_ZIP_CODE'
                'MAIL_COUNTRY'
        """
        # Only emit a mailing address when both line 1 and city are present.
        if input_dict['mail_addr1'].strip() and input_dict['mail_city'].strip():
            return {
                'MAIL_ADDRESS_LINE1': input_dict['mail_addr1'],
                'MAIL_ADDRESS_LINE2': " ".join([
                    input_dict['mail_addr2'],
                    input_dict['mail_addr3'],
                    input_dict['mail_addr4']]),
                'MAIL_CITY': input_dict['mail_city'],
                'MAIL_STATE': input_dict['mail_state'],
                'MAIL_ZIP_CODE': input_dict['mail_zipcode'],
                'MAIL_COUNTRY': 'USA'
            }
        else:
            return {}

    #### Political methods #####################################################

    def extract_state_voter_ref(self, input_dict):
        """Returns the state voter id, prefixed with 'NC'."""
        return {'STATE_VOTER_REF': 'NC' + input_dict['voter_reg_num']}

    def extract_county_voter_ref(self, input_dict):
        """County-level voter ref is not present in the NC file."""
        return {'COUNTY_VOTER_REF': None}

    def extract_registration_date(self, input_dict):
        """Returns registr_dt parsed via self.convert_date (date_format)."""
        return {'REGISTRATION_DATE': self.convert_date(input_dict['registr_dt'])}

    def extract_registration_status(self, input_dict):
        """Returns the raw voter status description."""
        return {'REGISTRATION_STATUS': input_dict['voter_status_desc']}

    def extract_absentee_type(self, input_dict):
        """Absentee type is not present in the NC file."""
        return {'ABSENTEE_TYPE': None}

    def extract_party(self, input_dict):
        """Maps the NC party code to the national party code."""
        # NOTE(review): party codes absent from north_carolina_party_map
        # raise KeyError here -- confirm the full domain of party_cd.
        return {'PARTY': self.north_carolina_party_map[input_dict['party_cd']]}

    def extract_congressional_dist(self, input_dict):
        """Returns the congressional district, blank normalized to None."""
        cong_dist = input_dict['cong_dist_abbrv']
        if cong_dist == ' ' or len(cong_dist) == 0:
            cong_dist = None
        return {'CONGRESSIONAL_DIST': cong_dist}

    def extract_upper_house_dist(self, input_dict):
        """Returns the NC senate district abbreviation."""
        return {'UPPER_HOUSE_DIST': input_dict['nc_senate_abbrv']}

    def extract_lower_house_dist(self, input_dict):
        """Returns the NC house district abbreviation."""
        return {'LOWER_HOUSE_DIST': input_dict['nc_house_abbrv']}

    def extract_precinct(self, input_dict):
        """Returns the precinct; NC supplies no separate split value, so the
        precinct abbreviation is reused for PRECINCT_SPLIT."""
        precinct = input_dict['precinct_abbrv']
        precinct_split = precinct
        return {'PRECINCT': precinct,
                'PRECINCT_SPLIT': precinct_split}

    def extract_county_board_dist(self, input_dict):
        """County board district is not present in the NC file."""
        return {'COUNTY_BOARD_DIST': None}

    def extract_school_board_dist(self, input_dict):
        """Returns the school district abbreviation."""
        return {'SCHOOL_BOARD_DIST': input_dict['school_dist_abbrv']}
# Script entry point: prepare the file given on the command line.
if __name__ == '__main__':
    preparer = StatePreparer(*sys.argv[1:])
    # NOTE(review): process() is a generator function, so this call alone
    # does not consume any rows -- confirm whether iteration is expected
    # to happen elsewhere or is missing here.
    preparer.process()
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""WSGI server interface to Python runtime.
WSGI-compliant interface between the Python runtime and user-provided Python
code.
"""
import logging
import sys
import types
from google.appengine import runtime
from google.appengine.api import lib_config
_DEADLINE_DURING_LOADING = 22
class Error(Exception):
    """Base class for exceptions raised by this module."""
    pass
class InvalidResponseError(Error):
    """An error indicating that the response is invalid.

    Raised when the handler's response violates PEP 333 expectations, e.g.
    a non-str body chunk or a malformed status/headers argument.
    """
    pass
def _GetTypeName(x):
    """Returns a user-friendly name describing the given object's type."""
    # Old-style (Python 2) instances all share types.InstanceType, so report
    # their class name instead.
    if type(x) is types.InstanceType:
        return x.__class__.__name__
    return type(x).__name__
class WsgiRequest(object):
"""A single WSGI request."""
def __init__(self, environ, handler_name, url, post_data, error):
    """Creates a single WSGI request.

    Creates a request for handler_name in the form 'path.to.handler' for url
    with the environment contained in environ.

    Args:
      environ: A dict containing the environ for this request (e.g. like from
          os.environ).
      handler_name: A str containing the user-specified handler to use for this
          request as specified in the script field of a handler in app.yaml
          using the Python dot notation; e.g. 'package.module.application'.
      url: An urlparse.SplitResult instance containing the request url.
      post_data: A stream containing the post data for this request.
      error: A stream into which errors are to be written.
    """
    self._handler = handler_name
    # Default to 500 until the app successfully calls start_response.
    self._status = 500
    self._response_headers = []
    self._started_handling = False
    # _body holds the app's returned iterable chunks; _written_body holds
    # data passed through the PEP 333 write() callable.
    self._body = []
    self._written_body = []
    # Fixed runtime facts: multiple processes, each long-lived; thread
    # support only when the caller preset wsgi.multithread.
    environ['wsgi.multiprocess'] = True
    environ['wsgi.run_once'] = False
    environ['wsgi.version'] = (1, 0)
    environ.setdefault('wsgi.multithread', False)
    self._error = error
    environ['wsgi.url_scheme'] = url.scheme
    environ['wsgi.input'] = post_data
    environ['wsgi.errors'] = self._error
    self._environ = environ
def _Write(self, body_data):
    """Buffers body_data written via the PEP 333 write() callable.

    Args:
      body_data: data to be written.

    Raises:
      InvalidResponseError: body_data is not a str.
    """
    if isinstance(body_data, str):
        self._written_body.append(body_data)
        return
    raise InvalidResponseError('body_data must be a str, got %r' %
                               _GetTypeName(body_data))
def _StartResponse(self, status, response_headers, exc_info=None):
"""A PEP 333 start_response callable.
Implements the start_response behaviour of PEP 333. Sets the status code and
response headers as provided. If exc_info is not None, then the previously
provided status and response headers are replaced; this implementation
buffers the complete response so valid use of exc_info never raises an
exception. Otherwise, _StartResponse may only be called once.
Args:
status: A string containing the status code and status string.
response_headers: a list of pairs representing header keys and values.
exc_info: exception info as obtained from sys.exc_info().
Returns:
A Write method as per PEP 333.
Raises:
InvalidResponseError: The arguments passed are invalid.
"""
if not isinstance(status, str):
raise InvalidResponseError('status must be a str, got %r (%r)' %
(_GetTypeName(status), status))
if not status:
raise InvalidResponseError('status must not be empty')
if not isinstance(response_headers, list):
raise InvalidResponseError('response_headers must be a list, got %r' %
_GetTypeName(response_headers))
for header in response_headers:
if not isinstance(header, tuple):
raise InvalidResponseError('response_headers items must be tuple, '
'got %r' % _GetTypeName(header))
if len(header) != 2:
raise InvalidResponseError('header tuples must have length 2, '
'actual length %d' % len(header))
name, value = header
if not isinstance(name, str):
raise InvalidResponseError('header names must be str, got %r (%r)' %
(_GetTypeName(name), name))
if not isinstance(value, str):
raise InvalidResponseError('header values must be str, '
'got %r (%r) for %r' %
(_GetTypeName(value), value, name))
try:
status_number = int(status.split(' ')[0])
except ValueError:
raise InvalidResponseError('status code %r is not a number' % status)
if status_number < 200 or status_number >= 600:
raise InvalidResponseError('status code must be in the range [200,600), '
'got %d' % status_number)
if exc_info is not None:
self._status = status_number
self._response_headers = response_headers
exc_info = None
elif self._started_handling:
raise InvalidResponseError('_StartResponse may only be called once'
' without exc_info')
else:
self._status = status_number
self._response_headers = response_headers
self._started_handling = True
self._body = []
self._written_body = []
return self._Write
def Handle(self):
"""Handles the request represented by the WsgiRequest object.
Loads the handler from the handler name provided. Calls the handler with the
environ. Any exceptions in loading the user handler and executing it are
caught and logged.
Returns:
A dict containing:
error: App Engine error code. 0 for OK, 1 for error.
response_code: HTTP response code.
headers: A list of tuples (key, value) of HTTP headers.
body: A str of the body of the response
"""
try:
handler = _config_handle.add_wsgi_middleware(self._LoadHandler())
except runtime.DeadlineExceededError:
exc_info = sys.exc_info()
try:
logging.error('', exc_info=exc_info)
except runtime.DeadlineExceededError:
logging.exception('Deadline exception ocurred while logging a '
'deadline exception.')
logging.error('Original exception:', exc_info=exc_info)
return {'error': _DEADLINE_DURING_LOADING}
except:
logging.exception('')
return {'error': 1}
result = None
try:
result = handler(dict(self._environ), self._StartResponse)
for chunk in result:
if not isinstance(chunk, str):
raise InvalidResponseError('handler must return an iterable of str')
self._body.append(chunk)
body = ''.join(self._written_body + self._body)
return {'response_code': self._status, 'headers':
self._response_headers, 'body': body}
except:
logging.exception('')
return {'error': 1}
finally:
if hasattr(result, 'close'):
result.close()
def _LoadHandler(self):
"""Find and return a Python object with name handler_name.
Find and return a Python object specified by self._handler. Packages and
modules are imported as necessary. If successful, the filename of the module
is inserted into environ with key 'PATH_TRANSLATED' if it has one.
Returns:
A Python object.
Raises:
ImportError: An element of the path cannot be resolved.
"""
path = self._handler.split('.')
handler = __import__(path[0])
is_parent_package = True
cumulative_path = path[0]
for name in path[1:]:
if hasattr(handler, '__file__'):
self._environ['PATH_TRANSLATED'] = handler.__file__
is_parent_package = is_parent_package and hasattr(handler, '__path__')
cumulative_path += '.' + name
if hasattr(handler, name):
handler = getattr(handler, name)
elif is_parent_package:
__import__(cumulative_path)
handler = getattr(handler, name)
else:
raise ImportError('%s has no attribute %s' % (handler, name))
return handler
def HandleRequest(environ, handler_name, url, post_data, error):
  """Handle a single WSGI request.

  Convenience wrapper: builds a WsgiRequest for the handler named by
  handler_name ('path.to.handler' dot notation) and runs it to completion.

  Args:
    environ: A dict containing the environ for this request (e.g. like from
        os.environ).
    handler_name: A str containing the user-specified handler to use for this
        request as specified in the script field of a handler in app.yaml using
        the Python dot notation; e.g. 'package.module.application'.
    url: An urlparse.SplitResult instance containing the request url.
    post_data: A stream containing the post data for this request.
    error: A stream into which errors are to be written.

  Returns:
    A dict containing:
      error: App Engine error code. 0 for OK, 1 for error.
      response_code: HTTP response code.
      headers: A list of tuples (key, value) of HTTP headers.
      body: A str of the body of the response
  """
  request = WsgiRequest(environ, handler_name, url, post_data, error)
  return request.Handle()
# Hook point for applications to register WSGI middleware via lib_config
# (key 'webapp_add_wsgi_middleware'); the default wraps with the identity
# function, i.e. no middleware.
_config_handle = lib_config.register(
    'webapp',
    {'add_wsgi_middleware': lambda app: app})
| |
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
from urllib3._collections import HTTPHeaderDict
from openapi_client import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
# query params
QSchema = StrSchema

# Query parameters the caller must supply.
RequestRequiredQueryParams = typing.TypedDict(
    'RequestRequiredQueryParams',
    {
        'q': QSchema,
    }
)
# Query parameters the caller may omit (none for this endpoint).
RequestOptionalQueryParams = typing.TypedDict(
    'RequestOptionalQueryParams',
    {
    },
    total=False
)


class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
    pass


# Serialization rule for the 'q' query parameter (form style, exploded).
request_query_q = api_client.QueryParameter(
    name="q",
    style=api_client.ParameterStyle.FORM,
    schema=QSchema,
    required=True,
    explode=True,
)
# Endpoint constants used by Search.search below.
_path = '/blue/rest/search/'
_method = 'GET'
_auth = [
    'jenkins_auth',
]
# The 200 response body deserializes as a plain JSON string.
SchemaFor200ResponseBodyApplicationJson = StrSchema


@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationJson,
    ]
    headers: Unset = unset


_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationJson),
    },
)


# 401 (Unauthorized) and 403 (Forbidden) responses carry no body.
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset


_response_for_401 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor401,
)


@dataclass
class ApiResponseFor403(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset


_response_for_403 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor403,
)

# Maps HTTP status (as a string) to its deserialization rule.
_status_code_to_response = {
    '200': _response_for_200,
    '401': _response_for_401,
    '403': _response_for_403,
}
_all_accept_content_types = (
    'application/json',
)
class Search(api_client.Api):
    """Operation class for GET /blue/rest/search/ (generated client)."""

    def search(
        self: api_client.Api,
        query_params: RequestQueryParams = frozendict(),
        # NOTE: variadic tuple annotation; original generated code had
        # typing.Tuple[str], which denotes a 1-tuple.
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization
    ]:
        """
        Performs a GET against /blue/rest/search/ with the given query params.

        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        self._verify_typed_dict_inputs(RequestQueryParams, query_params)
        # Serialize each declared query parameter the caller provided.
        _query_params = []
        for parameter in (
            request_query_q,
        ):
            parameter_data = query_params.get(parameter.name, unset)
            if parameter_data is unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _query_params.extend(serialized_data)
        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        response = self.api_client.call_api(
            resource_path=_path,
            method=_method,
            query_params=tuple(_query_params),
            headers=_headers,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Unknown status codes fall back to the raw, undeserialized form.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        # Non-2xx responses raise, but with the (possibly deserialized)
        # response attached for inspection.
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)
        return api_response
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gym.spaces import Discrete
import numpy as np
from scipy.stats import entropy
import ray
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models import ModelCatalog, Categorical
from ray.rllib.utils.annotations import override
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.policy.tf_policy import TFPolicy, \
LearningRateSchedule
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils import try_import_tf
tf = try_import_tf()

# Variable scope names for the online and target Q networks.
Q_SCOPE = "q_func"
Q_TARGET_SCOPE = "target_q_func"

# Importance sampling weights for prioritized replay
PRIO_WEIGHTS = "weights"
class QLoss(object):
    """Builds the DQN TD loss.

    Two variants: scalar Huber-loss TD error (num_atoms == 1) or the
    distributional (C51-style) cross-entropy loss (num_atoms > 1). Exposes
    self.loss (scalar), self.td_error (per-sample) and self.stats.
    """

    def __init__(self,
                 q_t_selected,
                 q_logits_t_selected,
                 q_tp1_best,
                 q_dist_tp1_best,
                 importance_weights,
                 rewards,
                 done_mask,
                 gamma=0.99,
                 n_step=1,
                 num_atoms=1,
                 v_min=-10.0,
                 v_max=10.0):
        if num_atoms > 1:
            # Distributional Q-learning which corresponds to an entropy loss
            # Discrete support z: num_atoms evenly spaced values in
            # [v_min, v_max].
            z = tf.range(num_atoms, dtype=tf.float32)
            z = v_min + z * (v_max - v_min) / float(num_atoms - 1)

            # (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
            # Bellman-backed-up support; terminal transitions (done_mask == 1)
            # collapse to the raw reward.
            r_tau = tf.expand_dims(
                rewards, -1) + gamma**n_step * tf.expand_dims(
                    1.0 - done_mask, -1) * tf.expand_dims(z, 0)
            r_tau = tf.clip_by_value(r_tau, v_min, v_max)
            # Project the backed-up support onto the fixed atoms: b is the
            # fractional atom index, split between floor (lb) and ceil (ub).
            b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
            lb = tf.floor(b)
            ub = tf.ceil(b)

            # indispensable judgement which is missed in most implementations
            # when b happens to be an integer, lb == ub, so pr_j(s', a*) will
            # be discarded because (ub-b) == (b-lb) == 0
            floor_equal_ceil = tf.to_float(tf.less(ub - lb, 0.5))

            l_project = tf.one_hot(
                tf.cast(lb, dtype=tf.int32),
                num_atoms)  # (batch_size, num_atoms, num_atoms)
            u_project = tf.one_hot(
                tf.cast(ub, dtype=tf.int32),
                num_atoms)  # (batch_size, num_atoms, num_atoms)
            ml_delta = q_dist_tp1_best * (ub - b + floor_equal_ceil)
            mu_delta = q_dist_tp1_best * (b - lb)
            ml_delta = tf.reduce_sum(
                l_project * tf.expand_dims(ml_delta, -1), axis=1)
            mu_delta = tf.reduce_sum(
                u_project * tf.expand_dims(mu_delta, -1), axis=1)
            # m is the projected target distribution over the atoms.
            m = ml_delta + mu_delta

            # Rainbow paper claims that using this cross entropy loss for
            # priority is robust and insensitive to `prioritized_replay_alpha`
            self.td_error = tf.nn.softmax_cross_entropy_with_logits(
                labels=m, logits=q_logits_t_selected)
            self.loss = tf.reduce_mean(self.td_error * importance_weights)
            self.stats = {
                # TODO: better Q stats for dist dqn
                "mean_td_error": tf.reduce_mean(self.td_error),
            }
        else:
            # Mask out the bootstrap term for terminal transitions.
            q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best

            # compute RHS of bellman equation
            q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked

            # compute the error (potentially clipped)
            self.td_error = (
                q_t_selected - tf.stop_gradient(q_t_selected_target))
            self.loss = tf.reduce_mean(
                importance_weights * _huber_loss(self.td_error))
            self.stats = {
                "mean_q": tf.reduce_mean(q_t_selected),
                "min_q": tf.reduce_min(q_t_selected),
                "max_q": tf.reduce_max(q_t_selected),
                "mean_td_error": tf.reduce_mean(self.td_error),
            }
class QNetwork(object):
    """Builds the Q-value head(s) on top of a given RLlib model.

    Exposes:
      self.value:  per-action Q-values (expected value over atoms when
          num_atoms > 1).
      self.logits / self.dist: per-atom logits / probabilities in the
          distributional case; constant ones otherwise.
    """

    def __init__(self,
                 model,
                 num_actions,
                 dueling=False,
                 hiddens=[256],
                 use_noisy=False,
                 num_atoms=1,
                 v_min=-10.0,
                 v_max=10.0,
                 sigma0=0.5,
                 parameter_noise=False):
        self.model = model
        with tf.variable_scope("action_value"):
            if hiddens:
                action_out = model.last_layer
                for i in range(len(hiddens)):
                    if use_noisy:
                        # NoisyNet layer (exploration via learned noise).
                        action_out = self.noisy_layer(
                            "hidden_%d" % i, action_out, hiddens[i], sigma0)
                    elif parameter_noise:
                        import tensorflow.contrib.layers as layers
                        # Layer norm so parameter-space noise perturbs each
                        # layer's output at a consistent scale.
                        action_out = layers.fully_connected(
                            action_out,
                            num_outputs=hiddens[i],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=layers.layer_norm)
                    else:
                        action_out = tf.layers.dense(
                            action_out,
                            units=hiddens[i],
                            activation=tf.nn.relu)
            else:
                # Avoid postprocessing the outputs. This enables custom models
                # to be used for parametric action DQN.
                action_out = model.outputs
            if use_noisy:
                action_scores = self.noisy_layer(
                    "output",
                    action_out,
                    num_actions * num_atoms,
                    sigma0,
                    non_linear=False)
            elif hiddens:
                action_scores = tf.layers.dense(
                    action_out, units=num_actions * num_atoms, activation=None)
            else:
                action_scores = model.outputs
            if num_atoms > 1:
                # Distributional Q-learning uses a discrete support z
                # to represent the action value distribution
                z = tf.range(num_atoms, dtype=tf.float32)
                z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
                support_logits_per_action = tf.reshape(
                    tensor=action_scores, shape=(-1, num_actions, num_atoms))
                support_prob_per_action = tf.nn.softmax(
                    logits=support_logits_per_action)
                # Expected value over the support yields scalar Q-values.
                action_scores = tf.reduce_sum(
                    input_tensor=z * support_prob_per_action, axis=-1)
                self.logits = support_logits_per_action
                self.dist = support_prob_per_action
            else:
                self.logits = tf.expand_dims(tf.ones_like(action_scores), -1)
                self.dist = tf.expand_dims(tf.ones_like(action_scores), -1)
        if dueling:
            # Dueling architecture: separate state-value stream, combined
            # with the mean-centered advantage stream below.
            with tf.variable_scope("state_value"):
                state_out = model.last_layer
                for i in range(len(hiddens)):
                    if use_noisy:
                        state_out = self.noisy_layer("dueling_hidden_%d" % i,
                                                     state_out, hiddens[i],
                                                     sigma0)
                    elif parameter_noise:
                        state_out = tf.contrib.layers.fully_connected(
                            state_out,
                            num_outputs=hiddens[i],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=tf.contrib.layers.layer_norm)
                    else:
                        state_out = tf.layers.dense(
                            state_out, units=hiddens[i], activation=tf.nn.relu)
                if use_noisy:
                    state_score = self.noisy_layer(
                        "dueling_output",
                        state_out,
                        num_atoms,
                        sigma0,
                        non_linear=False)
                else:
                    state_score = tf.layers.dense(
                        state_out, units=num_atoms, activation=None)
            if num_atoms > 1:
                # NOTE: z and support_logits_per_action are carried over from
                # the num_atoms > 1 branch in the action_value scope above.
                support_logits_per_action_mean = tf.reduce_mean(
                    support_logits_per_action, 1)
                support_logits_per_action_centered = (
                    support_logits_per_action - tf.expand_dims(
                        support_logits_per_action_mean, 1))
                support_logits_per_action = tf.expand_dims(
                    state_score, 1) + support_logits_per_action_centered
                support_prob_per_action = tf.nn.softmax(
                    logits=support_logits_per_action)
                self.value = tf.reduce_sum(
                    input_tensor=z * support_prob_per_action, axis=-1)
                self.logits = support_logits_per_action
                self.dist = support_prob_per_action
            else:
                # Center advantages with a mean that ignores -inf-masked
                # (invalid) actions.
                action_scores_mean = _reduce_mean_ignore_inf(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(
                    action_scores_mean, 1)
                self.value = state_score + action_scores_centered
        else:
            self.value = action_scores

    def f_epsilon(self, x):
        # Factorized-Gaussian noise transform: f(x) = sign(x) * sqrt(|x|).
        return tf.sign(x) * tf.sqrt(tf.abs(x))

    def noisy_layer(self, prefix, action_in, out_size, sigma0,
                    non_linear=True):
        """
        a common dense layer: y = w^{T}x + b
        a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
            (b+\epsilon_b*\sigma_b)
        where \epsilon are random variables sampled from factorized normal
        distributions and \sigma are trainable variables which are expected to
        vanish along the training procedure
        """
        import tensorflow.contrib.layers as layers
        in_size = int(action_in.shape[1])
        epsilon_in = tf.random_normal(shape=[in_size])
        epsilon_out = tf.random_normal(shape=[out_size])
        epsilon_in = self.f_epsilon(epsilon_in)
        epsilon_out = self.f_epsilon(epsilon_out)
        # Factorized noise: outer product of input- and output-side vectors.
        epsilon_w = tf.matmul(
            a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0))
        epsilon_b = epsilon_out
        sigma_w = tf.get_variable(
            name=prefix + "_sigma_w",
            shape=[in_size, out_size],
            dtype=tf.float32,
            initializer=tf.random_uniform_initializer(
                minval=-1.0 / np.sqrt(float(in_size)),
                maxval=1.0 / np.sqrt(float(in_size))))
        # TF noise generation can be unreliable on GPU
        # If generating the noise on the CPU,
        # lowering sigma0 to 0.1 may be helpful
        sigma_b = tf.get_variable(
            name=prefix + "_sigma_b",
            shape=[out_size],
            dtype=tf.float32,  # 0.5~GPU, 0.1~CPU
            initializer=tf.constant_initializer(
                sigma0 / np.sqrt(float(in_size))))
        w = tf.get_variable(
            name=prefix + "_fc_w",
            shape=[in_size, out_size],
            dtype=tf.float32,
            initializer=layers.xavier_initializer())
        b = tf.get_variable(
            name=prefix + "_fc_b",
            shape=[out_size],
            dtype=tf.float32,
            initializer=tf.zeros_initializer())
        action_activation = tf.nn.xw_plus_b(action_in, w + sigma_w * epsilon_w,
                                            b + sigma_b * epsilon_b)
        if not non_linear:
            return action_activation
        return tf.nn.relu(action_activation)
class QValuePolicy(object):
    """Turns Q-values into an action op: softmax sampling or epsilon-greedy."""

    def __init__(self, q_values, observations, num_actions, stochastic, eps,
                 softmax, softmax_temp):
        if softmax:
            # Soft-Q: sample from a Boltzmann distribution over temperature-
            # scaled Q-values.
            action_dist = Categorical(q_values / softmax_temp)
            self.action = action_dist.sample()
            self.action_prob = action_dist.sampled_action_prob()
            return

        deterministic_actions = tf.argmax(q_values, axis=1)
        batch_size = tf.shape(observations)[0]

        # Special case masked out actions (q_value ~= -inf) so that we don't
        # even consider them for exploration.
        random_valid_action_logits = tf.where(
            tf.equal(q_values, tf.float32.min),
            tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
        random_actions = tf.squeeze(
            tf.multinomial(random_valid_action_logits, 1), axis=1)
        # With probability eps, replace the greedy action by a uniformly
        # sampled valid action.
        chose_random = tf.random_uniform(
            tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions,
                                      deterministic_actions)
        # `stochastic` is a runtime switch between exploring and greedy.
        self.action = tf.cond(stochastic, lambda: stochastic_actions,
                              lambda: deterministic_actions)
        self.action_prob = None
class ExplorationStateMixin(object):
    """Holds epsilon-greedy exploration state and its feed placeholders."""

    def __init__(self, obs_space, action_space, config):
        self.cur_epsilon = 1.0
        self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
        self.eps = tf.placeholder(tf.float32, (), name="eps")

    def add_parameter_noise(self):
        """Runs the add-noise op when parameter-space noise is enabled."""
        if self.config["parameter_noise"]:
            self.sess.run(self.add_noise_op)

    def set_epsilon(self, epsilon):
        """Sets the current exploration epsilon."""
        self.cur_epsilon = epsilon

    @override(Policy)
    def get_state(self):
        # Persist the TF state plus the current epsilon.
        base_state = TFPolicy.get_state(self)
        return [base_state, self.cur_epsilon]

    @override(Policy)
    def set_state(self, state):
        TFPolicy.set_state(self, state[0])
        self.set_epsilon(state[1])
class TargetNetworkMixin(object):
    """Provides update_target() to copy the online net into the target net."""

    def __init__(self, obs_space, action_space, config):
        # update_target_fn will be called periodically to copy Q network to
        # target Q network. One assign per variable pair, grouped so a single
        # session call syncs the whole network.
        assert len(self.q_func_vars) == len(self.target_q_func_vars), \
            (self.q_func_vars, self.target_q_func_vars)
        assign_ops = [
            tgt.assign(src)
            for src, tgt in zip(self.q_func_vars, self.target_q_func_vars)
        ]
        self.update_target_expr = tf.group(*assign_ops)

    def update_target(self):
        """Runs the grouped assign ops syncing target <- online."""
        return self.get_session().run(self.update_target_expr)
class ComputeTDErrorMixin(object):
    def compute_td_error(self, obs_t, act_t, rew_t, obs_tp1, done_mask,
                         importance_weights):
        """Evaluates the per-sample TD error for a batch of transitions.

        Returns zeros when the loss graph has not been built yet.
        """
        if not self.loss_initialized():
            return np.zeros_like(rew_t)
        feed = {
            self.get_placeholder(SampleBatch.CUR_OBS):
                [np.array(ob) for ob in obs_t],
            self.get_placeholder(SampleBatch.ACTIONS): act_t,
            self.get_placeholder(SampleBatch.REWARDS): rew_t,
            self.get_placeholder(SampleBatch.NEXT_OBS):
                [np.array(ob) for ob in obs_tp1],
            self.get_placeholder(SampleBatch.DONES): done_mask,
            self.get_placeholder(PRIO_WEIGHTS): importance_weights,
        }
        return self.get_session().run(self.loss.td_error, feed_dict=feed)
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Adapts the parameter-noise sigma (if enabled), then applies the
    standard DQN postprocessing (n-step folding, priorities)."""
    if policy.config["parameter_noise"]:
        # adjust the sigma of parameter space noise
        states = [list(x) for x in sample_batch.columns(["obs"])][0]

        # Measure action distributions with and without noise. NOTE: the
        # remove_noise_op below mutates the live network weights.
        noisy_action_distribution = policy.get_session().run(
            policy.action_probs, feed_dict={policy.cur_observations: states})
        policy.get_session().run(policy.remove_noise_op)
        clean_action_distribution = policy.get_session().run(
            policy.action_probs, feed_dict={policy.cur_observations: states})
        distance_in_action_space = np.mean(
            entropy(clean_action_distribution.T, noisy_action_distribution.T))
        policy.pi_distance = distance_in_action_space
        # Grow sigma while the noise perturbs the policy less than the
        # epsilon-greedy threshold, otherwise shrink it. The threshold
        # appears to follow the parameter-space-noise paper's heuristic --
        # TODO confirm against Plappert et al. (2017).
        if (distance_in_action_space <
                -np.log(1 - policy.cur_epsilon +
                        policy.cur_epsilon / policy.num_actions)):
            policy.parameter_noise_sigma_val *= 1.01
        else:
            policy.parameter_noise_sigma_val /= 1.01
        policy.parameter_noise_sigma.load(
            policy.parameter_noise_sigma_val, session=policy.get_session())

    return _postprocess_dqn(policy, sample_batch)
def build_q_networks(policy, input_dict, observation_space, action_space,
                     config):
    """Builds the online Q network and the action-sampling ops.

    Stores q_values, q_func_vars and (optionally) parameter-noise ops on
    `policy`, and returns (action op, action-prob op).

    Raises:
        UnsupportedSpaceException: if the action space is not Discrete.
    """
    if not isinstance(action_space, Discrete):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for DQN.".format(action_space))

    # Action Q network
    with tf.variable_scope(Q_SCOPE) as scope:
        q_values, q_logits, q_dist, _ = _build_q_network(
            policy, input_dict[SampleBatch.CUR_OBS], observation_space,
            action_space)
        policy.q_values = q_values
        policy.q_func_vars = _scope_vars(scope.name)

    # Noise vars for Q network except for layer normalization vars
    if config["parameter_noise"]:
        _build_parameter_noise(
            policy,
            [var for var in policy.q_func_vars if "LayerNorm" not in var.name])
        policy.action_probs = tf.nn.softmax(policy.q_values)

    # Action outputs
    qvp = QValuePolicy(q_values, input_dict[SampleBatch.CUR_OBS],
                       action_space.n, policy.stochastic, policy.eps,
                       policy.config["soft_q"], policy.config["softmax_temp"])
    policy.output_actions, policy.action_prob = qvp.action, qvp.action_prob
    return policy.output_actions, policy.action_prob
def _build_parameter_noise(policy, pnet_params):
    """Creates ops to add/remove Gaussian parameter noise on `pnet_params`.

    Stores on `policy`: parameter_noise_sigma (non-trainable scalar var),
    parameter_noise (per-weight noise vars), remove_noise_op, and
    add_noise_op (which resamples fresh noise before adding it).
    """
    policy.parameter_noise_sigma_val = 1.0
    policy.parameter_noise_sigma = tf.get_variable(
        initializer=tf.constant_initializer(policy.parameter_noise_sigma_val),
        name="parameter_noise_sigma",
        shape=(),
        trainable=False,
        dtype=tf.float32)
    policy.parameter_noise = list()
    # No need to add any noise on LayerNorm parameters
    for var in pnet_params:
        noise_var = tf.get_variable(
            name=var.name.split(":")[0] + "_noise",
            shape=var.shape,
            initializer=tf.constant_initializer(.0),
            trainable=False)
        policy.parameter_noise.append(noise_var)
    remove_noise_ops = list()
    for var, var_noise in zip(pnet_params, policy.parameter_noise):
        # Subtracting the stored noise restores the clean weights.
        remove_noise_ops.append(tf.assign_add(var, -var_noise))
    policy.remove_noise_op = tf.group(*tuple(remove_noise_ops))
    generate_noise_ops = list()
    for var_noise in policy.parameter_noise:
        generate_noise_ops.append(
            tf.assign(
                var_noise,
                tf.random_normal(
                    shape=var_noise.shape,
                    stddev=policy.parameter_noise_sigma)))
    # The control dependency forces fresh noise to be sampled before it is
    # added to the weights.
    with tf.control_dependencies(generate_noise_ops):
        add_noise_ops = list()
        for var, var_noise in zip(pnet_params, policy.parameter_noise):
            add_noise_ops.append(tf.assign_add(var, var_noise))
        policy.add_noise_op = tf.group(*tuple(add_noise_ops))
    policy.pi_distance = None
def build_q_losses(policy, batch_tensors):
    """Builds the DQN loss (optionally double-Q and/or distributional).

    Reuses the online net vars (Q_SCOPE) for the current observations,
    builds the target net (Q_TARGET_SCOPE) for the next observations, and
    stores the QLoss object on `policy.loss`. Returns the scalar loss.
    """
    # q network evaluation
    with tf.variable_scope(Q_SCOPE, reuse=True):
        # Track batch-norm update ops created while building this tower.
        prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
        q_t, q_logits_t, q_dist_t, model = _build_q_network(
            policy, batch_tensors[SampleBatch.CUR_OBS],
            policy.observation_space, policy.action_space)
        policy.q_batchnorm_update_ops = list(
            set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops)

    # target q network evalution
    with tf.variable_scope(Q_TARGET_SCOPE) as scope:
        q_tp1, q_logits_tp1, q_dist_tp1, _ = _build_q_network(
            policy, batch_tensors[SampleBatch.NEXT_OBS],
            policy.observation_space, policy.action_space)
        policy.target_q_func_vars = _scope_vars(scope.name)

    # q scores for actions which we know were selected in the given state.
    one_hot_selection = tf.one_hot(batch_tensors[SampleBatch.ACTIONS],
                                   policy.action_space.n)
    q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
    q_logits_t_selected = tf.reduce_sum(
        q_logits_t * tf.expand_dims(one_hot_selection, -1), 1)

    # compute estimate of best possible value starting from state at t + 1
    if policy.config["double_q"]:
        # Double DQN: select argmax action with the online net, evaluate it
        # with the target net.
        with tf.variable_scope(Q_SCOPE, reuse=True):
            q_tp1_using_online_net, q_logits_tp1_using_online_net, \
                q_dist_tp1_using_online_net, _ = _build_q_network(
                    policy,
                    batch_tensors[SampleBatch.NEXT_OBS],
                    policy.observation_space, policy.action_space)
        q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
        q_tp1_best_one_hot_selection = tf.one_hot(q_tp1_best_using_online_net,
                                                  policy.action_space.n)
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1)
    else:
        q_tp1_best_one_hot_selection = tf.one_hot(
            tf.argmax(q_tp1, 1), policy.action_space.n)
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1)

    policy.loss = _build_q_loss(
        q_t_selected, q_logits_t_selected, q_tp1_best, q_dist_tp1_best,
        batch_tensors[SampleBatch.REWARDS], batch_tensors[SampleBatch.DONES],
        batch_tensors[PRIO_WEIGHTS], policy.config)
    return policy.loss.loss
def adam_optimizer(policy, config):
    """Returns the Adam optimizer used for Q-network updates."""
    lr = policy.cur_lr
    eps = config["adam_epsilon"]
    return tf.train.AdamOptimizer(learning_rate=lr, epsilon=eps)
def clip_gradients(policy, optimizer, loss):
    """Computes gradients w.r.t. the Q vars, optionally norm-clipped.

    Pairs with a None gradient are filtered out in either case.
    """
    clip_val = policy.config["grad_norm_clipping"]
    if clip_val is None:
        grads_and_vars = optimizer.compute_gradients(
            loss, var_list=policy.q_func_vars)
    else:
        grads_and_vars = _minimize_and_clip(
            optimizer, loss, var_list=policy.q_func_vars, clip_val=clip_val)
    return [(g, v) for g, v in grads_and_vars if g is not None]
def exploration_setting_inputs(policy):
    """Extra feed-dict entries enabling stochastic (eps-greedy) sampling."""
    feed = {}
    feed[policy.stochastic] = True
    feed[policy.eps] = policy.cur_epsilon
    return feed
def build_q_stats(policy, batch_tensors):
    """Stats to log: the current learning rate plus the loss's own stats."""
    stats = {"cur_lr": tf.cast(policy.cur_lr, tf.float64)}
    stats.update(policy.loss.stats)
    return stats
def setup_early_mixins(policy, obs_space, action_space, config):
    # Runs before loss construction: creates the lr schedule and the
    # stochastic/eps placeholders used by the action sampler.
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    ExplorationStateMixin.__init__(policy, obs_space, action_space, config)
def setup_late_mixins(policy, obs_space, action_space, config):
    # Runs after the loss/variables exist: builds the target-net sync op.
    TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
def _build_q_network(policy, obs, obs_space, action_space):
    """Constructs a QNetwork over `obs`; returns (value, logits, dist, model)."""
    cfg = policy.config
    base_model = ModelCatalog.get_model({
        "obs": obs,
        "is_training": policy._get_is_training_placeholder(),
    }, obs_space, action_space, action_space.n, cfg["model"])
    qnet = QNetwork(base_model, action_space.n, cfg["dueling"],
                    cfg["hiddens"], cfg["noisy"], cfg["num_atoms"],
                    cfg["v_min"], cfg["v_max"], cfg["sigma0"],
                    cfg["parameter_noise"])
    return qnet.value, qnet.logits, qnet.dist, qnet.model
def _build_q_value_policy(policy, q_values):
    """Builds the action-sampling policy for the given Q-values.

    Returns (action op, action-prob op). Uses a distinct local for the
    QValuePolicy instance; the original implementation rebound the `policy`
    parameter, shadowing it.
    """
    qvp = QValuePolicy(q_values, policy.cur_observations,
                       policy.num_actions, policy.stochastic, policy.eps,
                       policy.config["soft_q"],
                       policy.config["softmax_temp"])
    return qvp.action, qvp.action_prob
def _build_q_loss(q_t_selected, q_logits_t_selected, q_tp1_best,
                  q_dist_tp1_best, rewards, dones, importance_weights, config):
    """Wraps QLoss construction, pulling hyperparameters from `config`."""
    done_mask = tf.cast(dones, tf.float32)
    return QLoss(
        q_t_selected,
        q_logits_t_selected,
        q_tp1_best,
        q_dist_tp1_best,
        importance_weights,
        rewards,
        done_mask,
        config["gamma"],
        config["n_step"],
        config["num_atoms"],
        config["v_min"],
        config["v_max"])
def _adjust_nstep(n_step, gamma, obs, actions, rewards, new_obs, dones):
"""Rewrites the given trajectory fragments to encode n-step rewards.
reward[i] = (
reward[i] * gamma**0 +
reward[i+1] * gamma**1 +
... +
reward[i+n_step-1] * gamma**(n_step-1))
The ith new_obs is also adjusted to point to the (i+n_step-1)'th new obs.
At the end of the trajectory, n is truncated to fit in the traj length.
"""
assert not any(dones[:-1]), "Unexpected done in middle of trajectory"
traj_length = len(rewards)
for i in range(traj_length):
for j in range(1, n_step):
if i + j < traj_length:
new_obs[i] = new_obs[i + j]
dones[i] = dones[i + j]
rewards[i] += gamma**j * rewards[i + j]
def _postprocess_dqn(policy, batch):
    """Applies n-step folding and (optionally) worker-side priorities."""
    cfg = policy.config

    # N-step Q adjustments
    if cfg["n_step"] > 1:
        _adjust_nstep(cfg["n_step"], cfg["gamma"],
                      batch[SampleBatch.CUR_OBS], batch[SampleBatch.ACTIONS],
                      batch[SampleBatch.REWARDS], batch[SampleBatch.NEXT_OBS],
                      batch[SampleBatch.DONES])

    # Default to uniform importance weights if none were provided.
    if PRIO_WEIGHTS not in batch:
        batch[PRIO_WEIGHTS] = np.ones_like(batch[SampleBatch.REWARDS])

    # Prioritize on the worker side
    if batch.count > 0 and cfg["worker_side_prioritization"]:
        td_errors = policy.compute_td_error(
            batch[SampleBatch.CUR_OBS], batch[SampleBatch.ACTIONS],
            batch[SampleBatch.REWARDS], batch[SampleBatch.NEXT_OBS],
            batch[SampleBatch.DONES], batch[PRIO_WEIGHTS])
        new_priorities = np.abs(td_errors) + cfg["prioritized_replay_eps"]
        batch.data[PRIO_WEIGHTS] = new_priorities
    return batch
def _reduce_mean_ignore_inf(x, axis):
    """Same as tf.reduce_mean() but ignores -inf values."""
    # Entries equal to float32 min are treated as masked-out and excluded
    # from both the sum and the divisor.
    valid = tf.not_equal(x, tf.float32.min)
    masked = tf.where(valid, x, tf.zeros_like(x))
    count = tf.reduce_sum(tf.cast(valid, tf.float32), axis)
    return tf.reduce_sum(masked, axis) / count
def _huber_loss(x, delta=1.0):
    """Elementwise Huber loss.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    """
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.square(x)
    linear = delta * (abs_x - 0.5 * delta)
    return tf.where(abs_x < delta, quadratic, linear)
def _minimize_and_clip(optimizer, objective, var_list, clip_val=10):
    """Computes gradients of `objective` w.r.t. `var_list`, clipping each
    gradient's norm to `clip_val`. Pairs with a None gradient are kept as-is.
    """
    return [
        (tf.clip_by_norm(grad, clip_val), var) if grad is not None
        else (grad, var)
        for grad, var in optimizer.compute_gradients(
            objective, var_list=var_list)
    ]
def _scope_vars(scope, trainable_only=False):
    """Get variables inside a scope.

    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as
        trainable.

    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    scope_name = scope if isinstance(scope, str) else scope.name
    if trainable_only:
        collection = tf.GraphKeys.TRAINABLE_VARIABLES
    else:
        collection = tf.GraphKeys.VARIABLES
    return tf.get_collection(collection, scope=scope_name)
# DQN policy assembled from the builder functions above. The mixins supply
# exploration state, target-network syncing, TD-error computation and the
# learning-rate schedule; before_init/after_init wire them up at the right
# point in the policy's construction.
DQNTFPolicy = build_tf_policy(
    name="DQNTFPolicy",
    get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
    make_action_sampler=build_q_networks,
    loss_fn=build_q_losses,
    stats_fn=build_q_stats,
    postprocess_fn=postprocess_trajectory,
    optimizer_fn=adam_optimizer,
    gradients_fn=clip_gradients,
    extra_action_feed_fn=exploration_setting_inputs,
    extra_action_fetches_fn=lambda policy: {"q_values": policy.q_values},
    extra_learn_fetches_fn=lambda policy: {"td_error": policy.loss.td_error},
    update_ops_fn=lambda policy: policy.q_batchnorm_update_ops,
    before_init=setup_early_mixins,
    after_init=setup_late_mixins,
    obs_include_prev_action_reward=False,
    mixins=[
        ExplorationStateMixin,
        TargetNetworkMixin,
        ComputeTDErrorMixin,
        LearningRateSchedule,
    ])
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from google.appengine._internal.antlr3 import *
from google.appengine._internal.antlr3.compat import set, frozenset
from google.appengine._internal.antlr3.tree import *
# Token type constants generated by ANTLR 3.1.1 from the query grammar.
# The numeric values must stay in sync with the matching generated lexer.
HIDDEN = BaseRecognizer.HIDDEN
FUNCTION=7
GEO_POINT_FN=29
FIX=30
ESC=34
FUZZY=8
OCTAL_ESC=36
NOT=27
AND=25
DISTANCE_FN=28
ESCAPED_CHAR=40
EOF=-1
LPAREN=23
HAS=22
RPAREN=24
QUOTE=33
CHAR_SEQ=37
START_CHAR=41
ARGS=4
DIGIT=38
EQ=21
NE=20
T__43=43
LESSTHAN=17
GE=18
T__44=44
T__45=45
CONJUNCTION=5
UNICODE_ESC=35
HEX_DIGIT=42
LITERAL=10
VALUE=14
TEXT=32
REWRITE=31
SEQUENCE=13
DISJUNCTION=6
WS=15
NEGATION=11
OR=26
GT=19
GLOBAL=9
LE=16
MID_CHAR=39
STRING=12
# Human-readable names for the token types above, indexed by token type;
# used by the runtime for error messages. Order must match the constants.
tokenNames = [
    "<invalid>", "<EOR>", "<DOWN>", "<UP>",
    "ARGS", "CONJUNCTION", "DISJUNCTION", "FUNCTION", "FUZZY", "GLOBAL",
    "LITERAL", "NEGATION", "STRING", "SEQUENCE", "VALUE", "WS", "LE", "LESSTHAN",
    "GE", "GT", "NE", "EQ", "HAS", "LPAREN", "RPAREN", "AND", "OR", "NOT",
    "DISTANCE_FN", "GEO_POINT_FN", "FIX", "REWRITE", "TEXT", "QUOTE", "ESC",
    "UNICODE_ESC", "OCTAL_ESC", "CHAR_SEQ", "DIGIT", "MID_CHAR", "ESCAPED_CHAR",
    "START_CHAR", "HEX_DIGIT", "'-'", "','", "'\\\\'"
]
class QueryParser(Parser):
grammarFileName = ""
antlr_version = version_str_to_tuple("3.1.1")
antlr_version_str = "3.1.1"
tokenNames = tokenNames
    def __init__(self, input, state=None):
        """Initialize the parser over a token *input* stream.

        Builds the DFA instances used for rule-alternative prediction
        (the DFA*_eot/... tables are generated class attributes defined
        elsewhere in this file) and installs a default tree adaptor.
        """
        if state is None:
            state = RecognizerSharedState()
        Parser.__init__(self, input, state)
        # Prediction DFAs; one per syntactic decision in the grammar.
        self.dfa3 = self.DFA3(
            self, 3,
            eot = self.DFA3_eot,
            eof = self.DFA3_eof,
            min = self.DFA3_min,
            max = self.DFA3_max,
            accept = self.DFA3_accept,
            special = self.DFA3_special,
            transition = self.DFA3_transition
            )
        self.dfa5 = self.DFA5(
            self, 5,
            eot = self.DFA5_eot,
            eof = self.DFA5_eof,
            min = self.DFA5_min,
            max = self.DFA5_max,
            accept = self.DFA5_accept,
            special = self.DFA5_special,
            transition = self.DFA5_transition
            )
        self.dfa6 = self.DFA6(
            self, 6,
            eot = self.DFA6_eot,
            eof = self.DFA6_eof,
            min = self.DFA6_min,
            max = self.DFA6_max,
            accept = self.DFA6_accept,
            special = self.DFA6_special,
            transition = self.DFA6_transition
            )
        self.dfa8 = self.DFA8(
            self, 8,
            eot = self.DFA8_eot,
            eof = self.DFA8_eof,
            min = self.DFA8_min,
            max = self.DFA8_max,
            accept = self.DFA8_accept,
            special = self.DFA8_special,
            transition = self.DFA8_transition
            )
        # Default adaptor builds plain CommonTree AST nodes.
        self._adaptor = CommonTreeAdaptor()
    def getTreeAdaptor(self):
        # Return the adaptor used to build AST nodes.
        return self._adaptor

    def setTreeAdaptor(self, adaptor):
        # Replace the adaptor (e.g. to produce custom node classes).
        self._adaptor = adaptor

    # Property view over the getter/setter pair above.
    adaptor = property(getTreeAdaptor, setTreeAdaptor)
    class query_return(ParserRuleReturnScope):
        # Return scope for the ``query`` rule; ``tree`` holds the rewritten AST.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def query(self, ):
        """Parse the top-level ``query`` rule: WS* expression WS* EOF.

        The rewrite keeps only the expression subtree, dropping the
        surrounding whitespace and EOF tokens.
        """
        retval = self.query_return()
        retval.start = self.input.LT(1)
        root_0 = None
        WS1 = None
        WS3 = None
        EOF4 = None
        expression2 = None
        WS1_tree = None
        WS3_tree = None
        EOF4_tree = None
        stream_WS = RewriteRuleTokenStream(self._adaptor, "token WS")
        stream_EOF = RewriteRuleTokenStream(self._adaptor, "token EOF")
        stream_expression = RewriteRuleSubtreeStream(self._adaptor, "rule expression")
        try:
            try:
                pass
                # Leading WS*
                while True:
                    alt1 = 2
                    LA1_0 = self.input.LA(1)
                    if (LA1_0 == WS) :
                        alt1 = 1
                    if alt1 == 1:
                        pass
                        WS1=self.match(self.input, WS, self.FOLLOW_WS_in_query112)
                        stream_WS.add(WS1)
                    else:
                        break
                self._state.following.append(self.FOLLOW_expression_in_query115)
                expression2 = self.expression()
                self._state.following.pop()
                stream_expression.add(expression2.tree)
                # Trailing WS*
                while True:
                    alt2 = 2
                    LA2_0 = self.input.LA(1)
                    if (LA2_0 == WS) :
                        alt2 = 1
                    if alt2 == 1:
                        pass
                        WS3=self.match(self.input, WS, self.FOLLOW_WS_in_query117)
                        stream_WS.add(WS3)
                    else:
                        break
                EOF4=self.match(self.input, EOF, self.FOLLOW_EOF_in_query120)
                stream_EOF.add(EOF4)
                # AST rewrite: -> expression
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                self._adaptor.addChild(root_0, stream_expression.nextTree())
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                # Standard ANTLR error recovery: report, resync, emit error node.
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class expression_return(ParserRuleReturnScope):
        # Return scope for the ``expression`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def expression(self, ):
        """Parse ``expression``: sequence (andOp sequence)*.

        Rewrites the matched sequences under a CONJUNCTION root node.
        """
        retval = self.expression_return()
        retval.start = self.input.LT(1)
        root_0 = None
        sequence5 = None
        andOp6 = None
        sequence7 = None
        stream_sequence = RewriteRuleSubtreeStream(self._adaptor, "rule sequence")
        stream_andOp = RewriteRuleSubtreeStream(self._adaptor, "rule andOp")
        try:
            try:
                pass
                self._state.following.append(self.FOLLOW_sequence_in_expression139)
                sequence5 = self.sequence()
                self._state.following.pop()
                stream_sequence.add(sequence5.tree)
                # (andOp sequence)* — membership decided by DFA 3.
                while True:
                    alt3 = 2
                    alt3 = self.dfa3.predict(self.input)
                    if alt3 == 1:
                        pass
                        self._state.following.append(self.FOLLOW_andOp_in_expression142)
                        andOp6 = self.andOp()
                        self._state.following.pop()
                        stream_andOp.add(andOp6.tree)
                        self._state.following.append(self.FOLLOW_sequence_in_expression144)
                        sequence7 = self.sequence()
                        self._state.following.pop()
                        stream_sequence.add(sequence7.tree)
                    else:
                        break
                # AST rewrite: -> ^(CONJUNCTION sequence+)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(CONJUNCTION, "CONJUNCTION"), root_1)
                if not (stream_sequence.hasNext()):
                    raise RewriteEarlyExitException()
                while stream_sequence.hasNext():
                    self._adaptor.addChild(root_1, stream_sequence.nextTree())
                stream_sequence.reset()
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class sequence_return(ParserRuleReturnScope):
        # Return scope for the ``sequence`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def sequence(self, ):
        """Parse ``sequence``: factor (WS+ factor)*.

        Rewrites the matched factors under a SEQUENCE root node.
        """
        retval = self.sequence_return()
        retval.start = self.input.LT(1)
        root_0 = None
        WS9 = None
        factor8 = None
        factor10 = None
        WS9_tree = None
        stream_WS = RewriteRuleTokenStream(self._adaptor, "token WS")
        stream_factor = RewriteRuleSubtreeStream(self._adaptor, "rule factor")
        try:
            try:
                pass
                self._state.following.append(self.FOLLOW_factor_in_sequence170)
                factor8 = self.factor()
                self._state.following.pop()
                stream_factor.add(factor8.tree)
                # (WS+ factor)* — membership decided by DFA 5.
                while True:
                    alt5 = 2
                    alt5 = self.dfa5.predict(self.input)
                    if alt5 == 1:
                        pass
                        # WS+ (at least one whitespace token required)
                        cnt4 = 0
                        while True:
                            alt4 = 2
                            LA4_0 = self.input.LA(1)
                            if (LA4_0 == WS) :
                                alt4 = 1
                            if alt4 == 1:
                                pass
                                WS9=self.match(self.input, WS, self.FOLLOW_WS_in_sequence173)
                                stream_WS.add(WS9)
                            else:
                                if cnt4 >= 1:
                                    break
                                eee = EarlyExitException(4, self.input)
                                raise eee
                            cnt4 += 1
                        self._state.following.append(self.FOLLOW_factor_in_sequence176)
                        factor10 = self.factor()
                        self._state.following.pop()
                        stream_factor.add(factor10.tree)
                    else:
                        break
                # AST rewrite: -> ^(SEQUENCE factor+)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(SEQUENCE, "SEQUENCE"), root_1)
                if not (stream_factor.hasNext()):
                    raise RewriteEarlyExitException()
                while stream_factor.hasNext():
                    self._adaptor.addChild(root_1, stream_factor.nextTree())
                stream_factor.reset()
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class factor_return(ParserRuleReturnScope):
        # Return scope for the ``factor`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def factor(self, ):
        """Parse ``factor``: term (orOp term)*.

        Rewrites the matched terms under a DISJUNCTION root node.
        """
        retval = self.factor_return()
        retval.start = self.input.LT(1)
        root_0 = None
        term11 = None
        orOp12 = None
        term13 = None
        stream_orOp = RewriteRuleSubtreeStream(self._adaptor, "rule orOp")
        stream_term = RewriteRuleSubtreeStream(self._adaptor, "rule term")
        try:
            try:
                pass
                self._state.following.append(self.FOLLOW_term_in_factor202)
                term11 = self.term()
                self._state.following.pop()
                stream_term.add(term11.tree)
                # (orOp term)* — membership decided by DFA 6.
                while True:
                    alt6 = 2
                    alt6 = self.dfa6.predict(self.input)
                    if alt6 == 1:
                        pass
                        self._state.following.append(self.FOLLOW_orOp_in_factor205)
                        orOp12 = self.orOp()
                        self._state.following.pop()
                        stream_orOp.add(orOp12.tree)
                        self._state.following.append(self.FOLLOW_term_in_factor207)
                        term13 = self.term()
                        self._state.following.pop()
                        stream_term.add(term13.tree)
                    else:
                        break
                # AST rewrite: -> ^(DISJUNCTION term+)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(DISJUNCTION, "DISJUNCTION"), root_1)
                if not (stream_term.hasNext()):
                    raise RewriteEarlyExitException()
                while stream_term.hasNext():
                    self._adaptor.addChild(root_1, stream_term.nextTree())
                stream_term.reset()
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class term_return(ParserRuleReturnScope):
        # Return scope for the ``term`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def term(self, ):
        """Parse ``term``: either notOp primitive (negated) or a bare primitive.

        A negated term is rewritten under a NEGATION root; the notOp's own
        tokens are dropped from the AST.
        """
        retval = self.term_return()
        retval.start = self.input.LT(1)
        root_0 = None
        notOp14 = None
        primitive15 = None
        primitive16 = None
        stream_notOp = RewriteRuleSubtreeStream(self._adaptor, "rule notOp")
        stream_primitive = RewriteRuleSubtreeStream(self._adaptor, "rule primitive")
        try:
            try:
                # Choose alternative by one-token lookahead:
                # NOT or '-' (token 43) starts a negation; otherwise a primitive.
                alt7 = 2
                LA7_0 = self.input.LA(1)
                if (LA7_0 == NOT or LA7_0 == 43) :
                    alt7 = 1
                elif (LA7_0 == LPAREN or (DISTANCE_FN <= LA7_0 <= QUOTE)) :
                    alt7 = 2
                else:
                    nvae = NoViableAltException("", 7, 0, self.input)
                    raise nvae
                if alt7 == 1:
                    pass
                    self._state.following.append(self.FOLLOW_notOp_in_term231)
                    notOp14 = self.notOp()
                    self._state.following.pop()
                    stream_notOp.add(notOp14.tree)
                    self._state.following.append(self.FOLLOW_primitive_in_term233)
                    primitive15 = self.primitive()
                    self._state.following.pop()
                    stream_primitive.add(primitive15.tree)
                    # AST rewrite: -> ^(NEGATION primitive)
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                    root_0 = self._adaptor.nil()
                    root_1 = self._adaptor.nil()
                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(NEGATION, "NEGATION"), root_1)
                    self._adaptor.addChild(root_1, stream_primitive.nextTree())
                    self._adaptor.addChild(root_0, root_1)
                    retval.tree = root_0
                elif alt7 == 2:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_primitive_in_term247)
                    primitive16 = self.primitive()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, primitive16.tree)
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class primitive_return(ParserRuleReturnScope):
        # Return scope for the ``primitive`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def primitive(self, ):
        """Parse ``primitive``: restriction | composite | item.

        A bare item is rewritten as ^(HAS GLOBAL item), i.e. a global-field
        containment match; alternative selection uses DFA 8.
        """
        retval = self.primitive_return()
        retval.start = self.input.LT(1)
        root_0 = None
        restriction17 = None
        composite18 = None
        item19 = None
        stream_item = RewriteRuleSubtreeStream(self._adaptor, "rule item")
        try:
            try:
                alt8 = 3
                alt8 = self.dfa8.predict(self.input)
                if alt8 == 1:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_restriction_in_primitive263)
                    restriction17 = self.restriction()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, restriction17.tree)
                elif alt8 == 2:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_composite_in_primitive269)
                    composite18 = self.composite()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, composite18.tree)
                elif alt8 == 3:
                    pass
                    self._state.following.append(self.FOLLOW_item_in_primitive275)
                    item19 = self.item()
                    self._state.following.pop()
                    stream_item.add(item19.tree)
                    # AST rewrite: -> ^(HAS GLOBAL item)
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                    root_0 = self._adaptor.nil()
                    root_1 = self._adaptor.nil()
                    root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(HAS, "HAS"), root_1)
                    self._adaptor.addChild(root_1, self._adaptor.createFromType(GLOBAL, "GLOBAL"))
                    self._adaptor.addChild(root_1, stream_item.nextTree())
                    self._adaptor.addChild(root_0, root_1)
                    retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class restriction_return(ParserRuleReturnScope):
        # Return scope for the ``restriction`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def restriction(self, ):
        """Parse ``restriction``: comparable comparator arg.

        Rewritten with the comparator token as the root and the comparable
        and argument as its two children.
        """
        retval = self.restriction_return()
        retval.start = self.input.LT(1)
        root_0 = None
        comparable20 = None
        comparator21 = None
        arg22 = None
        stream_arg = RewriteRuleSubtreeStream(self._adaptor, "rule arg")
        stream_comparable = RewriteRuleSubtreeStream(self._adaptor, "rule comparable")
        stream_comparator = RewriteRuleSubtreeStream(self._adaptor, "rule comparator")
        try:
            try:
                pass
                self._state.following.append(self.FOLLOW_comparable_in_restriction301)
                comparable20 = self.comparable()
                self._state.following.pop()
                stream_comparable.add(comparable20.tree)
                self._state.following.append(self.FOLLOW_comparator_in_restriction303)
                comparator21 = self.comparator()
                self._state.following.pop()
                stream_comparator.add(comparator21.tree)
                self._state.following.append(self.FOLLOW_arg_in_restriction305)
                arg22 = self.arg()
                self._state.following.pop()
                stream_arg.add(arg22.tree)
                # AST rewrite: -> ^(comparator comparable arg)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(stream_comparator.nextNode(), root_1)
                self._adaptor.addChild(root_1, stream_comparable.nextTree())
                self._adaptor.addChild(root_1, stream_arg.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class comparator_return(ParserRuleReturnScope):
        # Return scope for the ``comparator`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def comparator(self, ):
        """Parse ``comparator``: WS* (LE|LESSTHAN|GE|GT|NE|EQ|HAS) WS*.

        The matched operator token becomes the rule's entire AST; the
        surrounding whitespace is discarded by the rewrite.
        """
        retval = self.comparator_return()
        retval.start = self.input.LT(1)
        root_0 = None
        x = None
        WS23 = None
        WS24 = None
        x_tree = None
        WS23_tree = None
        WS24_tree = None
        stream_HAS = RewriteRuleTokenStream(self._adaptor, "token HAS")
        stream_LESSTHAN = RewriteRuleTokenStream(self._adaptor, "token LESSTHAN")
        stream_GE = RewriteRuleTokenStream(self._adaptor, "token GE")
        stream_GT = RewriteRuleTokenStream(self._adaptor, "token GT")
        stream_WS = RewriteRuleTokenStream(self._adaptor, "token WS")
        stream_EQ = RewriteRuleTokenStream(self._adaptor, "token EQ")
        stream_LE = RewriteRuleTokenStream(self._adaptor, "token LE")
        stream_NE = RewriteRuleTokenStream(self._adaptor, "token NE")
        try:
            try:
                pass
                # Leading WS*
                while True:
                    alt9 = 2
                    LA9_0 = self.input.LA(1)
                    if (LA9_0 == WS) :
                        alt9 = 1
                    if alt9 == 1:
                        pass
                        WS23=self.match(self.input, WS, self.FOLLOW_WS_in_comparator329)
                        stream_WS.add(WS23)
                    else:
                        break
                # Operator selection by one-token lookahead.
                alt10 = 7
                LA10 = self.input.LA(1)
                if LA10 == LE:
                    alt10 = 1
                elif LA10 == LESSTHAN:
                    alt10 = 2
                elif LA10 == GE:
                    alt10 = 3
                elif LA10 == GT:
                    alt10 = 4
                elif LA10 == NE:
                    alt10 = 5
                elif LA10 == EQ:
                    alt10 = 6
                elif LA10 == HAS:
                    alt10 = 7
                else:
                    nvae = NoViableAltException("", 10, 0, self.input)
                    raise nvae
                if alt10 == 1:
                    pass
                    x=self.match(self.input, LE, self.FOLLOW_LE_in_comparator335)
                    stream_LE.add(x)
                elif alt10 == 2:
                    pass
                    x=self.match(self.input, LESSTHAN, self.FOLLOW_LESSTHAN_in_comparator341)
                    stream_LESSTHAN.add(x)
                elif alt10 == 3:
                    pass
                    x=self.match(self.input, GE, self.FOLLOW_GE_in_comparator347)
                    stream_GE.add(x)
                elif alt10 == 4:
                    pass
                    x=self.match(self.input, GT, self.FOLLOW_GT_in_comparator353)
                    stream_GT.add(x)
                elif alt10 == 5:
                    pass
                    x=self.match(self.input, NE, self.FOLLOW_NE_in_comparator359)
                    stream_NE.add(x)
                elif alt10 == 6:
                    pass
                    x=self.match(self.input, EQ, self.FOLLOW_EQ_in_comparator365)
                    stream_EQ.add(x)
                elif alt10 == 7:
                    pass
                    x=self.match(self.input, HAS, self.FOLLOW_HAS_in_comparator371)
                    stream_HAS.add(x)
                # Trailing WS*
                while True:
                    alt11 = 2
                    LA11_0 = self.input.LA(1)
                    if (LA11_0 == WS) :
                        alt11 = 1
                    if alt11 == 1:
                        pass
                        WS24=self.match(self.input, WS, self.FOLLOW_WS_in_comparator374)
                        stream_WS.add(WS24)
                    else:
                        break
                # AST rewrite: -> $x (the operator token alone)
                retval.tree = root_0
                stream_x = RewriteRuleTokenStream(self._adaptor, "token x", x)
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                self._adaptor.addChild(root_0, stream_x.nextNode())
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class comparable_return(ParserRuleReturnScope):
        # Return scope for the ``comparable`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def comparable(self, ):
        """Parse ``comparable``: item | function.

        DISTANCE_FN/GEO_POINT_FN are ambiguous between a plain item and a
        function call, so a second lookahead token disambiguates: LPAREN
        means a function call, a comparator-range token means an item.
        """
        retval = self.comparable_return()
        retval.start = self.input.LT(1)
        root_0 = None
        item25 = None
        function26 = None
        try:
            try:
                alt12 = 2
                LA12 = self.input.LA(1)
                if LA12 == FIX or LA12 == REWRITE or LA12 == TEXT or LA12 == QUOTE:
                    alt12 = 1
                elif LA12 == DISTANCE_FN:
                    LA12_2 = self.input.LA(2)
                    if ((WS <= LA12_2 <= HAS)) :
                        alt12 = 1
                    elif (LA12_2 == LPAREN) :
                        alt12 = 2
                    else:
                        nvae = NoViableAltException("", 12, 2, self.input)
                        raise nvae
                elif LA12 == GEO_POINT_FN:
                    LA12_3 = self.input.LA(2)
                    if ((WS <= LA12_3 <= HAS)) :
                        alt12 = 1
                    elif (LA12_3 == LPAREN) :
                        alt12 = 2
                    else:
                        nvae = NoViableAltException("", 12, 3, self.input)
                        raise nvae
                else:
                    nvae = NoViableAltException("", 12, 0, self.input)
                    raise nvae
                if alt12 == 1:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_item_in_comparable396)
                    item25 = self.item()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, item25.tree)
                elif alt12 == 2:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_function_in_comparable402)
                    function26 = self.function()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, function26.tree)
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class function_return(ParserRuleReturnScope):
        # Return scope for the ``function`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def function(self, ):
        """Parse ``function``: fnname LPAREN arglist RPAREN.

        Rewritten as ^(FUNCTION fnname ^(ARGS arglist)); the parentheses
        are dropped from the AST.
        """
        retval = self.function_return()
        retval.start = self.input.LT(1)
        root_0 = None
        LPAREN28 = None
        RPAREN30 = None
        fnname27 = None
        arglist29 = None
        LPAREN28_tree = None
        RPAREN30_tree = None
        stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN")
        stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN")
        stream_arglist = RewriteRuleSubtreeStream(self._adaptor, "rule arglist")
        stream_fnname = RewriteRuleSubtreeStream(self._adaptor, "rule fnname")
        try:
            try:
                pass
                self._state.following.append(self.FOLLOW_fnname_in_function417)
                fnname27 = self.fnname()
                self._state.following.pop()
                stream_fnname.add(fnname27.tree)
                LPAREN28=self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_function419)
                stream_LPAREN.add(LPAREN28)
                self._state.following.append(self.FOLLOW_arglist_in_function421)
                arglist29 = self.arglist()
                self._state.following.pop()
                stream_arglist.add(arglist29.tree)
                RPAREN30=self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_function423)
                stream_RPAREN.add(RPAREN30)
                # AST rewrite: -> ^(FUNCTION fnname ^(ARGS arglist))
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(FUNCTION, "FUNCTION"), root_1)
                self._adaptor.addChild(root_1, stream_fnname.nextTree())
                root_2 = self._adaptor.nil()
                root_2 = self._adaptor.becomeRoot(self._adaptor.createFromType(ARGS, "ARGS"), root_2)
                self._adaptor.addChild(root_2, stream_arglist.nextTree())
                self._adaptor.addChild(root_1, root_2)
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class arglist_return(ParserRuleReturnScope):
        # Return scope for the ``arglist`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def arglist(self, ):
        """Parse ``arglist``: (arg (sep arg)*)? — possibly empty.

        An empty list (next token RPAREN) yields an empty (nil) tree;
        otherwise the args are emitted as a flat list with separators
        dropped.
        """
        retval = self.arglist_return()
        retval.start = self.input.LT(1)
        root_0 = None
        arg31 = None
        sep32 = None
        arg33 = None
        stream_arg = RewriteRuleSubtreeStream(self._adaptor, "rule arg")
        stream_sep = RewriteRuleSubtreeStream(self._adaptor, "rule sep")
        try:
            try:
                alt14 = 2
                LA14_0 = self.input.LA(1)
                if (LA14_0 == LPAREN or (DISTANCE_FN <= LA14_0 <= QUOTE)) :
                    alt14 = 1
                elif (LA14_0 == RPAREN) :
                    alt14 = 2
                else:
                    nvae = NoViableAltException("", 14, 0, self.input)
                    raise nvae
                if alt14 == 1:
                    pass
                    self._state.following.append(self.FOLLOW_arg_in_arglist452)
                    arg31 = self.arg()
                    self._state.following.pop()
                    stream_arg.add(arg31.tree)
                    # (sep arg)* — ',' is token 44.
                    while True:
                        alt13 = 2
                        LA13_0 = self.input.LA(1)
                        if (LA13_0 == WS or LA13_0 == 44) :
                            alt13 = 1
                        if alt13 == 1:
                            pass
                            self._state.following.append(self.FOLLOW_sep_in_arglist455)
                            sep32 = self.sep()
                            self._state.following.pop()
                            stream_sep.add(sep32.tree)
                            self._state.following.append(self.FOLLOW_arg_in_arglist457)
                            arg33 = self.arg()
                            self._state.following.pop()
                            stream_arg.add(arg33.tree)
                        else:
                            break
                    # AST rewrite: -> arg* (separators dropped)
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                    root_0 = self._adaptor.nil()
                    while stream_arg.hasNext():
                        self._adaptor.addChild(root_0, stream_arg.nextTree())
                    stream_arg.reset();
                    retval.tree = root_0
                elif alt14 == 2:
                    pass
                    root_0 = self._adaptor.nil()
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class arg_return(ParserRuleReturnScope):
        # Return scope for the ``arg`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def arg(self, ):
        """Parse ``arg``: item | composite | function.

        As in ``comparable``, a DISTANCE_FN/GEO_POINT_FN token is a
        function call only when followed by LPAREN; otherwise it is a
        plain item.
        """
        retval = self.arg_return()
        retval.start = self.input.LT(1)
        root_0 = None
        item34 = None
        composite35 = None
        function36 = None
        try:
            try:
                alt15 = 3
                LA15 = self.input.LA(1)
                if LA15 == FIX or LA15 == REWRITE or LA15 == TEXT or LA15 == QUOTE:
                    alt15 = 1
                elif LA15 == DISTANCE_FN:
                    LA15_2 = self.input.LA(2)
                    if (LA15_2 == EOF or LA15_2 == WS or LA15_2 == RPAREN or LA15_2 == 44) :
                        alt15 = 1
                    elif (LA15_2 == LPAREN) :
                        alt15 = 3
                    else:
                        nvae = NoViableAltException("", 15, 2, self.input)
                        raise nvae
                elif LA15 == GEO_POINT_FN:
                    LA15_3 = self.input.LA(2)
                    if (LA15_3 == EOF or LA15_3 == WS or LA15_3 == RPAREN or LA15_3 == 44) :
                        alt15 = 1
                    elif (LA15_3 == LPAREN) :
                        alt15 = 3
                    else:
                        nvae = NoViableAltException("", 15, 3, self.input)
                        raise nvae
                elif LA15 == LPAREN:
                    alt15 = 2
                else:
                    nvae = NoViableAltException("", 15, 0, self.input)
                    raise nvae
                if alt15 == 1:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_item_in_arg482)
                    item34 = self.item()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, item34.tree)
                elif alt15 == 2:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_composite_in_arg488)
                    composite35 = self.composite()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, composite35.tree)
                elif alt15 == 3:
                    pass
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_function_in_arg494)
                    function36 = self.function()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, function36.tree)
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class andOp_return(ParserRuleReturnScope):
        # Return scope for the ``andOp`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def andOp(self, ):
        """Parse ``andOp``: WS+ AND WS+ (no rewrite; tokens kept as a flat tree)."""
        retval = self.andOp_return()
        retval.start = self.input.LT(1)
        root_0 = None
        WS37 = None
        AND38 = None
        WS39 = None
        WS37_tree = None
        AND38_tree = None
        WS39_tree = None
        try:
            try:
                pass
                root_0 = self._adaptor.nil()
                # WS+ before AND
                cnt16 = 0
                while True:
                    alt16 = 2
                    LA16_0 = self.input.LA(1)
                    if (LA16_0 == WS) :
                        alt16 = 1
                    if alt16 == 1:
                        pass
                        WS37=self.match(self.input, WS, self.FOLLOW_WS_in_andOp508)
                        WS37_tree = self._adaptor.createWithPayload(WS37)
                        self._adaptor.addChild(root_0, WS37_tree)
                    else:
                        if cnt16 >= 1:
                            break
                        eee = EarlyExitException(16, self.input)
                        raise eee
                    cnt16 += 1
                AND38=self.match(self.input, AND, self.FOLLOW_AND_in_andOp511)
                AND38_tree = self._adaptor.createWithPayload(AND38)
                self._adaptor.addChild(root_0, AND38_tree)
                # WS+ after AND
                cnt17 = 0
                while True:
                    alt17 = 2
                    LA17_0 = self.input.LA(1)
                    if (LA17_0 == WS) :
                        alt17 = 1
                    if alt17 == 1:
                        pass
                        WS39=self.match(self.input, WS, self.FOLLOW_WS_in_andOp513)
                        WS39_tree = self._adaptor.createWithPayload(WS39)
                        self._adaptor.addChild(root_0, WS39_tree)
                    else:
                        if cnt17 >= 1:
                            break
                        eee = EarlyExitException(17, self.input)
                        raise eee
                    cnt17 += 1
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class orOp_return(ParserRuleReturnScope):
        # Return scope for the ``orOp`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def orOp(self, ):
        """Parse ``orOp``: WS+ OR WS+ (no rewrite; tokens kept as a flat tree)."""
        retval = self.orOp_return()
        retval.start = self.input.LT(1)
        root_0 = None
        WS40 = None
        OR41 = None
        WS42 = None
        WS40_tree = None
        OR41_tree = None
        WS42_tree = None
        try:
            try:
                pass
                root_0 = self._adaptor.nil()
                # WS+ before OR
                cnt18 = 0
                while True:
                    alt18 = 2
                    LA18_0 = self.input.LA(1)
                    if (LA18_0 == WS) :
                        alt18 = 1
                    if alt18 == 1:
                        pass
                        WS40=self.match(self.input, WS, self.FOLLOW_WS_in_orOp528)
                        WS40_tree = self._adaptor.createWithPayload(WS40)
                        self._adaptor.addChild(root_0, WS40_tree)
                    else:
                        if cnt18 >= 1:
                            break
                        eee = EarlyExitException(18, self.input)
                        raise eee
                    cnt18 += 1
                OR41=self.match(self.input, OR, self.FOLLOW_OR_in_orOp531)
                OR41_tree = self._adaptor.createWithPayload(OR41)
                self._adaptor.addChild(root_0, OR41_tree)
                # WS+ after OR
                cnt19 = 0
                while True:
                    alt19 = 2
                    LA19_0 = self.input.LA(1)
                    if (LA19_0 == WS) :
                        alt19 = 1
                    if alt19 == 1:
                        pass
                        WS42=self.match(self.input, WS, self.FOLLOW_WS_in_orOp533)
                        WS42_tree = self._adaptor.createWithPayload(WS42)
                        self._adaptor.addChild(root_0, WS42_tree)
                    else:
                        if cnt19 >= 1:
                            break
                        eee = EarlyExitException(19, self.input)
                        raise eee
                    cnt19 += 1
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class notOp_return(ParserRuleReturnScope):
        # Return scope for the ``notOp`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def notOp(self, ):
        """Parse ``notOp``: '-' (token 43) | NOT WS+.

        The '-' form needs no trailing whitespace; the NOT keyword form
        requires at least one whitespace token after it.
        """
        retval = self.notOp_return()
        retval.start = self.input.LT(1)
        root_0 = None
        char_literal43 = None
        NOT44 = None
        WS45 = None
        char_literal43_tree = None
        NOT44_tree = None
        WS45_tree = None
        try:
            try:
                alt21 = 2
                LA21_0 = self.input.LA(1)
                if (LA21_0 == 43) :
                    alt21 = 1
                elif (LA21_0 == NOT) :
                    alt21 = 2
                else:
                    nvae = NoViableAltException("", 21, 0, self.input)
                    raise nvae
                if alt21 == 1:
                    pass
                    root_0 = self._adaptor.nil()
                    char_literal43=self.match(self.input, 43, self.FOLLOW_43_in_notOp548)
                    char_literal43_tree = self._adaptor.createWithPayload(char_literal43)
                    self._adaptor.addChild(root_0, char_literal43_tree)
                elif alt21 == 2:
                    pass
                    root_0 = self._adaptor.nil()
                    NOT44=self.match(self.input, NOT, self.FOLLOW_NOT_in_notOp554)
                    NOT44_tree = self._adaptor.createWithPayload(NOT44)
                    self._adaptor.addChild(root_0, NOT44_tree)
                    # WS+ after NOT
                    cnt20 = 0
                    while True:
                        alt20 = 2
                        LA20_0 = self.input.LA(1)
                        if (LA20_0 == WS) :
                            alt20 = 1
                        if alt20 == 1:
                            pass
                            WS45=self.match(self.input, WS, self.FOLLOW_WS_in_notOp556)
                            WS45_tree = self._adaptor.createWithPayload(WS45)
                            self._adaptor.addChild(root_0, WS45_tree)
                        else:
                            if cnt20 >= 1:
                                break
                            eee = EarlyExitException(20, self.input)
                            raise eee
                        cnt20 += 1
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    class sep_return(ParserRuleReturnScope):
        # Return scope for the ``sep`` rule.
        def __init__(self):
            ParserRuleReturnScope.__init__(self)
            self.tree = None

    def sep(self, ):
        """Parse ``sep`` (argument separator): WS* ',' (token 44) WS*."""
        retval = self.sep_return()
        retval.start = self.input.LT(1)
        root_0 = None
        WS46 = None
        char_literal47 = None
        WS48 = None
        WS46_tree = None
        char_literal47_tree = None
        WS48_tree = None
        try:
            try:
                pass
                root_0 = self._adaptor.nil()
                # Leading WS*
                while True:
                    alt22 = 2
                    LA22_0 = self.input.LA(1)
                    if (LA22_0 == WS) :
                        alt22 = 1
                    if alt22 == 1:
                        pass
                        WS46=self.match(self.input, WS, self.FOLLOW_WS_in_sep571)
                        WS46_tree = self._adaptor.createWithPayload(WS46)
                        self._adaptor.addChild(root_0, WS46_tree)
                    else:
                        break
                char_literal47=self.match(self.input, 44, self.FOLLOW_44_in_sep574)
                char_literal47_tree = self._adaptor.createWithPayload(char_literal47)
                self._adaptor.addChild(root_0, char_literal47_tree)
                # Trailing WS*
                while True:
                    alt23 = 2
                    LA23_0 = self.input.LA(1)
                    if (LA23_0 == WS) :
                        alt23 = 1
                    if alt23 == 1:
                        pass
                        WS48=self.match(self.input, WS, self.FOLLOW_WS_in_sep576)
                        WS48_tree = self._adaptor.createWithPayload(WS48)
                        self._adaptor.addChild(root_0, WS48_tree)
                    else:
                        break
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
class fnname_return(ParserRuleReturnScope):
    """Generated return-scope for the `fnname` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree produced by fnname(); assigned after the rule finishes.
        self.tree = None
def fnname(self, ):
    """Generated rule: fnname : DISTANCE_FN .. GEO_POINT_FN (a set match).

    ANTLR3-generated — do not hand-edit.  Accepts any token whose type lies
    in the contiguous function-name range and adds it as a leaf of the AST.
    """
    retval = self.fnname_return()
    retval.start = self.input.LT(1)
    root_0 = None
    set49 = None
    set49_tree = None
    try:
        try:
            pass
            root_0 = self._adaptor.nil()
            set49 = self.input.LT(1)
            # Set match: consume if the lookahead token type is within the
            # function-name token range, otherwise raise for recovery.
            if (DISTANCE_FN <= self.input.LA(1) <= GEO_POINT_FN):
                self.input.consume()
                self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set49))
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
class composite_return(ParserRuleReturnScope):
    """Generated return-scope for the `composite` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree produced by composite(); assigned after the rule finishes.
        self.tree = None
def composite(self, ):
    """Generated rule: composite : LPAREN WS* expression WS* RPAREN -> expression.

    ANTLR3-generated — do not hand-edit.  Parses a parenthesized expression
    and, via the tree rewrite, returns only the inner expression subtree
    (the parentheses and whitespace are dropped from the AST).
    """
    retval = self.composite_return()
    retval.start = self.input.LT(1)
    root_0 = None
    LPAREN50 = None
    WS51 = None
    WS53 = None
    RPAREN54 = None
    expression52 = None
    LPAREN50_tree = None
    WS51_tree = None
    WS53_tree = None
    RPAREN54_tree = None
    # Rewrite streams buffer matched tokens/subtrees for the "-> expression" rewrite.
    stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN")
    stream_WS = RewriteRuleTokenStream(self._adaptor, "token WS")
    stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN")
    stream_expression = RewriteRuleSubtreeStream(self._adaptor, "rule expression")
    try:
        try:
            pass
            LPAREN50=self.match(self.input, LPAREN, self.FOLLOW_LPAREN_in_composite612)
            stream_LPAREN.add(LPAREN50)
            # ( WS )* after the opening parenthesis.
            while True:
                alt24 = 2
                LA24_0 = self.input.LA(1)
                if (LA24_0 == WS) :
                    alt24 = 1
                if alt24 == 1:
                    pass
                    WS51=self.match(self.input, WS, self.FOLLOW_WS_in_composite614)
                    stream_WS.add(WS51)
                else:
                    break
            self._state.following.append(self.FOLLOW_expression_in_composite617)
            expression52 = self.expression()
            self._state.following.pop()
            stream_expression.add(expression52.tree)
            # ( WS )* before the closing parenthesis.
            while True:
                alt25 = 2
                LA25_0 = self.input.LA(1)
                if (LA25_0 == WS) :
                    alt25 = 1
                if alt25 == 1:
                    pass
                    WS53=self.match(self.input, WS, self.FOLLOW_WS_in_composite619)
                    stream_WS.add(WS53)
                else:
                    break
            RPAREN54=self.match(self.input, RPAREN, self.FOLLOW_RPAREN_in_composite622)
            stream_RPAREN.add(RPAREN54)
            # AST rewrite: the result tree is just the inner expression.
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
            root_0 = self._adaptor.nil()
            self._adaptor.addChild(root_0, stream_expression.nextTree())
            retval.tree = root_0
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
class item_return(ParserRuleReturnScope):
    """Generated return-scope for the `item` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree produced by item(); assigned after the rule finishes.
        self.tree = None
def item(self, ):
    """Generated rule: item : FIX value | REWRITE value | value.

    ANTLR3-generated — do not hand-edit.  A FIX prefix rewrites the value
    under a LITERAL node, a REWRITE prefix under a FUZZY node; a bare
    value is passed through unchanged.
    """
    retval = self.item_return()
    retval.start = self.input.LT(1)
    root_0 = None
    FIX55 = None
    REWRITE57 = None
    value56 = None
    value58 = None
    value59 = None
    FIX55_tree = None
    REWRITE57_tree = None
    # Rewrite streams buffer matched tokens/subtrees for the tree rewrites.
    stream_FIX = RewriteRuleTokenStream(self._adaptor, "token FIX")
    stream_REWRITE = RewriteRuleTokenStream(self._adaptor, "token REWRITE")
    stream_value = RewriteRuleSubtreeStream(self._adaptor, "rule value")
    try:
        try:
            # Predict which alternative applies from one token of lookahead.
            alt26 = 3
            LA26 = self.input.LA(1)
            if LA26 == FIX:
                alt26 = 1
            elif LA26 == REWRITE:
                alt26 = 2
            elif LA26 == DISTANCE_FN or LA26 == GEO_POINT_FN or LA26 == TEXT or LA26 == QUOTE:
                alt26 = 3
            else:
                nvae = NoViableAltException("", 26, 0, self.input)
                raise nvae
            if alt26 == 1:
                # FIX value -> ^(LITERAL value)
                pass
                FIX55=self.match(self.input, FIX, self.FOLLOW_FIX_in_item642)
                stream_FIX.add(FIX55)
                self._state.following.append(self.FOLLOW_value_in_item644)
                value56 = self.value()
                self._state.following.pop()
                stream_value.add(value56.tree)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(LITERAL, "LITERAL"), root_1)
                self._adaptor.addChild(root_1, stream_value.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
            elif alt26 == 2:
                # REWRITE value -> ^(FUZZY value)
                pass
                REWRITE57=self.match(self.input, REWRITE, self.FOLLOW_REWRITE_in_item658)
                stream_REWRITE.add(REWRITE57)
                self._state.following.append(self.FOLLOW_value_in_item660)
                value58 = self.value()
                self._state.following.pop()
                stream_value.add(value58.tree)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(FUZZY, "FUZZY"), root_1)
                self._adaptor.addChild(root_1, stream_value.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
            elif alt26 == 3:
                # value -> value (pass-through)
                pass
                self._state.following.append(self.FOLLOW_value_in_item674)
                value59 = self.value()
                self._state.following.pop()
                stream_value.add(value59.tree)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                self._adaptor.addChild(root_0, stream_value.nextTree())
                retval.tree = root_0
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
class value_return(ParserRuleReturnScope):
    """Generated return-scope for the `value` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree produced by value(); assigned after the rule finishes.
        self.tree = None
def value(self, ):
    """Generated rule: value : text | phrase, rewritten under a VALUE node.

    ANTLR3-generated — do not hand-edit.  Bare text becomes
    ^(VALUE TEXT text); a quoted phrase becomes ^(VALUE STRING phrase).
    """
    retval = self.value_return()
    retval.start = self.input.LT(1)
    root_0 = None
    text60 = None
    phrase61 = None
    stream_text = RewriteRuleSubtreeStream(self._adaptor, "rule text")
    stream_phrase = RewriteRuleSubtreeStream(self._adaptor, "rule phrase")
    try:
        try:
            # One token of lookahead decides text vs. quoted phrase.
            alt27 = 2
            LA27_0 = self.input.LA(1)
            if ((DISTANCE_FN <= LA27_0 <= GEO_POINT_FN) or LA27_0 == TEXT) :
                alt27 = 1
            elif (LA27_0 == QUOTE) :
                alt27 = 2
            else:
                nvae = NoViableAltException("", 27, 0, self.input)
                raise nvae
            if alt27 == 1:
                # text -> ^(VALUE TEXT text)
                pass
                self._state.following.append(self.FOLLOW_text_in_value692)
                text60 = self.text()
                self._state.following.pop()
                stream_text.add(text60.tree)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
                self._adaptor.addChild(root_1, self._adaptor.createFromType(TEXT, "TEXT"))
                self._adaptor.addChild(root_1, stream_text.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
            elif alt27 == 2:
                # phrase -> ^(VALUE STRING phrase)
                pass
                self._state.following.append(self.FOLLOW_phrase_in_value708)
                phrase61 = self.phrase()
                self._state.following.pop()
                stream_phrase.add(phrase61.tree)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
                self._adaptor.addChild(root_1, self._adaptor.createFromType(STRING, "STRING"))
                self._adaptor.addChild(root_1, stream_phrase.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
class text_return(ParserRuleReturnScope):
    """Generated return-scope for the `text` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree produced by text(); assigned after the rule finishes.
        self.tree = None
def text(self, ):
    """Generated rule: text : TEXT | DISTANCE_FN | GEO_POINT_FN.

    ANTLR3-generated — do not hand-edit.  Function-name tokens used in a
    plain-text position are rewritten into ordinary TEXT nodes so a query
    word that happens to match a function name still parses as text.
    """
    retval = self.text_return()
    retval.start = self.input.LT(1)
    root_0 = None
    t = None
    TEXT62 = None
    t_tree = None
    TEXT62_tree = None
    stream_GEO_POINT_FN = RewriteRuleTokenStream(self._adaptor, "token GEO_POINT_FN")
    stream_DISTANCE_FN = RewriteRuleTokenStream(self._adaptor, "token DISTANCE_FN")
    try:
        try:
            # One token of lookahead selects the alternative.
            alt28 = 3
            LA28 = self.input.LA(1)
            if LA28 == TEXT:
                alt28 = 1
            elif LA28 == DISTANCE_FN:
                alt28 = 2
            elif LA28 == GEO_POINT_FN:
                alt28 = 3
            else:
                nvae = NoViableAltException("", 28, 0, self.input)
                raise nvae
            if alt28 == 1:
                # Plain TEXT token passes straight through.
                pass
                root_0 = self._adaptor.nil()
                TEXT62=self.match(self.input, TEXT, self.FOLLOW_TEXT_in_text732)
                TEXT62_tree = self._adaptor.createWithPayload(TEXT62)
                self._adaptor.addChild(root_0, TEXT62_tree)
            elif alt28 == 2:
                # DISTANCE_FN -> TEXT[t] (retyped as text).
                pass
                t=self.match(self.input, DISTANCE_FN, self.FOLLOW_DISTANCE_FN_in_text743)
                stream_DISTANCE_FN.add(t)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                self._adaptor.addChild(root_0, self._adaptor.create(TEXT, t))
                retval.tree = root_0
            elif alt28 == 3:
                # GEO_POINT_FN -> TEXT[t] (retyped as text).
                pass
                t=self.match(self.input, GEO_POINT_FN, self.FOLLOW_GEO_POINT_FN_in_text756)
                stream_GEO_POINT_FN.add(t)
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                self._adaptor.addChild(root_0, self._adaptor.create(TEXT, t))
                retval.tree = root_0
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
class phrase_return(ParserRuleReturnScope):
    """Generated return-scope for the `phrase` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree produced by phrase(); assigned after the rule finishes.
        self.tree = None
def phrase(self, ):
    """Generated rule: phrase : QUOTE ( any-non-quote-token )* QUOTE.

    ANTLR3-generated — do not hand-edit.  Matches a quoted string: an
    opening QUOTE, any run of tokens from the two in-phrase set ranges
    (everything except QUOTE itself), and a closing QUOTE.
    """
    retval = self.phrase_return()
    retval.start = self.input.LT(1)
    root_0 = None
    QUOTE63 = None
    set64 = None
    QUOTE65 = None
    QUOTE63_tree = None
    set64_tree = None
    QUOTE65_tree = None
    try:
        try:
            pass
            root_0 = self._adaptor.nil()
            QUOTE63=self.match(self.input, QUOTE, self.FOLLOW_QUOTE_in_phrase775)
            QUOTE63_tree = self._adaptor.createWithPayload(QUOTE63)
            self._adaptor.addChild(root_0, QUOTE63_tree)
            # ( set )* — loop while the lookahead is any in-phrase token.
            while True:
                alt29 = 2
                LA29_0 = self.input.LA(1)
                if ((ARGS <= LA29_0 <= TEXT) or (ESC <= LA29_0 <= 44)) :
                    alt29 = 1
                if alt29 == 1:
                    pass
                    set64 = self.input.LT(1)
                    if (ARGS <= self.input.LA(1) <= TEXT) or (ESC <= self.input.LA(1) <= 44):
                        self.input.consume()
                        self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set64))
                        self._state.errorRecovery = False
                    else:
                        mse = MismatchedSetException(None, self.input)
                        raise mse
                else:
                    break
            QUOTE65=self.match(self.input, QUOTE, self.FOLLOW_QUOTE_in_phrase793)
            QUOTE65_tree = self._adaptor.createWithPayload(QUOTE65)
            self._adaptor.addChild(root_0, QUOTE65_tree)
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
# --- Generated DFA prediction tables (ANTLR3) -------------------------------
# Each DFAn_* table is a packed array of state data (eot/eof/min/max/accept/
# special) plus a transition matrix, decoded at runtime by DFA.unpack.
# Do not hand-edit: the octal-escaped strings encode exact state numbers.
DFA3_eot = DFA.unpack(
    u"\4\uffff"
    )

DFA3_eof = DFA.unpack(
    u"\2\2\2\uffff"
    )

DFA3_min = DFA.unpack(
    u"\2\17\2\uffff"
    )

DFA3_max = DFA.unpack(
    u"\1\30\1\31\2\uffff"
    )

DFA3_accept = DFA.unpack(
    u"\2\uffff\1\2\1\1"
    )

DFA3_special = DFA.unpack(
    u"\4\uffff"
    )

DFA3_transition = [
    DFA.unpack(u"\1\1\10\uffff\1\2"),
    DFA.unpack(u"\1\1\10\uffff\1\2\1\3"),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# Class reference used by the generated constructor to instantiate DFA 3.
DFA3 = DFA

DFA5_eot = DFA.unpack(
    u"\4\uffff"
    )

DFA5_eof = DFA.unpack(
    u"\2\2\2\uffff"
    )

DFA5_min = DFA.unpack(
    u"\2\17\2\uffff"
    )

DFA5_max = DFA.unpack(
    u"\1\30\1\53\2\uffff"
    )

DFA5_accept = DFA.unpack(
    u"\2\uffff\1\2\1\1"
    )

DFA5_special = DFA.unpack(
    u"\4\uffff"
    )

DFA5_transition = [
    DFA.unpack(u"\1\1\10\uffff\1\2"),
    DFA.unpack(u"\1\1\7\uffff\1\3\2\2\1\uffff\7\3\11\uffff\1\3"),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# Class reference used by the generated constructor to instantiate DFA 5.
DFA5 = DFA

DFA6_eot = DFA.unpack(
    u"\4\uffff"
    )

DFA6_eof = DFA.unpack(
    u"\2\2\2\uffff"
    )

DFA6_min = DFA.unpack(
    u"\2\17\2\uffff"
    )

DFA6_max = DFA.unpack(
    u"\1\30\1\53\2\uffff"
    )

DFA6_accept = DFA.unpack(
    u"\2\uffff\1\2\1\1"
    )

DFA6_special = DFA.unpack(
    u"\4\uffff"
    )

DFA6_transition = [
    DFA.unpack(u"\1\1\10\uffff\1\2"),
    DFA.unpack(u"\1\1\7\uffff\3\2\1\3\7\2\11\uffff\1\2"),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# Class reference used by the generated constructor to instantiate DFA 6.
DFA6 = DFA

DFA8_eot = DFA.unpack(
    u"\31\uffff"
    )

DFA8_eof = DFA.unpack(
    u"\3\uffff\3\21\2\uffff\3\21\1\uffff\3\21\1\uffff\1\21\3\uffff\1"
    u"\21\1\uffff\1\21\1\uffff\1\21"
    )

DFA8_min = DFA.unpack(
    u"\1\27\2\34\3\17\1\4\1\uffff\3\17\1\4\3\17\1\4\1\17\2\uffff\1\4"
    u"\1\17\1\4\1\17\1\4\1\17"
    )

DFA8_max = DFA.unpack(
    u"\3\41\3\30\1\54\1\uffff\3\30\1\54\3\30\1\54\1\53\2\uffff\1\54\1"
    u"\30\1\54\1\30\1\54\1\30"
    )

DFA8_accept = DFA.unpack(
    u"\7\uffff\1\2\11\uffff\1\3\1\1\6\uffff"
    )

DFA8_special = DFA.unpack(
    u"\31\uffff"
    )

DFA8_transition = [
    DFA.unpack(u"\1\7\4\uffff\1\4\1\5\1\1\1\2\1\3\1\6"),
    DFA.unpack(u"\1\11\1\12\2\uffff\1\10\1\13"),
    DFA.unpack(u"\1\15\1\16\2\uffff\1\14\1\17"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\1\20\10\22\1\21"),
    DFA.unpack(u"\1\20\10\22\1\21"),
    DFA.unpack(u"\35\23\1\24\13\23"),
    DFA.unpack(u""),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\35\25\1\26\13\25"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\35\27\1\30\13\27"),
    DFA.unpack(u"\1\20\7\22\13\21\11\uffff\1\21"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"\35\23\1\24\13\23"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\35\25\1\26\13\25"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21"),
    DFA.unpack(u"\35\27\1\30\13\27"),
    DFA.unpack(u"\1\20\7\22\1\uffff\1\21")
]

# Class reference used by the generated constructor to instantiate DFA 8.
DFA8 = DFA
# --- Generated FOLLOW sets (ANTLR3) -----------------------------------------
# Each frozenset lists the token types that may legally follow the named
# reference at the given grammar position; used by match() for error
# recovery.  Do not hand-edit: the numbers are generated token type codes.
FOLLOW_WS_in_query112 = frozenset([15, 23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_expression_in_query115 = frozenset([15])
FOLLOW_WS_in_query117 = frozenset([15])
FOLLOW_EOF_in_query120 = frozenset([1])
FOLLOW_sequence_in_expression139 = frozenset([1, 15])
FOLLOW_andOp_in_expression142 = frozenset([23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_sequence_in_expression144 = frozenset([1, 15])
FOLLOW_factor_in_sequence170 = frozenset([1, 15])
FOLLOW_WS_in_sequence173 = frozenset([15, 23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_factor_in_sequence176 = frozenset([1, 15])
FOLLOW_term_in_factor202 = frozenset([1, 15])
FOLLOW_orOp_in_factor205 = frozenset([23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_term_in_factor207 = frozenset([1, 15])
FOLLOW_notOp_in_term231 = frozenset([23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_primitive_in_term233 = frozenset([1])
FOLLOW_primitive_in_term247 = frozenset([1])
FOLLOW_restriction_in_primitive263 = frozenset([1])
FOLLOW_composite_in_primitive269 = frozenset([1])
FOLLOW_item_in_primitive275 = frozenset([1])
FOLLOW_comparable_in_restriction301 = frozenset([15, 16, 17, 18, 19, 20, 21, 22])
FOLLOW_comparator_in_restriction303 = frozenset([23, 28, 29, 30, 31, 32, 33])
FOLLOW_arg_in_restriction305 = frozenset([1])
FOLLOW_WS_in_comparator329 = frozenset([15, 16, 17, 18, 19, 20, 21, 22])
FOLLOW_LE_in_comparator335 = frozenset([1, 15])
FOLLOW_LESSTHAN_in_comparator341 = frozenset([1, 15])
FOLLOW_GE_in_comparator347 = frozenset([1, 15])
FOLLOW_GT_in_comparator353 = frozenset([1, 15])
FOLLOW_NE_in_comparator359 = frozenset([1, 15])
FOLLOW_EQ_in_comparator365 = frozenset([1, 15])
FOLLOW_HAS_in_comparator371 = frozenset([1, 15])
FOLLOW_WS_in_comparator374 = frozenset([1, 15])
FOLLOW_item_in_comparable396 = frozenset([1])
FOLLOW_function_in_comparable402 = frozenset([1])
FOLLOW_fnname_in_function417 = frozenset([23])
FOLLOW_LPAREN_in_function419 = frozenset([23, 24, 28, 29, 30, 31, 32, 33])
FOLLOW_arglist_in_function421 = frozenset([24])
FOLLOW_RPAREN_in_function423 = frozenset([1])
FOLLOW_arg_in_arglist452 = frozenset([1, 15, 44])
FOLLOW_sep_in_arglist455 = frozenset([23, 28, 29, 30, 31, 32, 33])
FOLLOW_arg_in_arglist457 = frozenset([1, 15, 44])
FOLLOW_item_in_arg482 = frozenset([1])
FOLLOW_composite_in_arg488 = frozenset([1])
FOLLOW_function_in_arg494 = frozenset([1])
FOLLOW_WS_in_andOp508 = frozenset([15, 25])
FOLLOW_AND_in_andOp511 = frozenset([15])
FOLLOW_WS_in_andOp513 = frozenset([1, 15])
FOLLOW_WS_in_orOp528 = frozenset([15, 26])
FOLLOW_OR_in_orOp531 = frozenset([15])
FOLLOW_WS_in_orOp533 = frozenset([1, 15])
FOLLOW_43_in_notOp548 = frozenset([1])
FOLLOW_NOT_in_notOp554 = frozenset([15])
FOLLOW_WS_in_notOp556 = frozenset([1, 15])
FOLLOW_WS_in_sep571 = frozenset([15, 44])
FOLLOW_44_in_sep574 = frozenset([1, 15])
FOLLOW_WS_in_sep576 = frozenset([1, 15])
FOLLOW_set_in_fnname0 = frozenset([1])
FOLLOW_LPAREN_in_composite612 = frozenset([15, 23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_WS_in_composite614 = frozenset([15, 23, 27, 28, 29, 30, 31, 32, 33, 43])
FOLLOW_expression_in_composite617 = frozenset([15, 24])
FOLLOW_WS_in_composite619 = frozenset([15, 24])
FOLLOW_RPAREN_in_composite622 = frozenset([1])
FOLLOW_FIX_in_item642 = frozenset([28, 29, 30, 31, 32, 33])
FOLLOW_value_in_item644 = frozenset([1])
FOLLOW_REWRITE_in_item658 = frozenset([28, 29, 30, 31, 32, 33])
FOLLOW_value_in_item660 = frozenset([1])
FOLLOW_value_in_item674 = frozenset([1])
FOLLOW_text_in_value692 = frozenset([1])
FOLLOW_phrase_in_value708 = frozenset([1])
FOLLOW_TEXT_in_text732 = frozenset([1])
FOLLOW_DISTANCE_FN_in_text743 = frozenset([1])
FOLLOW_GEO_POINT_FN_in_text756 = frozenset([1])
FOLLOW_QUOTE_in_phrase775 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44])
FOLLOW_set_in_phrase777 = frozenset([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44])
FOLLOW_QUOTE_in_phrase793 = frozenset([1])
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
    """Run the generated parser from the command line via ANTLR's ParserMain.

    The stdin/stdout/stderr keyword arguments default to the process streams
    (bound at import time, as in the generated original) and are forwarded to
    the ParserMain driver before it executes.
    """
    from google.appengine._internal.antlr3.main import ParserMain

    driver = ParserMain("QueryLexer", QueryParser)
    driver.stdin = stdin
    driver.stdout = stdout
    driver.stderr = stderr
    driver.execute(argv)
# Allow the generated parser module to be exercised directly from a shell.
if __name__ == '__main__':
    main(sys.argv)
# --- end of generated query-parser module -----------------------------------
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line interface to the OpenStack Identity API."""
from __future__ import print_function
import argparse
import logging
import os
import sys
import warnings
from oslo_utils import encodeutils
import six
import keystoneclient
from keystoneclient import access
from keystoneclient.contrib.bootstrap import shell as shell_bootstrap
from keystoneclient import exceptions as exc
from keystoneclient.generic import shell as shell_generic
from keystoneclient import session
from keystoneclient import utils
from keystoneclient.v2_0 import shell as shell_v2_0
def env(*args, **kwargs):
    """Return the value of the first defined environment variable.

    Each positional argument names an environment variable; the value of
    the first one that is set to a non-empty string is returned.  If none
    match, the ``default`` keyword argument is returned (empty string when
    not supplied).
    """
    # NOTE: the varargs parameter was renamed from ``vars`` to avoid
    # shadowing the ``vars()`` builtin; callers pass names positionally,
    # so the interface is unchanged.
    for name in args:
        value = os.environ.get(name)
        if value:
            return value
    return kwargs.get('default', '')
class OpenStackIdentityShell(object):
    """Command-line shell for the (deprecated) ``keystone`` CLI.

    Builds the argparse tree for the global options and per-version
    subcommands, validates authentication arguments, constructs the
    appropriate client, and dispatches to the selected subcommand callback.
    """

    def __init__(self, parser_class=argparse.ArgumentParser):
        # Since Python 2.7, DeprecationWarning is ignored by default, enable
        # it so that the deprecation message is displayed.
        warnings.simplefilter('once', category=DeprecationWarning)
        warnings.warn(
            'The keystone CLI is deprecated in favor of '
            'python-openstackclient. For a Python library, continue using '
            'python-keystoneclient.', DeprecationWarning)
        # And back to normal!
        warnings.resetwarnings()
        # Stored so tests can inject a parser class; used by get_base_parser.
        self.parser_class = parser_class

    def get_base_parser(self):
        """Build and return the parser holding the global options.

        Option defaults are pulled from the usual OS_* environment
        variables via env(); each ``--os-foo`` option also has a hidden,
        deprecated ``--os_foo`` spelling for backward compatibility.
        """
        parser = self.parser_class(
            prog='keystone',
            description=__doc__.strip(),
            epilog='See "keystone help COMMAND" '
                   'for help on a specific command.',
            add_help=False,
            formatter_class=OpenStackHelpFormatter,
        )

        # Global arguments
        parser.add_argument('-h',
                            '--help',
                            action='store_true',
                            help=argparse.SUPPRESS)

        parser.add_argument('--version',
                            action='version',
                            version=keystoneclient.__version__,
                            help="Shows the client version and exits.")

        parser.add_argument('--debug',
                            default=False,
                            action='store_true',
                            help="Prints debugging output onto the console, "
                                 "this includes the curl request and response "
                                 "calls. Helpful for debugging and "
                                 "understanding the API calls.")

        parser.add_argument('--os-username',
                            metavar='<auth-user-name>',
                            default=env('OS_USERNAME'),
                            help='Name used for authentication with the '
                                 'OpenStack Identity service. '
                                 'Defaults to env[OS_USERNAME].')
        parser.add_argument('--os_username',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-password',
                            metavar='<auth-password>',
                            default=env('OS_PASSWORD'),
                            help='Password used for authentication with the '
                                 'OpenStack Identity service. '
                                 'Defaults to env[OS_PASSWORD].')
        parser.add_argument('--os_password',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-tenant-name',
                            metavar='<auth-tenant-name>',
                            default=env('OS_TENANT_NAME'),
                            help='Tenant to request authorization on. '
                                 'Defaults to env[OS_TENANT_NAME].')
        parser.add_argument('--os_tenant_name',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-tenant-id',
                            metavar='<tenant-id>',
                            default=env('OS_TENANT_ID'),
                            help='Tenant to request authorization on. '
                                 'Defaults to env[OS_TENANT_ID].')
        parser.add_argument('--os_tenant_id',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-auth-url',
                            metavar='<auth-url>',
                            default=env('OS_AUTH_URL'),
                            help='Specify the Identity endpoint to use for '
                                 'authentication. '
                                 'Defaults to env[OS_AUTH_URL].')
        parser.add_argument('--os_auth_url',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-region-name',
                            metavar='<region-name>',
                            default=env('OS_REGION_NAME'),
                            help='Specify the region to use. '
                                 'Defaults to env[OS_REGION_NAME].')
        parser.add_argument('--os_region_name',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-identity-api-version',
                            metavar='<identity-api-version>',
                            default=env('OS_IDENTITY_API_VERSION',
                                        'KEYSTONE_VERSION'),
                            help='Specify Identity API version to use. '
                                 'Defaults to env[OS_IDENTITY_API_VERSION]'
                                 ' or 2.0.')
        parser.add_argument('--os_identity_api_version',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-token',
                            metavar='<service-token>',
                            default=env('OS_SERVICE_TOKEN'),
                            help='Specify an existing token to use instead of '
                                 'retrieving one via authentication (e.g. '
                                 'with username & password). '
                                 'Defaults to env[OS_SERVICE_TOKEN].')

        parser.add_argument('--os-endpoint',
                            metavar='<service-endpoint>',
                            default=env('OS_SERVICE_ENDPOINT'),
                            help='Specify an endpoint to use instead of '
                                 'retrieving one from the service catalog '
                                 '(via authentication). '
                                 'Defaults to env[OS_SERVICE_ENDPOINT].')

        parser.add_argument('--os-cache',
                            default=env('OS_CACHE', default=False),
                            action='store_true',
                            help='Use the auth token cache. '
                                 'Defaults to env[OS_CACHE].')
        parser.add_argument('--os_cache',
                            help=argparse.SUPPRESS)

        parser.add_argument('--force-new-token',
                            default=False,
                            action="store_true",
                            dest='force_new_token',
                            help="If the keyring is available and in use, "
                                 "token will always be stored and fetched "
                                 "from the keyring until the token has "
                                 "expired. Use this option to request a "
                                 "new token and replace the existing one "
                                 "in the keyring.")

        parser.add_argument('--stale-duration',
                            metavar='<seconds>',
                            default=access.STALE_TOKEN_DURATION,
                            dest='stale_duration',
                            help="Stale duration (in seconds) used to "
                                 "determine whether a token has expired "
                                 "when retrieving it from keyring. This "
                                 "is useful in mitigating process or "
                                 "network delays. Default is %s seconds." %
                                 access.STALE_TOKEN_DURATION)

        # Session options (--os-cacert, --insecure, --timeout, ...) are
        # registered by the shared session helper.
        session.Session.register_cli_options(parser)
        parser.add_argument('--os_cacert', help=argparse.SUPPRESS)

        parser.add_argument('--os_key', help=argparse.SUPPRESS)

        parser.add_argument('--os_cert', help=argparse.SUPPRESS)

        return parser

    def get_subcommand_parser(self, version):
        """Return the full parser with subcommands for *version*.

        Unknown versions fall back to the v2.0 action module.  Also
        registers the generic/bootstrap actions, this shell's own do_*
        methods, and the hidden bash_completion subcommand.
        """
        parser = self.get_base_parser()

        self.subcommands = {}
        subparsers = parser.add_subparsers(metavar='<subcommand>')

        try:
            actions_module = {
                '2.0': shell_v2_0,
            }[version]
        except KeyError:
            actions_module = shell_v2_0

        self._find_actions(subparsers, actions_module)
        self._find_actions(subparsers, shell_generic)
        self._find_actions(subparsers, shell_bootstrap)
        self._find_actions(subparsers, self)

        self._add_bash_completion_subparser(subparsers)

        return parser

    def _add_bash_completion_subparser(self, subparsers):
        """Register the hidden 'bash_completion' subcommand."""
        subparser = subparsers.add_parser(
            'bash_completion',
            add_help=False,
            formatter_class=OpenStackHelpFormatter
        )
        self.subcommands['bash_completion'] = subparser
        subparser.set_defaults(func=self.do_bash_completion)

    def _find_actions(self, subparsers, actions_module):
        """Turn every do_* attribute of *actions_module* into a subcommand."""
        for attr in (a for a in dir(actions_module) if a.startswith('do_')):
            # I prefer to be hyphen-separated instead of underscores.
            command = attr[3:].replace('_', '-')
            callback = getattr(actions_module, attr)
            desc = callback.__doc__ or ''
            # First docstring line doubles as the one-line help text.
            help = desc.strip().split('\n')[0]
            # Per-command args registered via the @utils.arg decorator.
            arguments = getattr(callback, 'arguments', [])

            subparser = subparsers.add_parser(
                command,
                help=help,
                description=desc,
                add_help=False,
                formatter_class=OpenStackHelpFormatter)
            subparser.add_argument('-h', '--help', action='help',
                                   help=argparse.SUPPRESS)
            self.subcommands[command] = subparser
            group = subparser.add_argument_group(title='Arguments')
            for (args, kwargs) in arguments:
                group.add_argument(*args, **kwargs)
            subparser.set_defaults(func=callback)

    def auth_check(self, args):
        """Validate that *args* carries a usable authentication method.

        Accepts either a token+endpoint pair or username/password
        credentials with an auth URL; prompts for a missing password and
        raises CommandError for anything incomplete.
        """
        if args.os_token or args.os_endpoint:
            if not args.os_token:
                raise exc.CommandError(
                    'Expecting a token provided via either --os-token or '
                    'env[OS_SERVICE_TOKEN]')

            if not args.os_endpoint:
                raise exc.CommandError(
                    'Expecting an endpoint provided via either '
                    '--os-endpoint or env[OS_SERVICE_ENDPOINT]')

            # user supplied a token and endpoint and at least one other cred
            if args.os_username or args.os_password or args.os_auth_url:
                msg = ('WARNING: Bypassing authentication using a token & '
                       'endpoint (authentication credentials are being '
                       'ignored).')
                print(msg)

        else:
            if not args.os_auth_url:
                raise exc.CommandError(
                    'Expecting an auth URL via either --os-auth-url or '
                    'env[OS_AUTH_URL]')

            if args.os_username or args.os_password:
                if not args.os_username:
                    raise exc.CommandError(
                        'Expecting a username provided via either '
                        '--os-username or env[OS_USERNAME]')

                if not args.os_password:
                    args.os_password = utils.prompt_user_password()
                    # No password because we didn't have a tty or the
                    # user Ctl-D when prompted?
                    if not args.os_password:
                        raise exc.CommandError(
                            'Expecting a password provided via either '
                            '--os-password, env[OS_PASSWORD], or '
                            'prompted response')

            else:
                raise exc.CommandError('Expecting authentication method via'
                                       '\n  either a service token, '
                                       '--os-token or env[OS_SERVICE_TOKEN], '
                                       '\n  credentials, '
                                       '--os-username or env[OS_USERNAME]')

    def main(self, argv):
        """Parse *argv*, build the client, and run the chosen subcommand."""
        # Parse args once to find version
        parser = self.get_base_parser()
        (options, args) = parser.parse_known_args(argv)

        # build available subcommands based on version
        api_version = options.os_identity_api_version
        subcommand_parser = self.get_subcommand_parser(api_version)
        self.parser = subcommand_parser

        # Handle top-level --help/-h before attempting to parse
        # a command off the command line
        if not argv or options.help:
            self.do_help(options)
            return 0

        # Parse args again and call whatever callback was selected
        args = subcommand_parser.parse_args(argv)

        # Short-circuit and deal with help command right away.
        if args.func == self.do_help:
            self.do_help(args)
            return 0
        elif args.func == self.do_bash_completion:
            self.do_bash_completion(args)
            return 0

        if args.debug:
            logging_level = logging.DEBUG
            # iso8601 is chatty at DEBUG; keep it quiet.
            iso_logger = logging.getLogger('iso8601')
            iso_logger.setLevel('WARN')
        else:
            logging_level = logging.WARNING
        logging.basicConfig(level=logging_level)

        # TODO(heckj): supporting backwards compatibility with environment
        # variables. To be removed after DEVSTACK is updated, ideally in
        # the Grizzly release cycle.
        args.os_token = args.os_token or env('SERVICE_TOKEN')
        args.os_endpoint = args.os_endpoint or env('SERVICE_ENDPOINT')

        if utils.isunauthenticated(args.func):
            # Commands marked unauthenticated get a bare generic client.
            self.cs = shell_generic.CLIENT_CLASS(endpoint=args.os_auth_url,
                                                 cacert=args.os_cacert,
                                                 key=args.os_key,
                                                 cert=args.os_cert,
                                                 insecure=args.insecure,
                                                 timeout=args.timeout)
        else:
            self.auth_check(args)
            token = None
            if args.os_token and args.os_endpoint:
                token = args.os_token
            api_version = options.os_identity_api_version
            self.cs = self.get_api_class(api_version)(
                username=args.os_username,
                tenant_name=args.os_tenant_name,
                tenant_id=args.os_tenant_id,
                token=token,
                endpoint=args.os_endpoint,
                password=args.os_password,
                auth_url=args.os_auth_url,
                region_name=args.os_region_name,
                cacert=args.os_cacert,
                key=args.os_key,
                cert=args.os_cert,
                insecure=args.insecure,
                debug=args.debug,
                use_keyring=args.os_cache,
                force_new_token=args.force_new_token,
                stale_duration=args.stale_duration,
                timeout=args.timeout)

        try:
            args.func(self.cs, args)
        except exc.Unauthorized:
            raise exc.CommandError("Invalid OpenStack Identity credentials.")
        except exc.AuthorizationFailure:
            raise exc.CommandError("Unable to authorize user")

    def get_api_class(self, version):
        """Map an identity API version string to the client class.

        Unknown versions warn and fall back to the v2.0 client.
        """
        try:
            return {
                "2.0": shell_v2_0.CLIENT_CLASS,
            }[version]
        except KeyError:
            if version:
                msg = ('WARNING: unsupported identity-api-version %s, '
                       'falling back to 2.0' % version)
                print(msg)
            return shell_v2_0.CLIENT_CLASS

    def do_bash_completion(self, args):
        """Prints all of the commands and options to stdout.

        The keystone.bash_completion script doesn't have to hard code them.
        """
        commands = set()
        options = set()
        for sc_str, sc in self.subcommands.items():
            commands.add(sc_str)
            for option in list(sc._optionals._option_string_actions):
                options.add(option)

        # Hide both spellings of the completion command itself.
        commands.remove('bash-completion')
        commands.remove('bash_completion')
        print(' '.join(commands | options))

    @utils.arg('command', metavar='<subcommand>', nargs='?',
               help='Display help for <subcommand>.')
    def do_help(self, args):
        """Display help about this program or one of its subcommands."""
        if getattr(args, 'command', None):
            if args.command in self.subcommands:
                self.subcommands[args.command].print_help()
            else:
                raise exc.CommandError("'%s' is not a valid subcommand" %
                                       args.command)
        else:
            self.parser.print_help()
# I'm picky about my shell help.
class OpenStackHelpFormatter(argparse.HelpFormatter):
    """Help formatter tweaked for the keystone shell.

    Widens the help column so long positional choices (subcommand names)
    fit, and capitalizes the first letter of every section heading.
    """

    INDENT_BEFORE_ARGUMENTS = 6
    MAX_WIDTH_ARGUMENTS = 32

    def add_arguments(self, actions):
        """Grow the help column to fit positional choices, then delegate."""
        positionals = [act for act in actions if not act.option_strings]
        for act in positionals:
            if not act.choices:
                continue
            for name in act.choices:
                width = self.INDENT_BEFORE_ARGUMENTS + len(name)
                # Only widen, and never beyond the configured maximum.
                if self._max_help_position < width <= self.MAX_WIDTH_ARGUMENTS:
                    self._max_help_position = width
        super(OpenStackHelpFormatter, self).add_arguments(actions)

    def start_section(self, heading):
        """Open a help section with the heading's first letter capitalized."""
        capitalized = '%s%s' % (heading[0].upper(), heading[1:])
        super(OpenStackHelpFormatter, self).start_section(capitalized)
def main():
    """Run the keystone shell, mapping failures to conventional exit codes."""
    try:
        OpenStackIdentityShell().main(sys.argv[1:])
    except KeyboardInterrupt:
        # 130 = 128 + SIGINT, the conventional exit status for Ctrl-C.
        print("... terminating keystone client", file=sys.stderr)
        sys.exit(130)
    except Exception as err:
        print(encodeutils.safe_encode(six.text_type(err)), file=sys.stderr)
        sys.exit(1)
# Propagate main()'s exit status (None -> 0 on the normal path).
if __name__ == "__main__":
    sys.exit(main())
# (non-code residue from the extraction source removed)