gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from . import helpers
from kolibri.core.auth.models import Classroom
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import LearnerGroup
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.content.models import ContentNode
from kolibri.core.exams.models import Exam
from kolibri.core.exams.models import ExamAssignment
from kolibri.core.lessons.models import Lesson
from kolibri.core.lessons.models import LessonAssignment
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import MasteryLog
# Shared plaintext password for every test user created in these test cases.
DUMMY_PASSWORD = "password"
class ExerciseDifficultQuestionTestCase(APITestCase):
    """Tests for the coach "exercise difficulties" detail endpoint.

    Covers permission checks (learners must not see coach reports), the
    mandatory ``classroom_id`` query parameter, and aggregation of attempt
    logs filtered by classroom, lesson and learner group.
    """

    def setUp(self):
        provision_device()
        # Facility -> classroom -> learner-group hierarchy.
        self.facility = Facility.objects.create(name="My Facility")
        self.classroom = Classroom.objects.create(
            name="My Classroom", parent=self.facility
        )
        self.group = LearnerGroup.objects.create(name="My Group", parent=self.classroom)
        # A coach holding both facility-wide and classroom-specific coach roles.
        self.facility_and_classroom_coach = helpers.create_coach(
            username="facility_and_classroom_coach",
            password=DUMMY_PASSWORD,
            facility=self.facility,
            classroom=self.classroom,
            is_facility_coach=True,
        )
        # A learner in the facility but NOT in the classroom or group.
        self.learner = helpers.create_learner(
            username="learner", password=DUMMY_PASSWORD, facility=self.facility
        )
        # A learner enrolled in both the classroom and the learner group.
        self.classroom_group_learner = helpers.create_learner(
            username="classroom_group_learner",
            password=DUMMY_PASSWORD,
            facility=self.facility,
            classroom=self.classroom,
            learner_group=self.group,
        )
        # Need ContentNodes
        self.channel_id = "15f32edcec565396a1840c5413c92450"
        self.lesson_id = "15f32edcec565396a1840c5413c92452"
        self.content_ids = [
            "15f32edcec565396a1840c5413c92451",
            "15f32edcec565396a1840c5413c92452",
            "15f32edcec565396a1840c5413c92453",
        ]
        self.contentnode_ids = [
            "25f32edcec565396a1840c5413c92451",
            "25f32edcec565396a1840c5413c92452",
            "25f32edcec565396a1840c5413c92453",
        ]
        self.node_1 = ContentNode.objects.create(
            title="Node 1",
            available=True,
            id=self.contentnode_ids[0],
            content_id=self.content_ids[0],
            channel_id=self.channel_id,
        )
        # A lesson containing node_1, assigned to the whole classroom.
        self.lesson = Lesson.objects.create(
            id=self.lesson_id,
            title="My Lesson",
            created_by=self.facility_and_classroom_coach,
            collection=self.classroom,
            resources=json.dumps(
                [
                    {
                        "contentnode_id": self.node_1.id,
                        "content_id": self.node_1.content_id,
                        "channel_id": self.channel_id,
                    }
                ]
            ),
        )
        self.assignment_1 = LessonAssignment.objects.create(
            lesson=self.lesson,
            assigned_by=self.facility_and_classroom_coach,
            collection=self.classroom,
        )
        self.exercise_difficulties_basename = (
            "kolibri:kolibri.plugins.coach:exercisedifficulties"
        )

    def _login_as_coach(self):
        """Authenticate the test client as the coach."""
        self.client.login(
            username=self.facility_and_classroom_coach.username,
            password=DUMMY_PASSWORD,
        )

    def _get_exercise_difficulties(self, **params):
        """GET the difficulties detail endpoint for node_1 with query params."""
        return self.client.get(
            reverse(
                self.exercise_difficulties_basename + "-detail",
                kwargs={"pk": self.content_ids[0]},
            ),
            data=params,
        )

    def test_learner_cannot_access_by_classroom_id(self):
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self._get_exercise_difficulties(classroom_id=self.classroom.id)
        self.assertEqual(response.status_code, 403)

    def test_learner_cannot_access_by_lesson_id(self):
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(response.status_code, 403)

    def test_learner_cannot_access_by_group_id(self):
        # Fixed: previously logged in with the literal "learner" rather than
        # self.learner.username like the sibling tests.
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self._get_exercise_difficulties(
            group_id=self.group.id, classroom_id=self.classroom.id
        )
        self.assertEqual(response.status_code, 403)

    def test_coach_classroom_id_required(self):
        self._login_as_coach()
        # Without a classroom_id the endpoint responds 412 Precondition Failed.
        response = self._get_exercise_difficulties()
        self.assertEqual(response.status_code, 412)

    def test_coach_no_progress_by_classroom_id(self):
        self._login_as_coach()
        response = self._get_exercise_difficulties(classroom_id=self.classroom.id)
        self.assertEqual(len(response.data), 0)

    def test_coach_no_progress_by_lesson_id(self):
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 0)

    def test_coach_no_progress_by_group_id(self):
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            group_id=self.group.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 0)

    def _set_one_difficult(self, user):
        """Create session/summary/mastery logs for node_1 plus one incorrect
        attempt (item "test") by ``user``, making that question 'difficult'."""
        self.sessionlog = ContentSessionLog.objects.create(
            user=user,
            content_id=self.content_ids[0],
            channel_id=self.node_1.channel_id,
            kind="exercise",
            progress=0.1,
            start_timestamp=datetime.datetime.now(),
        )
        self.summarylog = ContentSummaryLog.objects.create(
            user=user,
            content_id=self.content_ids[0],
            channel_id=self.node_1.channel_id,
            kind="exercise",
            progress=0.1,
            start_timestamp=datetime.datetime.now(),
        )
        self.masterylog = MasteryLog.objects.create(
            user=user,
            summarylog=self.summarylog,
            start_timestamp=datetime.datetime.now(),
            mastery_level=1,
        )
        AttemptLog.objects.create(
            masterylog=self.masterylog,
            sessionlog=self.sessionlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=0,
            user=user,
            item="test",
        )

    def test_coach_one_difficult_by_classroom_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_exercise_difficulties(classroom_id=self.classroom.id)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_one_difficult_by_lesson_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_one_difficult_by_lesson_id_repeated_assignment(self):
        # Assign the lesson to the group as well; the learner must still be
        # counted only once despite the duplicate assignment.
        LessonAssignment.objects.create(
            lesson=self.lesson,
            assigned_by=self.facility_and_classroom_coach,
            collection=self.group,
        )
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_one_difficult_by_group_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            group_id=self.group.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_two_difficult_by_lesson_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        # A second incorrect attempt on a different item makes two difficult
        # questions.
        AttemptLog.objects.create(
            masterylog=self.masterylog,
            sessionlog=self.sessionlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=0,
            user=self.classroom_group_learner,
            item="nottest",
        )
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 2)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)
        self.assertEqual(response.data[1]["total"], 1)
        self.assertEqual(response.data[1]["correct"], 0)

    def test_coach_one_difficult_one_not_by_lesson_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        # A correct attempt on a second item: both items are reported, with
        # differing correct counts.
        AttemptLog.objects.create(
            masterylog=self.masterylog,
            sessionlog=self.sessionlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=1,
            user=self.classroom_group_learner,
            item="nottest",
        )
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 2)
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 0, response.data))
        )
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 1, response.data))
        )

    def test_coach_difficult_no_assigned_by_lesson_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        AttemptLog.objects.create(
            masterylog=self.masterylog,
            sessionlog=self.sessionlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=1,
            user=self.classroom_group_learner,
            item="nottest",
        )
        # With no remaining lesson assignments, the lesson-scoped report is
        # empty even though attempt logs exist.
        LessonAssignment.objects.all().delete()
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 0)

    def test_coach_difficult_no_assigned_by_group_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        AttemptLog.objects.create(
            masterylog=self.masterylog,
            sessionlog=self.sessionlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=1,
            user=self.classroom_group_learner,
            item="nottest",
        )
        # Group-scoped reports do not depend on lesson assignments.
        LessonAssignment.objects.all().delete()
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            group_id=self.group.id, classroom_id=self.classroom.id
        )
        self.assertEqual(len(response.data), 2)
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 0, response.data))
        )
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 1, response.data))
        )

    def test_coach_difficult_both_assigned_by_lesson_id_group_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        # learner2 is in the classroom but not the group, so the combined
        # lesson+group filter should only report the group member's attempt.
        learner2 = FacilityUser.objects.create(
            username="learner2", facility=self.facility
        )
        self.classroom.add_member(learner2)
        self._set_one_difficult(learner2)
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id,
            group_id=self.group.id,
            classroom_id=self.classroom.id,
        )
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_difficult_group_id_not_in_lesson(self):
        self._set_one_difficult(self.classroom_group_learner)
        learner2 = FacilityUser.objects.create(
            username="learner2", facility=self.facility
        )
        self.classroom.add_member(learner2)
        self._set_one_difficult(learner2)
        # Empty the group and reassign the lesson to the (now empty) group:
        # nobody is both in the group and assigned the lesson.
        self.group.remove_member(self.classroom_group_learner)
        self.assignment_1.delete()
        LessonAssignment.objects.create(
            lesson=self.lesson,
            assigned_by=self.facility_and_classroom_coach,
            collection=self.group,
        )
        self._login_as_coach()
        response = self._get_exercise_difficulties(
            lesson_id=self.lesson.id,
            group_id=self.group.id,
            classroom_id=self.classroom.id,
        )
        self.assertEqual(len(response.data), 0)
class QuizDifficultQuestionTestCase(APITestCase):
    """Tests for the coach "quiz difficulties" detail endpoint.

    Covers permission checks, per-question aggregation of exam attempts,
    group filtering, and inclusion/exclusion of active and unsubmitted
    quizzes.
    """

    def setUp(self):
        provision_device()
        self.facility = Facility.objects.create(name="My Facility")
        self.classroom = Classroom.objects.create(
            name="My Classroom", parent=self.facility
        )
        self.group = LearnerGroup.objects.create(name="My Group", parent=self.classroom)
        # A coach holding both facility-wide and classroom-specific coach roles.
        self.facility_and_classroom_coach = helpers.create_coach(
            username="facility_and_classroom_coach",
            password=DUMMY_PASSWORD,
            facility=self.facility,
            classroom=self.classroom,
            is_facility_coach=True,
        )
        # A learner in the facility but NOT in the classroom or group.
        self.learner = helpers.create_learner(
            username="learner", password=DUMMY_PASSWORD, facility=self.facility
        )
        self.classroom_group_learner = helpers.create_learner(
            username="classroom_group_learner",
            password=DUMMY_PASSWORD,
            facility=self.facility,
            classroom=self.classroom,
            learner_group=self.group,
        )
        self.classroom_group_learner_2 = helpers.create_learner(
            username="classroom_group_learner_2",
            password=DUMMY_PASSWORD,
            facility=self.facility,
            classroom=self.classroom,
            learner_group=self.group,
        )
        # NOTE(review): title looks copy-pasted from the lesson tests; it is
        # inert fixture data, so left unchanged.
        self.quiz = Exam.objects.create(
            title="My Lesson",
            creator=self.facility_and_classroom_coach,
            collection=self.classroom,
            question_count=5,
            active=False,
        )
        self.assignment_1 = ExamAssignment.objects.create(
            exam=self.quiz,
            assigned_by=self.facility_and_classroom_coach,
            collection=self.classroom,
        )
        self.quiz_difficulties_basename = (
            "kolibri:kolibri.plugins.coach:quizdifficulties"
        )
        self.content_id = "25f32edcec565396a1840c5413c92451"

    def _get_quiz_difficulties(self, for_group=False):
        """GET the quiz difficulties detail endpoint, optionally group-scoped."""
        data = {"group_id": self.group.id} if for_group else {}
        return self.client.get(
            reverse(
                self.quiz_difficulties_basename + "-detail", kwargs={"pk": self.quiz.id}
            ),
            data=data,
        )

    def _login_as_coach(self):
        """Authenticate the test client as the coach."""
        self.client.login(
            username=self.facility_and_classroom_coach.username, password=DUMMY_PASSWORD
        )

    def test_learner_cannot_access(self):
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self._get_quiz_difficulties()
        self.assertEqual(response.status_code, 403)

    def test_learner_cannot_access_by_group_id(self):
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self._get_quiz_difficulties(for_group=True)
        self.assertEqual(response.status_code, 403)

    def test_coach_no_progress(self):
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 0)

    def test_coach_no_progress_by_group_id(self):
        self._login_as_coach()
        response = self._get_quiz_difficulties(for_group=True)
        self.assertEqual(len(response.data), 0)

    def _set_one_difficult(self, user):
        """Create an exam log for ``user`` plus one incorrect attempt on
        item "test", making that question 'difficult'."""
        self.examlog = ExamLog.objects.create(user=user, exam=self.quiz)
        ExamAttemptLog.objects.create(
            examlog=self.examlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=0,
            user=user,
            item="test",
            content_id=self.content_id,
        )

    def test_coach_one_difficult(self):
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_active_and_unsubmitted_quizzes_are_not_returned(self):
        self._set_one_difficult(self.classroom_group_learner)
        # Reactivate exam, but flag learner as not having submitted it
        self.quiz.active = True
        self.quiz.save()
        self.examlog.closed = False
        self.examlog.save()
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 0)

    def test_active_and_submitted_quizzes_are_returned(self):
        # Fixed typo in the test name ("submtted" -> "submitted").
        self._set_one_difficult(self.classroom_group_learner)
        # Reactivate exam, and flag learner as having submitted it
        self.quiz.active = True
        self.quiz.save()
        self.examlog.closed = True
        self.examlog.save()
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_submitted_quizzes_are_in_total(self):
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        # Reactivate quiz and simulate 2 quiz submissions.
        self.quiz.active = True
        self.quiz.save()
        self.examlog.closed = True
        self.examlog.save()
        ExamLog.objects.create(
            user=self.classroom_group_learner_2, exam=self.quiz, closed=True
        )
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 2)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_one_two_started_difficult(self):
        # NOTE(review): currently identical to test_coach_one_difficult;
        # kept for backward compatibility of the test suite.
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_one_difficult_repeated_assignment(self):
        # Assign the quiz to the group as well; duplicate assignments must
        # not double-count the learner.
        ExamAssignment.objects.create(
            exam=self.quiz,
            assigned_by=self.facility_and_classroom_coach,
            collection=self.group,
        )
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_one_difficult_by_group_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        self._login_as_coach()
        response = self._get_quiz_difficulties(for_group=True)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)

    def test_coach_two_difficult(self):
        self._set_one_difficult(self.classroom_group_learner)
        # A second incorrect attempt on a different item yields two rows.
        ExamAttemptLog.objects.create(
            examlog=self.examlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=0,
            user=self.classroom_group_learner,
            item="notatest",
            content_id=self.content_id,
        )
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 2)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)
        self.assertEqual(response.data[1]["total"], 1)
        self.assertEqual(response.data[1]["correct"], 0)

    def test_coach_one_difficult_one_not(self):
        self._set_one_difficult(self.classroom_group_learner)
        # A correct attempt on a second item: both rows returned with
        # differing correct counts.
        ExamAttemptLog.objects.create(
            examlog=self.examlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=1,
            user=self.classroom_group_learner,
            item="notatest",
            content_id=self.content_id,
        )
        self._login_as_coach()
        response = self._get_quiz_difficulties()
        self.assertEqual(len(response.data), 2)
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 0, response.data))
        )
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 1, response.data))
        )

    def test_coach_difficult_by_group_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        ExamAttemptLog.objects.create(
            examlog=self.examlog,
            start_timestamp=datetime.datetime.now(),
            end_timestamp=datetime.datetime.now(),
            complete=True,
            correct=1,
            user=self.classroom_group_learner,
            item="notatest",
            content_id=self.content_id,
        )
        self._login_as_coach()
        response = self._get_quiz_difficulties(for_group=True)
        self.assertEqual(len(response.data), 2)
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 0, response.data))
        )
        self.assertTrue(
            any(map(lambda x: x["total"] == 1 and x["correct"] == 1, response.data))
        )

    def test_coach_difficult_both_assigned_by_group_id(self):
        self._set_one_difficult(self.classroom_group_learner)
        # learner2 is in the classroom but not the group, so the group-scoped
        # report must only include the group member's attempt.
        learner2 = FacilityUser.objects.create(
            username="learner2", facility=self.facility
        )
        self.classroom.add_member(learner2)
        self._set_one_difficult(learner2)
        self._login_as_coach()
        response = self._get_quiz_difficulties(for_group=True)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]["total"], 1)
        self.assertEqual(response.data[0]["correct"], 0)
|
|
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will result in the Makefile failing for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
# Placeholder for the importlib._bootstrap_external module; injected later
# during interpreter setup (see the _setup() note above) — None until then.
_bootstrap_external = None
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
class _ManageReload:
"""Manages the possible clean-up of sys.modules for load_module()."""
def __init__(self, name):
self._name = name
def __enter__(self):
self._is_reload = self._name in sys.modules
def __exit__(self, *args):
if any(arg is not None for arg in args) and not self._is_reload:
try:
del sys.modules[self._name]
except KeyError:
pass
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
# (weakrefs so a lock disappears once nothing is importing that module).
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
# (records which lock each thread is currently waiting on, for deadlock
# detection in _ModuleLock.has_deadlock()).
_blocking_on = {}
class _DeadlockError(RuntimeError):
    """Raised when acquiring a module lock would deadlock across threads."""
    pass
class _ModuleLock:
    """A recursive lock implementation which is able to detect deadlocks
    (e.g. thread 1 trying to take locks A then B, and thread 2 trying to
    take locks B then A).
    """
    def __init__(self, name):
        # Protects this object's state (owner/count/waiters).
        self.lock = _thread.allocate_lock()
        # Held while someone owns the module lock; waiters block on it.
        self.wakeup = _thread.allocate_lock()
        self.name = name
        # Thread id of the current owner, or None when unowned.
        self.owner = None
        # Recursive acquisition count of the owning thread.
        self.count = 0
        # Number of threads blocked in acquire() waiting for wakeup.
        self.waiters = 0
    def has_deadlock(self):
        # Deadlock avoidance for concurrent circular imports.
        # Walks the "who is blocked on whom" chain via _blocking_on; a cycle
        # back to the current thread means acquiring would deadlock.
        me = _thread.get_ident()
        tid = self.owner
        while True:
            lock = _blocking_on.get(tid)
            if lock is None:
                return False
            tid = lock.owner
            if tid == me:
                return True
    def acquire(self):
        """
        Acquire the module lock. If a potential deadlock is detected,
        a _DeadlockError is raised.
        Otherwise, the lock is always acquired and True is returned.
        """
        tid = _thread.get_ident()
        # Register what this thread is blocked on so other threads'
        # has_deadlock() walks can see it.
        _blocking_on[tid] = self
        try:
            while True:
                with self.lock:
                    # Free, or re-entrant acquisition by the owner: take it.
                    if self.count == 0 or self.owner == tid:
                        self.owner = tid
                        self.count += 1
                        return True
                    if self.has_deadlock():
                        raise _DeadlockError('deadlock detected by %r' % self)
                    # Non-blocking grab of wakeup while still holding
                    # self.lock keeps the waiters count consistent.
                    if self.wakeup.acquire(False):
                        self.waiters += 1
                # Wait for a release() call
                self.wakeup.acquire()
                self.wakeup.release()
        finally:
            del _blocking_on[tid]
    def release(self):
        # Release one level of the recursive lock; only the owner may call.
        tid = _thread.get_ident()
        with self.lock:
            if self.owner != tid:
                raise RuntimeError('cannot release un-acquired lock')
            assert self.count > 0
            self.count -= 1
            if self.count == 0:
                self.owner = None
                if self.waiters:
                    # Hand off: releasing wakeup unblocks one waiting thread.
                    self.waiters -= 1
                    self.wakeup.release()
    def __repr__(self):
        # Identity-based repr; useful in _DeadlockError messages.
        return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
    """Context manager: acquires the named module's lock on entry and
    releases it on exit. Must be entered while holding the global import
    lock, which it releases once the per-module lock is obtained."""
    def __init__(self, name):
        self._name = name
        self._lock = None
    def __enter__(self):
        try:
            self._lock = _get_module_lock(self._name)
        finally:
            # Drop the global import lock even if lock lookup/creation failed.
            _imp.release_lock()
        self._lock.acquire()
    def __exit__(self, *args, **kwargs):
        self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
    """Get or create the module lock for a given module name.
    Should only be called with the import lock taken."""
    lock = None
    # Existing locks are held via weakrefs; the ref may already be dead.
    ref = _module_locks.get(name)
    if ref is not None:
        lock = ref()
    if lock is None:
        lock_cls = _DummyModuleLock if _thread is None else _ModuleLock
        lock = lock_cls(name)

        def _discard(_ref):
            # Weakref callback: drop the stale entry once the lock dies.
            del _module_locks[name]
        _module_locks[name] = _weakref.ref(lock, _discard)
    return lock
def _lock_unlock_module(name):
    """Release the global import lock, then acquire and release the
    module lock for a given module name.
    This is used to ensure a module is completely initialized, in the
    event it is being imported by another thread.
    Should only be called with the import lock taken."""
    lock = _get_module_lock(name)
    _imp.release_lock()
    try:
        lock.acquire()
    except _DeadlockError:
        # Concurrent circular import: accept a partially initialized
        # module object rather than deadlocking.
        return
    lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _requires_builtin(fxn):
    """Decorator to verify the named module is built-in."""
    def _requires_builtin_wrapper(self, fullname):
        if fullname in sys.builtin_module_names:
            return fxn(self, fullname)
        raise ImportError('{!r} is not a built-in module'.format(fullname),
                          name=fullname)
    _wrap(_requires_builtin_wrapper, fxn)
    return _requires_builtin_wrapper
def _requires_frozen(fxn):
    """Decorator to verify the named module is frozen."""
    def _requires_frozen_wrapper(self, fullname):
        if _imp.is_frozen(fullname):
            return fxn(self, fullname)
        raise ImportError('{!r} is not a frozen module'.format(fullname),
                          name=fullname)
    _wrap(_requires_frozen_wrapper, fxn)
    return _requires_frozen_wrapper
# Typically used by loader classes as a method replacement.
def _load_module_shim(self, fullname):
    """Load the specified module into sys.modules and return it.
    This method is deprecated. Use loader.exec_module instead.
    """
    spec = spec_from_loader(fullname, self)
    if fullname not in sys.modules:
        # Fresh import.
        return _load(spec)
    # Reload: execute into the existing module object, then return whatever
    # sys.modules ends up holding (exec may have replaced the entry).
    module = sys.modules[fullname]
    _exec(spec, module)
    return sys.modules[fullname]
# Module specifications #######################################################
def _module_repr(module):
# The implementation of ModuleType__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
# As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
# drop their implementations for module_repr. we can add a
# deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _module_repr_from_spec(spec)
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
class _installed_safely:
def __init__(self, module):
self._module = module
self._spec = module.__spec__
def __enter__(self):
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes
# wrong)
self._spec._initializing = True
sys.modules[self._spec.name] = self._module
def __exit__(self, *args):
try:
spec = self._spec
if any(arg is not None for arg in args):
try:
del sys.modules[spec.name]
except KeyError:
pass
else:
_verbose_message('import {!r} # {!r}', spec.name, spec.loader)
finally:
self._spec._initializing = False
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
`has_location` indicates that a spec's "origin" reflects a location.
When this is True, `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, *, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
    def __eq__(self, other):
        # Compare all spec-defining attributes, including the derived
        # 'cached' and 'has_location' properties.
        smsl = self.submodule_search_locations
        try:
            return (self.name == other.name and
                    self.loader == other.loader and
                    self.origin == other.origin and
                    smsl == other.submodule_search_locations and
                    self.cached == other.cached and
                    self.has_location == other.has_location)
        except AttributeError:
            # 'other' is not spec-like; treat it as unequal rather than
            # propagate the error.
            return False
    @property
    def cached(self):
        """Location of the cached bytecode, computed lazily from origin."""
        if self._cached is None:
            # Only meaningful when origin is a real file location.
            if self.origin is not None and self._set_fileattr:
                if _bootstrap_external is None:
                    # The external bootstrap module is not installed yet
                    # (very early interpreter startup).
                    raise NotImplementedError
                self._cached = _bootstrap_external._get_cached(self.origin)
        return self._cached
    @cached.setter
    def cached(self, cached):
        # Explicit assignment overrides the lazily computed value above.
        self._cached = cached
    @property
    def parent(self):
        """The name of the module's parent."""
        if self.submodule_search_locations is None:
            # Plain module: the parent is everything before the last dot.
            return self.name.rpartition('.')[0]
        else:
            # A package is its own parent.
            return self.name
    @property
    def has_location(self):
        # True when 'origin' is a loadable location and __file__ should be
        # set on the module.
        return self._set_fileattr
    @has_location.setter
    def has_location(self, value):
        # Normalise any truthy/falsy assignment to a strict bool.
        self._set_fileattr = bool(value)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
    """Return a module spec based on various loader methods."""
    # Loaders exposing get_filename() are file-based; delegate to the
    # external bootstrap's file-location spec factory.
    if hasattr(loader, 'get_filename'):
        if _bootstrap_external is None:
            raise NotImplementedError
        spec_from_file_location = _bootstrap_external.spec_from_file_location

        if is_package is None:
            return spec_from_file_location(name, loader=loader)
        search = [] if is_package else None
        return spec_from_file_location(name, loader=loader,
                                       submodule_search_locations=search)

    # Otherwise ask the loader whether the module is a package.
    if is_package is None:
        if hasattr(loader, 'is_package'):
            try:
                is_package = loader.is_package(name)
            except ImportError:
                is_package = None  # aka, undefined
        else:
            # the default
            is_package = False

    return ModuleSpec(name, loader, origin=origin, is_package=is_package)
_POPULATE = object()
def _spec_from_module(module, loader=None, origin=None):
    """Build a ModuleSpec from an already-created module's attributes."""
    # This function is meant for use in _setup().
    try:
        spec = module.__spec__
    except AttributeError:
        pass
    else:
        # Reuse an existing spec when the module already carries one.
        if spec is not None:
            return spec

    name = module.__name__
    if loader is None:
        try:
            loader = module.__loader__
        except AttributeError:
            # loader will stay None.
            pass
    try:
        location = module.__file__
    except AttributeError:
        location = None
    if origin is None:
        if location is None:
            # Fall back to a loader-declared origin (e.g. 'built-in').
            try:
                origin = loader._ORIGIN
            except AttributeError:
                origin = None
        else:
            origin = location
    try:
        cached = module.__cached__
    except AttributeError:
        cached = None
    try:
        # Copy so later mutation of the module's __path__ is not shared.
        submodule_search_locations = list(module.__path__)
    except AttributeError:
        submodule_search_locations = None

    spec = ModuleSpec(name, loader, origin=origin)
    # origin is a file location exactly when __file__ was present.
    spec._set_fileattr = False if location is None else True
    spec.cached = cached
    spec.submodule_search_locations = submodule_search_locations
    return spec
def _init_module_attrs(spec, module, *, override=False):
    """Copy import-related attributes from *spec* onto *module*.

    With override=False only missing/None attributes are filled in; with
    override=True the spec always wins.  Returns the module.
    """
    # The passed-in module may be not support attribute assignment,
    # in which case we simply don't set the attributes.

    # __name__
    if (override or getattr(module, '__name__', None) is None):
        try:
            module.__name__ = spec.name
        except AttributeError:
            pass
    # __loader__
    if override or getattr(module, '__loader__', None) is None:
        loader = spec.loader
        if loader is None:
            # A backward compatibility hack.
            if spec.submodule_search_locations is not None:
                if _bootstrap_external is None:
                    raise NotImplementedError
                _NamespaceLoader = _bootstrap_external._NamespaceLoader

                # __new__() bypasses __init__ deliberately; only _path is
                # needed here.
                loader = _NamespaceLoader.__new__(_NamespaceLoader)
                loader._path = spec.submodule_search_locations
        try:
            module.__loader__ = loader
        except AttributeError:
            pass
    # __package__
    if override or getattr(module, '__package__', None) is None:
        try:
            module.__package__ = spec.parent
        except AttributeError:
            pass
    # __spec__ is always (re)assigned, regardless of override.
    try:
        module.__spec__ = spec
    except AttributeError:
        pass
    # __path__
    if override or getattr(module, '__path__', None) is None:
        if spec.submodule_search_locations is not None:
            try:
                module.__path__ = spec.submodule_search_locations
            except AttributeError:
                pass
    # __file__/__cached__ only apply to location-backed specs.
    if spec.has_location:
        if override or getattr(module, '__file__', None) is None:
            try:
                module.__file__ = spec.origin
            except AttributeError:
                pass

        if override or getattr(module, '__cached__', None) is None:
            if spec.cached is not None:
                try:
                    module.__cached__ = spec.cached
                except AttributeError:
                    pass
    return module
def module_from_spec(spec):
    """Create a module based on the provided spec."""
    # Typically loaders will not implement create_module().
    module = None
    if hasattr(spec.loader, 'create_module'):
        # If create_module() returns `None` then it means default
        # module creation should be used.
        module = spec.loader.create_module(spec)
    elif hasattr(spec.loader, 'exec_module'):
        # exec_module() without create_module() is on its way out.
        _warnings.warn('starting in Python 3.6, loaders defining exec_module() '
                       'must also define create_module()',
                       DeprecationWarning, stacklevel=2)
    if module is None:
        module = _new_module(spec.name)
    _init_module_attrs(spec, module)
    return module
def _module_repr_from_spec(spec):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
# Used by importlib.reload() and _load_module_shim().
def _exec(spec, module):
    """Execute the spec in an existing module's namespace."""
    name = spec.name
    # NOTE(review): the global import lock acquired here has no visible
    # matching release in this function — presumably handed off to the
    # per-module lock machinery; confirm against the full file.
    _imp.acquire_lock()
    with _ModuleLockManager(name):
        if sys.modules.get(name) is not module:
            msg = 'module {!r} not in sys.modules'.format(name)
            raise ImportError(msg, name=name)
        if spec.loader is None:
            if spec.submodule_search_locations is None:
                raise ImportError('missing loader', name=spec.name)
            # namespace package
            _init_module_attrs(spec, module, override=True)
            return module
        _init_module_attrs(spec, module, override=True)
        if not hasattr(spec.loader, 'exec_module'):
            # (issue19713) Once BuiltinImporter and ExtensionFileLoader
            # have exec_module() implemented, we can add a deprecation
            # warning here.
            spec.loader.load_module(name)
        else:
            spec.loader.exec_module(module)
    # Re-fetch from sys.modules: the loader may have replaced the entry.
    return sys.modules[name]
def _load_backward_compatible(spec):
    """Load via the legacy load_module() API, then patch up attributes."""
    # (issue19713) Once BuiltinImporter and ExtensionFileLoader
    # have exec_module() implemented, we can add a deprecation
    # warning here.
    spec.loader.load_module(spec.name)
    # The module must be in sys.modules at this point!
    module = sys.modules[spec.name]
    # Legacy loaders may not set the import attributes; fill in the gaps
    # without clobbering anything they did set.
    if getattr(module, '__loader__', None) is None:
        try:
            module.__loader__ = spec.loader
        except AttributeError:
            pass
    if getattr(module, '__package__', None) is None:
        try:
            # Since module.__path__ may not line up with
            # spec.submodule_search_paths, we can't necessarily rely
            # on spec.parent here.
            module.__package__ = module.__name__
            if not hasattr(module, '__path__'):
                module.__package__ = spec.name.rpartition('.')[0]
        except AttributeError:
            pass
    if getattr(module, '__spec__', None) is None:
        try:
            module.__spec__ = spec
        except AttributeError:
            pass
    return module
def _load_unlocked(spec):
    """Load a module from its spec; caller must hold the module lock."""
    # A helper for direct use by the import system.
    if spec.loader is not None:
        # not a namespace package
        if not hasattr(spec.loader, 'exec_module'):
            return _load_backward_compatible(spec)

    module = module_from_spec(spec)
    # _installed_safely puts the module in sys.modules for the duration of
    # exec and removes it again on failure.
    with _installed_safely(module):
        if spec.loader is None:
            if spec.submodule_search_locations is None:
                raise ImportError('missing loader', name=spec.name)
            # A namespace package so do nothing.
        else:
            spec.loader.exec_module(module)

    # We don't ensure that the import-related module attributes get
    # set in the sys.modules replacement case.  Such modules are on
    # their own.
    return sys.modules[spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def _load(spec):
    """Return a new module object, loaded by the spec's loader.

    The module is not added to its parent.

    If a module is already in sys.modules, that existing module gets
    clobbered.

    """
    # NOTE(review): as in _exec(), the global import lock acquired here is
    # apparently released by the module-lock machinery — confirm.
    _imp.acquire_lock()
    with _ModuleLockManager(spec.name):
        return _load_unlocked(spec)
# Loaders #####################################################################
class BuiltinImporter:

    """Meta path import for built-in modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.

    """

    @staticmethod
    def module_repr(module):
        """Return repr for the module.

        The method is deprecated.  The import machinery does the job itself.

        """
        return '<module {!r} (built-in)>'.format(module.__name__)

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """Return a spec for the named built-in module, or None."""
        # Built-ins never live inside a package: any 'path' (a parent
        # package's __path__) means this finder cannot handle the request.
        if path is not None:
            return None
        if _imp.is_builtin(fullname):
            return spec_from_loader(fullname, cls, origin='built-in')
        else:
            return None

    @classmethod
    def find_module(cls, fullname, path=None):
        """Find the built-in module.

        If 'path' is ever specified then the search is considered a failure.

        This method is deprecated.  Use find_spec() instead.

        """
        spec = cls.find_spec(fullname, path)
        return spec.loader if spec is not None else None

    @classmethod
    def create_module(cls, spec):
        """Create a built-in module"""
        # Fix: first parameter of a @classmethod was misleadingly named
        # 'self'; renamed to 'cls' (no behavior change).
        if spec.name not in sys.builtin_module_names:
            raise ImportError('{!r} is not a built-in module'.format(spec.name),
                              name=spec.name)
        return _call_with_frames_removed(_imp.create_builtin, spec)

    @classmethod
    def exec_module(cls, module):
        """Exec a built-in module"""
        # Same 'self' -> 'cls' rename as create_module() above.
        _call_with_frames_removed(_imp.exec_builtin, module)

    @classmethod
    @_requires_builtin
    def get_code(cls, fullname):
        """Return None as built-in modules do not have code objects."""
        return None

    @classmethod
    @_requires_builtin
    def get_source(cls, fullname):
        """Return None as built-in modules do not have source code."""
        return None

    @classmethod
    @_requires_builtin
    def is_package(cls, fullname):
        """Return False as built-in modules are never packages."""
        return False

    load_module = classmethod(_load_module_shim)
class FrozenImporter:

    """Meta path import for frozen modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.

    """

    @staticmethod
    def module_repr(m):
        """Return repr for the module.

        The method is deprecated.  The import machinery does the job itself.

        """
        return '<module {!r} (frozen)>'.format(m.__name__)

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        # Frozen modules are identified purely by name; 'path' is ignored.
        if _imp.is_frozen(fullname):
            return spec_from_loader(fullname, cls, origin='frozen')
        else:
            return None

    @classmethod
    def find_module(cls, fullname, path=None):
        """Find a frozen module.

        This method is deprecated.  Use find_spec() instead.

        """
        return cls if _imp.is_frozen(fullname) else None

    @classmethod
    def create_module(cls, spec):
        """Use default semantics for module creation."""
        # Implicit None return tells module_from_spec() to make a plain
        # module object.

    @staticmethod
    def exec_module(module):
        # Execute the frozen code object in the module's namespace.
        name = module.__spec__.name
        if not _imp.is_frozen(name):
            raise ImportError('{!r} is not a frozen module'.format(name),
                              name=name)
        code = _call_with_frames_removed(_imp.get_frozen_object, name)
        exec(code, module.__dict__)

    @classmethod
    def load_module(cls, fullname):
        """Load a frozen module.

        This method is deprecated.  Use exec_module() instead.

        """
        return _load_module_shim(cls, fullname)

    @classmethod
    @_requires_frozen
    def get_code(cls, fullname):
        """Return the code object for the frozen module."""
        return _imp.get_frozen_object(fullname)

    @classmethod
    @_requires_frozen
    def get_source(cls, fullname):
        """Return None as frozen modules do not have source code."""
        return None

    @classmethod
    @_requires_frozen
    def is_package(cls, fullname):
        """Return True if the frozen module is a package."""
        return _imp.is_frozen_package(fullname)
# Import itself ###############################################################
class _ImportLockContext:

    """Context manager for the import lock."""
    # This wraps the *global* interpreter import lock, not a per-module lock.

    def __enter__(self):
        """Acquire the import lock."""
        _imp.acquire_lock()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Release the import lock regardless of any raised exceptions."""
        _imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
def _find_spec_legacy(finder, name, path):
    """Adapt a legacy find_module()-style finder into a spec result."""
    # This would be a good place for a DeprecationWarning if
    # we ended up going that route.
    loader = finder.find_module(name, path)
    if loader is None:
        return None
    return spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
    """Find a module's loader."""
    if sys.meta_path is not None and not sys.meta_path:
        _warnings.warn('sys.meta_path is empty', ImportWarning)
    # We check sys.modules here for the reload case.  While a passed-in
    # target will usually indicate a reload there is no guarantee, whereas
    # sys.modules provides one.
    is_reload = name in sys.modules
    for finder in sys.meta_path:
        with _ImportLockContext():
            try:
                find_spec = finder.find_spec
            except AttributeError:
                # Fall back to the legacy find_module() protocol.
                spec = _find_spec_legacy(finder, name, path)
                if spec is None:
                    continue
            else:
                spec = find_spec(name, path, target)
        if spec is not None:
            # The parent import may have already imported this module.
            if not is_reload and name in sys.modules:
                module = sys.modules[name]
                try:
                    __spec__ = module.__spec__
                except AttributeError:
                    # We use the found spec since that is the one that
                    # we would have used if the parent module hadn't
                    # beaten us to the punch.
                    return spec
                else:
                    if __spec__ is None:
                        return spec
                    else:
                        # Prefer the spec already attached to the module.
                        return __spec__
            else:
                return spec
    else:
        # No meta-path finder could handle the request.
        return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if level > 0:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif package not in sys.modules:
msg = ('Parent module {!r} not loaded, cannot perform relative '
'import')
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError('Empty module name')
# Shared ImportError message prefix; _handle_fromlist() relies on this exact
# prefix to recognise "module not found" errors.
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
    """Find and load *name*; caller must hold the module lock."""
    path = None
    parent = name.rpartition('.')[0]
    if parent:
        # Import the parent package first so its __path__ is available.
        if parent not in sys.modules:
            _call_with_frames_removed(import_, parent)
        # Crazy side-effects!
        if name in sys.modules:
            return sys.modules[name]
        parent_module = sys.modules[parent]
        try:
            path = parent_module.__path__
        except AttributeError:
            msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
            raise ImportError(msg, name=name) from None
    spec = _find_spec(name, path)
    if spec is None:
        raise ImportError(_ERR_MSG.format(name), name=name)
    else:
        module = _load_unlocked(spec)
    if parent:
        # Set the module as an attribute on its parent.
        parent_module = sys.modules[parent]
        setattr(parent_module, name.rpartition('.')[2], module)
    return module
def _find_and_load(name, import_):
    """Find and load the module, and release the import lock."""
    # The per-module lock serialises concurrent imports of the same name.
    with _ModuleLockManager(name):
        return _find_and_load_unlocked(name, import_)
def _gcd_import(name, package=None, level=0):
    """Import and return the module based on its name, the package the call is
    being made from, and the level adjustment.

    This function represents the greatest common denominator of functionality
    between import_module and __import__. This includes setting __package__ if
    the loader did not.

    """
    _sanity_check(name, package, level)
    if level > 0:
        # Translate the relative name into an absolute one.
        name = _resolve_name(name, package, level)
    # NOTE(review): the global lock acquired here is only explicitly
    # released on the error path below; the other paths presumably hand it
    # off to _find_and_load()/_lock_unlock_module() — confirm.
    _imp.acquire_lock()
    if name not in sys.modules:
        return _find_and_load(name, _gcd_import)
    module = sys.modules[name]
    if module is None:
        # A None entry marks an import that is being deleted/has failed.
        _imp.release_lock()
        message = ('import of {} halted; '
                   'None in sys.modules'.format(name))
        raise ImportError(message, name=name)
    # Wait for any in-progress import of this module to finish.
    _lock_unlock_module(name)
    return module
def _handle_fromlist(module, fromlist, import_):
    """Figure out what __import__ should return.

    The import_ parameter is a callable which takes the name of module to
    import. It is required to decouple the function from assuming importlib's
    import implementation is desired.

    """
    # The hell that is fromlist ...
    # If a package was imported, try to import stuff from fromlist.
    if hasattr(module, '__path__'):
        if '*' in fromlist:
            # Expand '*' using the package's __all__, when present.
            fromlist = list(fromlist)
            fromlist.remove('*')
            if hasattr(module, '__all__'):
                fromlist.extend(module.__all__)
        for x in fromlist:
            if not hasattr(module, x):
                from_name = '{}.{}'.format(module.__name__, x)
                try:
                    _call_with_frames_removed(import_, from_name)
                except ImportError as exc:
                    # Backwards-compatibility dictates we ignore failed
                    # imports triggered by fromlist for modules that don't
                    # exist.
                    if str(exc).startswith(_ERR_MSG_PREFIX):
                        if exc.name == from_name:
                            continue
                    raise
    return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
    """Import a module.

    The 'globals' argument is used to infer where the import is occurring from
    to handle relative imports. The 'locals' argument is ignored. The
    'fromlist' argument specifies what should exist as attributes on the module
    being imported (e.g. ``from module import <fromlist>``).  The 'level'
    argument represents the package location to import from in a relative
    import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).

    """
    if level == 0:
        module = _gcd_import(name)
    else:
        globals_ = globals if globals is not None else {}
        package = _calc___package__(globals_)
        module = _gcd_import(name, package, level)
    if not fromlist:
        # Return up to the first dot in 'name'. This is complicated by the fact
        # that 'name' may be relative.
        if level == 0:
            return _gcd_import(name.partition('.')[0])
        elif not name:
            return module
        else:
            # Figure out where to slice the module's name up to the first dot
            # in 'name'.
            cut_off = len(name) - len(name.partition('.')[0])
            # Slice end needs to be positive to alleviate need to special-case
            # when ``'.' not in name``.
            return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
    else:
        return _handle_fromlist(module, fromlist, _gcd_import)
def _builtin_from_name(name):
    """Load the named built-in module directly (bootstrap-only helper)."""
    spec = BuiltinImporter.find_spec(name)
    if spec is None:
        raise ImportError('no built-in module named ' + name)
    return _load_unlocked(spec)
def _setup(sys_module, _imp_module):
    """Setup importlib by importing needed built-in modules and injecting them
    into the global namespace.

    As sys is needed for sys.modules access and _imp is needed to load built-in
    modules, those two modules must be explicitly passed in.

    """
    global _imp, sys
    _imp = _imp_module
    sys = sys_module

    # Set up the spec for existing builtin/frozen modules.
    module_type = type(sys)
    for name, module in sys.modules.items():
        if isinstance(module, module_type):
            if name in sys.builtin_module_names:
                loader = BuiltinImporter
            elif _imp.is_frozen(name):
                loader = FrozenImporter
            else:
                # Not a builtin/frozen module; leave it alone.
                continue
            spec = _spec_from_module(module, loader)
            _init_module_attrs(spec, module)

    # Directly load built-in modules needed during bootstrap.
    self_module = sys.modules[__name__]
    for builtin_name in ('_warnings',):
        if builtin_name not in sys.modules:
            builtin_module = _builtin_from_name(builtin_name)
        else:
            builtin_module = sys.modules[builtin_name]
        # Inject as a module-level global of this (bootstrap) module.
        setattr(self_module, builtin_name, builtin_module)

    # Directly load the _thread module (needed during bootstrap).
    try:
        thread_module = _builtin_from_name('_thread')
    except ImportError:
        # Python was built without threads
        thread_module = None
    setattr(self_module, '_thread', thread_module)

    # Directly load the _weakref module (needed during bootstrap).
    weakref_module = _builtin_from_name('_weakref')
    setattr(self_module, '_weakref', weakref_module)
def _install(sys_module, _imp_module):
    """Install importlib as the implementation of import."""
    _setup(sys_module, _imp_module)
    # Register the bootstrap finders on the meta path.
    sys.meta_path.append(BuiltinImporter)
    sys.meta_path.append(FrozenImporter)
    # Hand off to the file-system half of the bootstrap (frozen separately).
    global _bootstrap_external
    import _frozen_importlib_external
    _bootstrap_external = _frozen_importlib_external
    _frozen_importlib_external._install(sys.modules[__name__])
|
|
#! /usr/bin/env python
"""
couchbasekit.schema
~~~~~~~~~~~~~~~~~~~
:website: http://github.com/kirpit/couchbasekit
:copyright: Copyright 2013, Roy Enjoy <kirpit *at* gmail.com>, see AUTHORS.txt.
:license: MIT, see LICENSE.txt for details.
"""
from abc import ABCMeta
import datetime
from dateutil.parser import parse
from couchbasekit.fields import CustomField
from couchbasekit.errors import StructureError
# Leaf value types that a document structure may declare.
# NOTE(review): this module is Python 2 code — `long`, `unicode` and
# `basestring` do not exist on Python 3.
ALLOWED_TYPES = (
    bool,
    int,
    long,
    float,
    unicode,
    basestring,
    list,
    dict,
    datetime.datetime,
    datetime.date,
    datetime.time,
)
class SchemaDocument(dict):
    """Schema document class that handles validations and restoring raw
    couchbase documents into Python values as defined in model documents.

    Under normal circumstances, you don't use or inherit this class at all,
    because it is only being used by :class:`couchbasekit.document.Document`
    class.

    :param seq: Document data to store at initialization, defaults to None.
    :type seq: dict
    :raises: :exc:`couchbasekit.errors.StructureError` if the minimum
        structure requirements wasn't satisfied.
    """
    # Python 2 style metaclass declaration (ignored on Python 3).
    __metaclass__ = ABCMeta
    StructureError = StructureError
    # Name of the structure field whose value becomes part of the doc key.
    __key_field__ = None
    # Document type tag; subclasses must set it to a str.
    doc_type = None
    # Field-name (or type-pair) -> expected type mapping; subclasses override.
    structure = dict()
    default_values = dict()
    required_fields = tuple()
    # True until the record is known to exist in the bucket.
    is_new_record = True

    def __init__(self, seq=None, **kwargs):
        # check the required attributes
        if not isinstance(self.__bucket_name__, str) or \
           not isinstance(self.doc_type, str) or \
           not isinstance(self.structure, dict):
            raise self.StructureError(msg="Structure is not properly "
                                          "set for %s." % type(self).__name__)
        # check self.__key_field__ if correct
        if self.__key_field__ and self.__key_field__ not in self.structure:
            raise self.StructureError(
                msg="Document key field must be within the "
                    "structure, '%s' is given." % str(self.__key_field__)
            )
        # insert doc_type into the structure
        # NOTE(review): this mutates the *class-level* `structure` dict,
        # which is shared across instances/subclasses — confirm intended.
        self.structure.update(doc_type=unicode)
        # Non-dict seeds are silently discarded.
        seq = seq if isinstance(seq, dict) else {}
        super(SchemaDocument, self).__init__(seq, **kwargs)

    def _decode_dict(self, structure, mapping):
        # Decode a raw dict `mapping` according to the (possibly nested)
        # `structure` definition; converts values in place where possible,
        # or returns a rebuilt dict for type-pair structures.
        for skey, svalue in structure.iteritems():
            map_keys = mapping.keys()
            # this is a type:type structure
            if isinstance(skey, type) and \
               any([not isinstance(k, skey) for k in map_keys]):
                new_keys = [self._decode_item(skey, k) for k in map_keys]
                new_values = [self._decode_item(svalue, v) for v in mapping.values()]
                return dict(zip(new_keys, new_values))
            # item is not within the mapping
            elif skey not in mapping:
                continue
            # decode only mapping value
            else:
                mapping[skey] = self._decode_item(svalue, mapping.get(skey))
        return mapping

    def _decode_item(self, stype, value):
        # Convert a single raw `value` into the Python type declared by
        # `stype`; returns the value untouched when no conversion applies.
        new_value = value
        safe_types = (bool, int, long, float, unicode, basestring, list, dict)
        # newly created or safe type
        if self.is_new_record or stype in safe_types:
            return value
        # fix datetime
        elif stype is datetime.datetime and \
             not isinstance(value, datetime.datetime):
            # see: http://bugs.python.org/issue15873
            # see: http://bugs.python.org/issue6641
            new_value = parse(value)
        # fix date
        elif stype is datetime.date and not isinstance(value, datetime.date):
            new_value = parse(value).date()
        # fix time
        elif stype is datetime.time and not isinstance(value, datetime.time):
            # see: http://bugs.python.org/issue15873
            # see: http://bugs.python.org/issue6641
            new_value = parse(value).timetz()
        # fix CustomField
        elif isinstance(stype, type) and issubclass(stype, CustomField) and \
             not isinstance(value, stype):
            new_value = stype(value)
        # fix document relation
        elif isinstance(stype, type) and issubclass(stype, SchemaDocument) and \
             not isinstance(value, stype):
            if getattr(stype, '__key_field__') is not None:
                # Stored as "<doc_type>_<key>"; re-load the relation by key.
                doc_type, key = value.split('_', 1)
                new_value = stype(key)
            else:
                new_value = stype(value)
        # fix python list [instances]
        elif isinstance(stype, list) and isinstance(value, list) and \
             len(stype)==1 and any([not isinstance(v, stype[0]) for v in value]):
            new_value = [self._decode_item(stype[0], v) for v in value]
        # the type is a dict instance, decode recursively
        elif isinstance(stype, dict) and isinstance(value, dict):
            new_value = self._decode_dict(stype, value)
        return new_value

    def __getitem__(self, item):
        # Lazily decode values on access and cache the converted result.
        # usual error if key not found
        if item not in self:
            raise KeyError(item)
        value = self.get(item)
        # TODO: schemaless should be converted as well
        # schemaless or out of structure
        if item not in self.structure:
            return value
        # make sure the accessed value respects our structure
        try:
            new_value = self._decode_item(self.structure[item], value)
        except ValueError:
            raise ValueError(
                "Incorrect value for the field %s, '%s' was given." % (item, value)
            )
        # cache it
        if new_value is not value:
            self[item] = new_value
        return new_value

    def load(self):
        """Helper function to pre-load all the raw document values into Python
        ones, custom types and/or other document relations as they are defined in
        model document.

        This is only useful when you need the instance to convert all its raw
        values into Python types, custom fields and/or other document relations
        *before* sending that object to somewhere else. For example, sending a
        ``User`` document to your framework's ``login(request, user)`` function.

        If your code is the only one accessing its values such as;
        ``user.posts``, you don't have to ``.load()`` it as they're
        auto-converted and cached on-demand.

        Returns the instance itself (a.k.a. chaining) so you can do:

        >>> book = Book('hhg2g').load()

        :returns: The Document instance itself on which was called from.
        """
        # NOTE(review): uses getattr(), not item access — conversion only
        # happens if the subclass maps attribute access to __getitem__;
        # confirm Document provides that delegation.
        [getattr(self, k) for k in self.iterkeys()]
        return self

    def _validate(self, structure, mapping):
        # Recursively check `mapping` against `structure`; raises
        # StructureError on the first mismatch, returns True otherwise.
        # check the dict structure
        for skey, svalue in structure.iteritems():
            # STRUCTURE KEY (FIELD) IS A TYPE
            # i.e. {unicode: int}
            if skey in ALLOWED_TYPES:
                # if it's a type pair, must be the only item
                if not len(structure)==1:
                    raise self.StructureError(
                        msg="Type pairs must be the only item in a dictionary, "
                            "there are %d." % len(structure)
                    )
                # key instance must be hash()'able at this point
                # but we can't catch'em all as every instance is
                # not simply created by skey(), unfortunately
                try: hash(skey())
                except TypeError as why:
                    # NOTE(review): `why.message` is Python 2 only.
                    if 'unhashable type' in why.message:
                        raise self.StructureError(
                            msg="Structure keys must be hashable, "
                                "'%s' given." % skey.__name__
                        )
                    # yes, we ignore the rest of TypeErrors
                    pass
                # check all the key types in the dict
                for k in mapping.iterkeys():
                    if not isinstance(k, skey):
                        raise self.StructureError(k, skey, k)
                # structure value is a list [instance]
                if isinstance(svalue, list):
                    # and must have only 1 item
                    if not len(svalue)==1:
                        raise self.StructureError(
                            msg="List values must have only 1 item, "
                                "'%s' had %d." % (skey, len(svalue))
                        )
                    elif not (isinstance(svalue[0], type) and
                              (svalue[0] in ALLOWED_TYPES or
                               issubclass(svalue[0], (CustomField, SchemaDocument)))):
                        raise self.StructureError(
                            msg="A list has an invalid option in its "
                                "structure, '%s' is given." % svalue[0]
                        )
                    for k, list_val in mapping.iteritems():
                        if not all([isinstance(v, svalue[0]) for v in list_val]):
                            raise self.StructureError(k, svalue, list_val)
                # structure value is an ALLOWED_TYPE, CustomField or Document
                elif svalue in ALLOWED_TYPES or \
                     (isinstance(svalue, type) and
                      issubclass(svalue, (CustomField, SchemaDocument))):
                    for k, v in mapping.iteritems():
                        if not isinstance(v, svalue):
                            raise self.StructureError(k, svalue, v)
                # type-pair fully handled; move to the next structure item.
                continue
            # STRUCTURE KEY (FIELD) IS A STRING
            # field not set or None anyway
            if skey not in mapping or mapping.get(skey) is None:
                continue
            # is it in allowed types?
            elif svalue in ALLOWED_TYPES and isinstance(mapping[skey], svalue):
                continue
            # some custom type or document relation?
            elif isinstance(svalue, type) and \
                 issubclass(svalue, (CustomField, SchemaDocument)) and \
                 isinstance(mapping[skey], svalue):
                continue
            # structure value is a list [instance]
            elif isinstance(svalue, list):
                # and must have only 1 item
                if not len(svalue)==1:
                    raise self.StructureError(
                        msg="List values must have only 1 item, "
                            "'%s' had %d." % (skey, len(svalue))
                    )
                if isinstance(svalue[0], type) and \
                   (svalue[0] in ALLOWED_TYPES or
                    issubclass(svalue[0], (CustomField, SchemaDocument))) and \
                   isinstance(mapping[skey], list) and \
                   all([isinstance(v, svalue[0]) for v in mapping[skey]]):
                    continue
            # it's a dictionary instance, check recursively
            elif isinstance(svalue, dict) and \
                 isinstance(mapping[skey], dict):
                if self._validate(svalue, mapping[skey]):
                    continue
            # houston, we got a problem!
            raise self.StructureError(skey, svalue, mapping[skey])
        return True

    def validate(self):
        """Validates the document object with current values, always called
        within :meth:`couchbasekit.document.Document.save` method.

        :returns: Always True, or raises
            :exc:`couchbasekit.errors.StructureError` exception.
        :raises: :exc:`couchbasekit.errors.StructureError` if
            any validation problem occurs.
        """
        # __key_field__ value must be provided if defined
        if self.__key_field__ and self.__key_field__ not in self:
            raise self.StructureError(msg="Key field '%s' is defined "
                                          "but not provided." % self.__key_field__)
        # check the required fields first
        for required in self.required_fields:
            if (required not in self and required not in self.default_values) or \
               (required in self and self[required] is None):
                raise self.StructureError(
                    msg = "Required field for '%s' is missing." % required
                )
        return self._validate(self.structure, self)
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import shutil
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifFile, CifParser
from pymatgen.io.feff.inputs import Atoms, Header, Potential, Tags
from pymatgen.io.feff.sets import FEFFDictSet, MPELNESSet, MPEXAFSSet, MPXANESSet
from pymatgen.util.testing import PymatgenTest
class FeffInputSetTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Reference FEFF header for CoO; test_get_header compares lines
        # verbatim, so the flush-left string content is significant.
        cls.header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.333333 0.666667 0.503676
* 2 Co 0.666667 0.333333 0.003676
* 3 O 0.333333 0.666667 0.121324
* 4 O 0.666667 0.333333 0.621325"""
        # Shared fixtures: CoO structure with O as the absorbing atom.
        cif_file = os.path.join(PymatgenTest.TEST_FILES_DIR, "CoO19128.cif")
        cls.structure = CifParser(cif_file).get_structures()[0]
        cls.absorbing_atom = "O"
        cls.mp_xanes = MPXANESSet(cls.absorbing_atom, cls.structure)
def test_get_header(self):
comment = "From cif file"
header = str(self.mp_xanes.header(source="CoO19128.cif", comment=comment))
print(header)
ref = self.header_string.splitlines()
last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
for i, l in enumerate(header.splitlines()):
if i < 9:
self.assertEqual(l, ref[i])
else:
s = " ".join(l.split()[2:])
self.assertIn(s, last4)
def test_getfefftags(self):
tags = self.mp_xanes.tags.as_dict()
self.assertEqual(tags["COREHOLE"], "FSR", "Failed to generate PARAMETERS string")
def test_get_feffPot(self):
POT = str(self.mp_xanes.potential)
d, dr = Potential.pot_dict_from_string(POT)
self.assertEqual(d["Co"], 1, "Wrong symbols read in for Potential")
def test_get_feff_atoms(self):
atoms = str(self.mp_xanes.atoms)
self.assertEqual(
atoms.splitlines()[3].split()[4],
self.absorbing_atom,
"failed to create ATOMS string",
)
def test_to_and_from_dict(self):
f1_dict = self.mp_xanes.as_dict()
f2 = MPXANESSet.from_dict(f1_dict)
self.assertEqual(f1_dict, f2.as_dict())
def test_user_tag_settings(self):
tags_dict_ans = self.mp_xanes.tags.as_dict()
tags_dict_ans["COREHOLE"] = "RPA"
tags_dict_ans["EDGE"] = "L1"
user_tag_settings = {"COREHOLE": "RPA", "EDGE": "L1"}
mp_xanes_2 = MPXANESSet(self.absorbing_atom, self.structure, user_tag_settings=user_tag_settings)
self.assertEqual(mp_xanes_2.tags.as_dict(), tags_dict_ans)
def test_eels_to_from_dict(self):
elnes = MPELNESSet(
self.absorbing_atom,
self.structure,
radius=5.0,
beam_energy=100,
beam_direction=[1, 0, 0],
collection_angle=7,
convergence_angle=6,
)
elnes_dict = elnes.as_dict()
elnes_2 = MPELNESSet.from_dict(elnes_dict)
self.assertEqual(elnes_dict, elnes_2.as_dict())
def test_eels_tags_set(self):
    """ELNES card reflects explicit EELS settings or keyword beam parameters."""
    cluster_radius = 5.0
    custom_settings = {
        "ENERGY": "4 0.04 0.1",
        "BEAM_ENERGY": "200 1 0 1",
        "ANGLES": "2 3",
    }
    elnes_custom = MPELNESSet(
        self.absorbing_atom,
        self.structure,
        radius=cluster_radius,
        user_eels_settings=custom_settings,
    )
    elnes_default = MPELNESSet(
        self.absorbing_atom,
        self.structure,
        radius=cluster_radius,
        beam_energy=100,
        beam_direction=[1, 0, 0],
        collection_angle=7,
        convergence_angle=6,
    )
    # Explicit user settings must pass through verbatim.
    for key in ("ENERGY", "BEAM_ENERGY", "ANGLES"):
        self.assertEqual(elnes_custom.tags["ELNES"][key], custom_settings[key])
    # Keyword parameters are translated into the ELNES card.
    self.assertEqual(elnes_default.tags["ELNES"]["BEAM_ENERGY"], [100, 0, 1, 1])
    self.assertEqual(elnes_default.tags["ELNES"]["BEAM_DIRECTION"], [1, 0, 0])
    self.assertEqual(elnes_default.tags["ELNES"]["ANGLES"], [7, 6])
def test_reciprocal_tags_and_input(self):
    """RECIPROCAL-mode ELNES uses a CIF file instead of ATOMS/POTENTIALS cards."""
    user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
    elnes = MPELNESSet(self.absorbing_atom, self.structure, user_tag_settings=user_tag_settings)
    self.assertTrue("RECIPROCAL" in elnes.tags)
    self.assertEqual(elnes.tags["TARGET"], 3)
    self.assertEqual(elnes.tags["KMESH"], "1000")
    self.assertEqual(elnes.tags["CIF"], "Co2O2.cif")
    self.assertEqual(elnes.tags["COREHOLE"], "RPA")
    all_input = elnes.all_input()
    # In reciprocal mode the structure comes from the CIF file, so the
    # ATOMS and POTENTIALS sections must be absent from the generated input.
    self.assertNotIn("ATOMS", all_input)
    self.assertNotIn("POTENTIALS", all_input)
    elnes.write_input()
    # The CIF written alongside the input must describe the same structure.
    structure = Structure.from_file("Co2O2.cif")
    self.assertTrue(self.structure.matches(structure))
    # Clean up the files written into the working directory.
    os.remove("HEADER")
    os.remove("PARAMETERS")
    os.remove("feff.inp")
    os.remove("Co2O2.cif")
def test_small_system_EXAFS(self):
    """EXAFS sets never run in reciprocal mode, even when requested."""
    exafs_default = MPEXAFSSet(self.absorbing_atom, self.structure)
    self.assertFalse(exafs_default.small_system)
    self.assertTrue("RECIPROCAL" not in exafs_default.tags)
    # An explicit RECIPROCAL request is ignored for EXAFS as well.
    exafs_reciprocal = MPEXAFSSet(
        self.absorbing_atom,
        self.structure,
        nkpts=1000,
        user_tag_settings={"RECIPROCAL": ""},
    )
    self.assertFalse(exafs_reciprocal.small_system)
    self.assertTrue("RECIPROCAL" not in exafs_reciprocal.tags)
def test_number_of_kpoints(self):
    """nkpts is converted into a KMESH shaped to the reciprocal cell."""
    elnes_set = MPELNESSet(
        self.absorbing_atom,
        self.structure,
        nkpts=1000,
        user_tag_settings={"RECIPROCAL": ""},
    )
    self.assertEqual(elnes_set.tags["KMESH"], [12, 12, 7])
def test_large_systems(self):
    """Large cells fall back to real space: all reciprocal tags are dropped."""
    big_structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "La4Fe4O12.cif"))
    elnes_set = MPELNESSet("Fe", big_structure, user_tag_settings={"RECIPROCAL": "", "KMESH": "1000"})
    for dropped_tag in ("RECIPROCAL", "KMESH", "CIF", "TARGET"):
        self.assertNotIn(dropped_tag, elnes_set.tags)
def test_postfeffset(self):
    """Round-trip a written input set through FEFFDictSet.from_directory."""
    self.mp_xanes.write_input(os.path.join(".", "xanes_3"))
    feff_dict_input = FEFFDictSet.from_directory(os.path.join(".", "xanes_3"))
    # Tags and header re-read from disk must match what was just written.
    self.assertTrue(feff_dict_input.tags == Tags.from_file(os.path.join(".", "xanes_3/feff.inp")))
    self.assertTrue(str(feff_dict_input.header()) == str(Header.from_file(os.path.join(".", "xanes_3/HEADER"))))
    feff_dict_input.write_input("xanes_3_regen")
    origin_tags = Tags.from_file(os.path.join(".", "xanes_3/PARAMETERS"))
    output_tags = Tags.from_file(os.path.join(".", "xanes_3_regen/PARAMETERS"))
    origin_mole = Atoms.cluster_from_file(os.path.join(".", "xanes_3/feff.inp"))
    output_mole = Atoms.cluster_from_file(os.path.join(".", "xanes_3_regen/feff.inp"))
    # Compare the first row of the distance matrix (distances from site 0)
    # and the per-site species ordering of the two clusters.
    original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
    output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
    original_mole_shell = [x.species_string for x in origin_mole]
    output_mole_shell = [x.species_string for x in output_mole]
    self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
    self.assertTrue(origin_tags == output_tags)
    self.assertTrue(original_mole_shell == output_mole_shell)
    shutil.rmtree(os.path.join(".", "xanes_3"))
    shutil.rmtree(os.path.join(".", "xanes_3_regen"))
    # Reciprocal-space sets keep RECIPROCAL and omit ATOMS/POTENTIALS files.
    reci_mp_xanes = MPXANESSet(self.absorbing_atom, self.structure, user_tag_settings={"RECIPROCAL": ""})
    reci_mp_xanes.write_input("xanes_reci")
    feff_reci_input = FEFFDictSet.from_directory(os.path.join(".", "xanes_reci"))
    self.assertTrue("RECIPROCAL" in feff_reci_input.tags)
    feff_reci_input.write_input("Dup_reci")
    self.assertTrue(os.path.exists(os.path.join(".", "Dup_reci", "HEADER")))
    self.assertTrue(os.path.exists(os.path.join(".", "Dup_reci", "feff.inp")))
    self.assertTrue(os.path.exists(os.path.join(".", "Dup_reci", "PARAMETERS")))
    self.assertFalse(os.path.exists(os.path.join(".", "Dup_reci", "ATOMS")))
    self.assertFalse(os.path.exists(os.path.join(".", "Dup_reci", "POTENTIALS")))
    tags_original = Tags.from_file(os.path.join(".", "xanes_reci/feff.inp"))
    tags_output = Tags.from_file(os.path.join(".", "Dup_reci/feff.inp"))
    self.assertTrue(tags_original == tags_output)
    # The regenerated set must carry an equal structure in its CIF file.
    stru_orig = Structure.from_file(os.path.join(".", "xanes_reci/Co2O2.cif"))
    stru_reci = Structure.from_file(os.path.join(".", "Dup_reci/Co2O2.cif"))
    self.assertTrue(stru_orig.__eq__(stru_reci))
    shutil.rmtree(os.path.join(".", "Dup_reci"))
    shutil.rmtree(os.path.join(".", "xanes_reci"))
def test_post_distdiff(self):
    """Regenerating a distance-sorted input preserves tags, distances and shells."""
    feff_dict_input = FEFFDictSet.from_directory(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test"))
    self.assertTrue(
        feff_dict_input.tags == Tags.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/feff.inp"))
    )
    self.assertTrue(
        str(feff_dict_input.header())
        == str(Header.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/HEADER")))
    )
    feff_dict_input.write_input("feff_dist_regen")
    origin_tags = Tags.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/PARAMETERS"))
    output_tags = Tags.from_file(os.path.join(".", "feff_dist_regen/PARAMETERS"))
    origin_mole = Atoms.cluster_from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "feff_dist_test/feff.inp"))
    output_mole = Atoms.cluster_from_file(os.path.join(".", "feff_dist_regen/feff.inp"))
    # Compare the first row of the distance matrix (distances from site 0)
    # and the per-site species ordering of the two clusters.
    original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
    output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
    original_mole_shell = [x.species_string for x in origin_mole]
    output_mole_shell = [x.species_string for x in output_mole]
    self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
    self.assertTrue(origin_tags == output_tags)
    self.assertTrue(original_mole_shell == output_mole_shell)
    shutil.rmtree(os.path.join(".", "feff_dist_regen"))
# Run the FEFF input-set test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
|
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
import os
import pylab as pl
import sys
from sensor_msgs.msg import CompressedImage
from aidu_elevator.msg import Button
from images import convert
from pymongo import MongoClient
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.svm import SVC
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from time import clock
# Shared MongoDB connection and the collections holding labelled and
# still-unlabelled elevator-button image samples.
db = MongoClient()
db_buttons = db['aidu']['elevator_buttons']
db_untested = db['aidu']['elevator_buttons_untested']
# Label names encountered while loading training data (filled lazily by
# process_label()).
labels = []
# Filesystem locations of the data directory and the serialized models.
data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/'))
model_directory = os.path.abspath(os.path.join(data_path, 'classification_model'))
# Mapping from string labels stored in the database to Button msg constants.
label_map = {'1': Button.BUTTON_1,
             '2': Button.BUTTON_2,
             '3': Button.BUTTON_3,
             '4': Button.BUTTON_4,
             'B': Button.BUTTON_B,
             'K': Button.BUTTON_K,
             'up': Button.BUTTON_UP,
             'down': Button.BUTTON_DOWN,
             'none': Button.BUTTON_NONE}
# Rolling confidence accumulator per Button constant; decayed and boosted
# in callback() before a detection is published.
certainties = {k: 0 for v, k in label_map.iteritems()}
inverse_label_map = {v: k for k, v in label_map.items()}
def threshold(image):
    """Binarize a BGR button image and return the inverted 8-bit mask."""
    # Convert to a single grayscale channel first.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Apply adaptive gaussian threshold.
    # NOTE(review): maxValue is 225, not 255, so the inverted output below is
    # 30/255 rather than a clean 0/255 mask -- presumably intentional; confirm.
    image = cv2.adaptiveThreshold(image, 225, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 3)
    # Small median blur to remove speckle noise from the thresholded image.
    image = cv2.medianBlur(image, 3)
    # Invert so foreground strokes become bright.
    return 255 - image
def progressor(generator):
    """Yield every item of *generator* while printing a live sample counter."""
    for sample_index, element in enumerate(generator):
        sys.stdout.write('\r[%d samples loaded]' % sample_index)
        sys.stdout.flush()
        yield element
    sys.stdout.write('\n')
def process_label(label):
    """Normalize *label* (None becomes 'none') and record it in the global list."""
    normalized = 'none' if label is None else label
    if normalized not in labels:
        labels.append(normalized)
    return normalized
def get_feature_vector(image):
    """Flatten a denoised binary version of *image* into a float feature vector."""
    binary = threshold(image)
    denoised = cv2.medianBlur(binary, 5)
    return denoised.flatten().astype(np.float)
def get_onoff_feature_vector(image):
    """Feature vector for the lit/unlit classifier: orange-pixel mask of the rim."""
    masked = np.array(image)
    # Blank out the centre square so only the illuminated rim contributes.
    masked[20:80, 20:80] = [0, 0, 0]
    lower_orange = np.array([12, 80, 100], np.uint8)
    upper_orange = np.array([26, 255, 215], np.uint8)
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    orange_mask = cv2.inRange(hsv, lower_orange, upper_orange)
    return orange_mask.flatten().astype(np.float)
def train_save_test(clf, X, y, filename, lbl):
    """Fit *clf* on all data, persist it, then report hold-out performance.

    The model saved to *filename* is trained on the full data set; the
    classification report and confusion matrix are then computed on a fresh
    60/40 train/test split so the reported numbers are not trivially
    optimistic.
    """
    print('Training')
    clf.fit(X, y)
    print('Saving to file')
    joblib.dump(clf, os.path.join(model_directory, filename), compress=9)
    print('Testing')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    clf.fit(X_train, y_train)
    pred = clf.predict(X_test)
    cm = confusion_matrix(y_test, pred, labels=lbl)
    # Normalize each row by its own total. The previous code divided by
    # cm.sum(axis=1) without reshaping, which broadcast the row totals
    # across columns and normalized the wrong axis of the matrix.
    cm = cm.astype(np.float) / cm.sum(axis=1)[:, np.newaxis]
    print(classification_report(y_test, pred, labels=lbl))
    # Show confusion matrix in a separate window
    pl.matshow(cm)
    pl.title('Confusion matrix')
    pl.colorbar()
    pl.ylabel('True label')
    pl.xlabel('Predicted label')
    pl.show()
def train():
    """Train and persist both classifiers from the labelled MongoDB samples.

    Trains (1) a one-vs-rest button-identity classifier on thresholded
    images and (2) a binary lit/unlit classifier on orange-mask features,
    saving each via train_save_test().
    """
    print 'Button identifier classifier'
    clf = Pipeline([
        ('normalizer', MinMaxScaler()),
        ('classifier', OneVsRestClassifier(LogisticRegression(penalty='l2', C=1e-1)))]  # LogisticRegression(penalty='l2', C=1e-4)
    )
    X = []
    y = []
    print 'Getting data'
    n = 0
    # Every stored sample becomes one feature vector plus its string label.
    for button in progressor(db_buttons.find({})):
        #x = np.array( button['image'], dtype=np.uint8 )
        #img = cv2.imdecode(x, 1)
        img = convert(button['image'], input_type='ros', output_type='cv2')
        vector = get_feature_vector(img)
        #display_button(img, button['label'], '')
        #key = cv2.waitKey() % 256
        #if(key == ord('s')):
        #    n += 1
        #    cv2.imwrite('/home/rolf/Desktop/%d.jpg' % n, img)
        #print vector
        X.append(vector)
        y.append(process_label(button['label']))
    train_save_test(clf, X, y, 'ovr_lr.pkl', labels)
    print 'Button on/off classifier'
    X = []
    y = []
    print 'Getting data'
    # Same samples, but with orange-mask features and a binary on/off target.
    for button in progressor(db_buttons.find()):
        #x = np.array( button['img'], dtype=np.uint8 )
        #img = cv2.imdecode(x, 1)
        img = convert(button['image'], input_type='ros', output_type='cv2')
        vector = get_onoff_feature_vector(img)
        #print button.get('on')
        #vector = get_onoff_feature_vector(img)
        #print sum(vector)
        #print len(vector)
        X.append(vector)
        y.append(1 if button.get('on') == True else 0)
    clf = Pipeline([
        ('normalizer', MinMaxScaler()),
        ('classifier', LogisticRegression(penalty='l2', C=1e1, class_weight='auto'))]  # LogisticRegression(penalty='l2', C=1e0))]
    )
    train_save_test(clf, X, y, 'onoff_lr.pkl', [0, 1])
def assign_message_label(button_message, label):
    """Stamp the Button message type corresponding to the string *label*."""
    button_type = label_map.get(label)
    button_message.button_type = button_type
def display_button(img, label_str, onoff_str=''):
    """Show *img* in a debug window annotated with label and on/off text."""
    for text, position in ((label_str, (2, 20)), (onoff_str, (2, 94))):
        cv2.putText(img, text, position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255)
    cv2.imshow('Button', img)
    cv2.waitKey(10)
def callback(button):
    """Classify an incoming Button image and republish it once confident.

    Keeps a decaying per-button-type confidence accumulator so a single
    noisy frame cannot trigger a publication; only a button whose
    accumulated certainty exceeds the threshold is pushed to the
    classified topic.
    """
    try:
        img = convert(button.image.data, input_type='ros', output_type='cv2')
        vector = get_feature_vector(img)
        label = clf.predict([vector])[0]
        try:
            p = np.max(clf.predict_proba([vector])[0])
        except Exception:
            # Some classifiers cannot report probabilities; assume certainty.
            p = 1.0
        assign_message_label(button, label)
        if button.button_type != button.BUTTON_NONE:
            rospy.loginfo('Uncertain found button %s (%.2f%%) (C:%.2f)' % (label, p, certainties[button.button_type]))
        # Decay every accumulator by one point per frame.
        for k, v in certainties.iteritems():
            certainties[k] = max(0, certainties[k] - 1)
        if button.button_type != button.BUTTON_NONE and p > 0.7:
            # Bug fix: this previously read certainties[k], i.e. whichever key
            # the decay loop happened to visit last, instead of the
            # accumulator belonging to the detected button type.
            certainties[button.button_type] = min(10, certainties[button.button_type] + 3 * p)
        if certainties[button.button_type] > 2.7:
            rospy.loginfo('Found button %s' % label)
            # predict() takes a list of samples, matching the calls above
            # (the original passed the bare 1-D vector).
            on = onoff_clf.predict([get_onoff_feature_vector(img)])[0]
            button.on = True if on else False
            button_publisher.publish(button)
    except Exception as e:
        rospy.logwarn(e)
def run_node():
    """Load the persisted classifiers and run the ROS classification node."""
    global clf, onoff_clf, button_publisher
    try:
        identifier_model = os.path.join(model_directory, 'ovr_lr.pkl')
        onoff_model = os.path.join(model_directory, 'onoff_lr.pkl')
        clf = joblib.load(identifier_model)
        onoff_clf = joblib.load(onoff_model)
        rospy.init_node('button_classifier', anonymous=True)
        rospy.Subscriber("/elevator/button", Button, callback)
        button_publisher = rospy.Publisher("/elevator/button/classified", Button)
        rospy.spin()
    except Exception as e:
        rospy.logerr(e)
        return
def main():
    """Entry point: a single 'train' argument retrains; otherwise run the node."""
    wants_training = len(sys.argv) == 2 and sys.argv[1] == 'train'
    if wants_training:
        train()
    else:
        run_node()
# Script entry point: dispatch to training or the live ROS node.
if __name__ == "__main__":
    main()
|
|
import sys, os, re
import biana
try: from biana import *
except: sys.exit(10)
"""
NetworkAnalysis
2017 Joaquim Aguirre-Plans
Structural Bioinformatics Laboratory
Universitat Pompeu Fabra
"""
def generate_network(targets_list, targets_type_id, radius, taxid, translation_file, translation_type_id, node_file, edge_file,
                     restricted_to_TAP = False, restricted_to_Y2H = False, restricted_to_user = None,
                     except_TAP = False, except_Y2H = False, except_user = None,
                     database = 'BIANA_JUN_2017', unification_protocol = 'geneid_seqtax_v1',
                     output_format = 'sif', verbose = False):
    """
    Generates a complete interactome network if the radius = 0.
    Generates a network of expansion if the radius > 0. This means that the center of the network will be the targets,
    and the network will be expanded as many levels as indicated in the parameter radius.
    Parameters:
        @targets_list: List of targets from which the network of expansion starts
        @targets_type_id: Type of ID of the targets introduced (i.e. geneid, uniprotaccession, uniprotentry)
        @radius: Number of levels of expansion of the network. If 0, the complete interactome will be generated
        @taxid: Restrict the proteins of the network to a concrete species, indicated by its Taxonomy ID
        @translation_file: The function will generate a network in BIANA codes, but also a translation file containing the
                           translations from BIANA codes to the desired type of ID
        @translation_type_id: Indicate the type of ID in which the function will translate the BIANA codes (i.e. geneid,
                              uniprotaccession, uniprotentry...)
        @node_file: Name of the output file with all the nodes of the network
        @edge_file: Name of the output file with all the edges (interactions) of the network
        @restricted_to_TAP: Restrict the interactions of the network to the ones that have been reported by Tandem Affinity
                            Purification (TAP) methods
        @restricted_to_Y2H: Restrict the interactions of the network to the ones that have been reported by Yeast to Hybrid
                            (Y2H) methods
        @restricted_to_user: File containing methods IDs separated by newline characters. It will restrict the interactions of
                             the network to the ones reported by the methods indicated in the file
        @except_TAP: Restrict the interactions of the network to the ones that have been reported by at least one
                     method which is not a Tandem Affinity Purification method
        @except_Y2H: Restrict the interactions of the network to the ones that have been reported by at least one
                     method which is not a Yeast to Hybrid method
        @except_user: File containing methods IDs separated by newline characters. Restrict the interactions of the
                      network to the ones that have been reported by at least one method which is not in the file
        @database: Name of the BIANA database used.
                   default: 'BIANA_JUN_2017'
        @unification_protocol: Name of the unification protocol used in the BIANA database selected.
                               default: 'geneid_seqtax_v1'
        @output_format: Output format of the network. It can be:
                        'sif': <node1>\tscore\t<node2>
                        'netscore': <node1>\t<node2>\t<score>
                        'multi-fields' : <node1>\t<node2>\t<sources>\t<method_ids>\t<method_names>\t<pmids>
                        default: 'sif'
        @verbose: default: False
    """
    # Parameters that I have decided to fix
    restricted_to_seeds = False
    minimum_number_of_methods = 1
    minimum_number_of_db = 1
    # Restricted by the user: read the whitelist of method IDs, if given.
    if not fileExist(restricted_to_user):
        print "No restriction on methods selected by the user"
        user_selection=False
    else:
        use_methods=[]
        input_method=open(restricted_to_user)
        for line in input_method:
            fields = line.strip().split("\t")
            use_methods.append(fields[0])
        input_method.close()
        user_selection=True
        #print "Input to use only Methods:",repr(use_methods)
    # Rejected by the user: read the blacklist of method IDs, if given.
    if not fileExist(except_user):
        print "No rejection of methods selected by the user"
        user_rejection=False
    else:
        no_methods=[]
        input_method=open(except_user)
        for line in input_method:
            fields = line.strip().split("\t")
            no_methods.append(fields[0])
        input_method.close()
        user_rejection=True
        #print "Input of rejected Methods:",repr(no_methods)
    session = create_new_session(sessionID="biana_session",dbname=database,dbhost="localhost",
                                 dbuser="quim",dbpassword=None,
                                 unification_protocol=unification_protocol)
    # Create network network of expansion if the radius is larger than 0
    if radius > 0:
        if len(targets_list) < 3:
            print "There are not enough targets to perform the analysis"
            sys.exit(10)
        else:
            level=radius
            proteome = session.create_new_user_entity_set( identifier_description_list =targets_list,
                                                           attribute_restriction_list=[("taxid",taxid)],
                                                           id_type=targets_type_id,new_user_entity_set_id="proteome",
                                                           negative_attribute_restriction_list=[] )
    # Create complete network or interactome
    else:
        level=0
        proteome = session.create_new_user_entity_set( identifier_description_list = [("taxid",taxid)],
                                                       attribute_restriction_list=[], id_type="embedded",
                                                       new_user_entity_set_id="proteome",
                                                       negative_attribute_restriction_list=[] )
    # Select interactions
    if restricted_to_TAP:
        session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
                                relation_attribute_restriction_list = [("Method_id",400)],
                                #relation_attribute_restriction_list = [("psimi_name","affinity technology")],
                                include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
    elif restricted_to_Y2H:
        #print "restricting to y2h"
        session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
                                relation_attribute_restriction_list = [("Method_id",18)],
                                #relation_attribute_restriction_list = [("psimi_name","y2h2")],
                                include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
    else:
        session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
                                include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
    # Summary of interactions
    out_network = open(edge_file,'w')
    all_interactions = proteome.getRelations()
    print "Num interactions:", len(all_interactions)
    nodes=set()
    # Get all the user entity ids from the user entity set 'proteome'
    all_uEs = proteome.get_user_entity_ids()
    # Obtain a dictionary user entity ID => type
    uEId_to_type = session.dbAccess.get_user_entity_type(unification_protocol, all_uEs)
    skip_interactions=0
    for (uE_id1, uE_id2) in all_interactions:
        #self.dbAccess.get_external_entities_dict( externalEntityIdsList = [external_entity_relation_id] )
        # Get TYPE of user entity
        uE1_type = uEId_to_type[uE_id1]
        uE2_type = uEId_to_type[uE_id2]
        # If type is not protein, we skip the interaction
        if uE1_type != 'protein' or uE2_type != 'protein':
            if verbose:
                print('Skipping interaction because the type of one of the user entities is not protein!')
                print('Node 1: {}\tType: {}'.format(uE_id1, uE1_type))
                print('Node 2: {}\tType: {}'.format(uE_id2, uE2_type))
            skip_interactions=skip_interactions+1
            continue
        eErIDs_list = proteome.get_external_entity_relation_ids(uE_id1, uE_id2)
        method_names = set()
        method_ids = set()
        source_databases = set()
        use_method_ids=set()
        pubmed_ids = set()
        relationObj_dict = session.dbAccess.get_external_entities_dict(
            externalEntityIdsList = eErIDs_list, attribute_list = [],
            relation_attribute_list = ["method_id","psimi_name","pubmed"], participant_attribute_list = [] )
        num_methods=0
        # Collect method names/IDs, pubmed IDs and source databases across
        # every external entity relation supporting this interaction.
        for current_eErID in eErIDs_list:
            relationObj = relationObj_dict[current_eErID]
            if verbose:
                print "Interaction: (",uE_id1,",",uE_id2,")"
                print relationObj
            #if relationObj.get_attribute(attribute_identifier="psimi_name") is not None:
            #    print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="psimi_name") ])
            #if relationObj.get_attribute(attribute_identifier="method_id") is not None:
            #print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="method_id") ])
            #print relationObj.get_attributes_dict()
            #print [ x.value for x in relationObj.get_attributes_dict()["psimi_name"] ]
            #print [ x.value for x in relationObj.get_attributes_dict()["method_id"] ]
            if "psimi_name" in relationObj.get_attributes_dict():
                method_names.update([ str(x.value) for x in relationObj.get_attributes_dict()["psimi_name"] ])
            if "method_id" in relationObj.get_attributes_dict():
                method_ids.update([ x.value for x in relationObj.get_attributes_dict()["method_id"]])
            if "pubmed" in relationObj.get_attributes_dict():
                pubmed_ids.update([ x.value for x in relationObj.get_attributes_dict()["pubmed"]])
            source_databases.add(str(session.dbAccess.get_external_database(
                database_id = relationObj.get_source_database()) ))
        # Decide which method IDs qualify this interaction, according to the
        # filtering mode chosen by the caller.
        if except_TAP:
            affinity = get_affinity_method_ids()
            for m in method_ids:
                if m not in affinity:
                    use_method_ids.add(m)
                    #print "Add", m
        elif except_Y2H:
            complementation = get_complementation_method_ids()
            #print "check Y2H"
            for m in method_ids:
                if m not in complementation:
                    use_method_ids.add(m)
                    #print "Add", m
        elif user_rejection:
            for m in method_ids:
                if m not in no_methods:
                    use_method_ids.add(m)
        elif user_selection:
            for m in method_ids:
                #print "Check",repr(use_methods)
                if m in set(use_methods):
                    use_method_ids.add(m)
                elif verbose:
                    print "Not among selected methods ",m
        else:
            use_method_ids.update(method_ids)
        # An interaction without any source database is discarded.
        if len(source_databases) > 0:
            info_sources=";".join([str(x) for x in source_databases])
        else:
            if verbose:
                print('Skipping interaction it has no source database!')
                print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2))
            skip_interactions=skip_interactions+1
            continue
        if len(method_names) > 0:
            info_methods=";".join([str(x) for x in method_names])
        else:
            info_methods='-'
        # An interaction without any accepted method ID is discarded.
        if len(use_method_ids) > 0:
            info_methods_ids=";".join([str(x) for x in use_method_ids])
        else:
            if verbose:
                print('Skipping interaction it has no method!')
                print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2))
            skip_interactions=skip_interactions+1
            continue
        if len(pubmed_ids) > 0:
            info_pubmed_ids=";".join([str(x) for x in pubmed_ids])
        else:
            info_pubmed_ids='-'
        num_databases=len(source_databases)
        num_methods=len(use_method_ids)
        num_pubmeds = len(pubmed_ids)
        if verbose:
            print "Methods",num_methods,info_methods,"\tSelected:",info_methods_ids
            print "Databases",num_databases,info_sources
            print "Pubmeds",num_pubmeds,info_pubmed_ids
        # Apply the minimum-evidence thresholds fixed at the top.
        if num_methods >= minimum_number_of_methods:
            use=True
        else:
            use=False
        if use and num_databases >= minimum_number_of_db:
            use=True
        else:
            use=False
        if not use:
            skip_interactions=skip_interactions+1
        #print method_names, method_ids, source_databases
        # Output edge file
        if use:
            nodes.add(uE_id1)
            nodes.add(uE_id2)
            if verbose:
                if output_format == 'multi-fields' :
                    out_network.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".
                                      format(uE_id1,uE_id2,info_sources,info_methods_ids,info_methods,info_pubmed_ids))
                elif output_format == 'netscore':
                    out_network.write('\t{}\t{}\t{:.2f}\n'.format(uE_id1,uE_id2,1))
                else:
                    out_network.write("{}\t{:.2f}\t{}\n".format(uE_id1,1.,uE_id2))
            else:
                if output_format == 'multi-fields' :
                    out_network.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".
                                      format(uE_id1,uE_id2,info_sources,info_methods_ids,info_methods,info_pubmed_ids))
                elif output_format == 'netscore':
                    out_network.write('\t{}\t{}\t{:.2f}\n'.format(uE_id1,uE_id2,1.))
                else:
                    out_network.write("{}\t{:.2f}\t{}\n".format(uE_id1,1.,uE_id2))
    print "Num neglected interactions:", skip_interactions
    out_network.close()
    # If we wanted the complete interactome, the translation will be done differently
    if radius == 0:
        # Output node file
        out_proteins = open(node_file,'w')
        for protein in nodes:
            if output_format == 'multi-fields':
                out_proteins.write("{0}\t{1:10.2f}\t{2:10.2f}\t{3:10.2f}\n".format(protein,1.,1.,0.1))
            elif output_format == 'netscore':
                out_proteins.write("{0}\t{1:10.2f}\t{2:10.2f}\t{3:10.2f}\n".format(protein,1.,1.,0.1))
            else:
                out_proteins.write("{0}\t{1:10.2f}\n".format(protein,0.1))
        out_proteins.close()
        ################################# TRANSLATION ####################################
        out_translation = open(translation_file,'w')
        for protein in nodes:
            uE = session.get_user_entity(protein)
            translate=set()
            translate_uni=set()
            if translation_type_id == "proteinsequence":
                # For sequences, keep only the longest sequence(s) attached
                # to the user entity.
                maxlen=0;
                for current_id in uE.get_attribute(attribute_identifier=translation_type_id):
                    if maxlen < len(current_id.value.get_sequence().upper()):
                        maxlen=len(current_id.value.get_sequence().upper())
                translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=translation_type_id) if len(str(current_id.value.get_sequence().upper())) == maxlen ] )
                #print "Translation",protein,translation
                #print("{0}\t'{1}'\n".format(protein,translation))
            else:
                ##### TRANSLATION TO 'translation_type_id'
                for current_id in uE.get_attribute(attribute_identifier=translation_type_id):
                    translate.add(current_id.value.upper())
                translation="','".join(["{0}".format(x) for x in translate])
            out_translation.write("{0}\t'{1}'\n".format(protein,translation))
        out_translation.close()
        ####################################################################################
    # If we wanted a network of expansion, the translation will be done differently
    elif radius > 0:
        seeds=set()
        for seed in targets_list:
            seeds.add(seed.lower())
        # Output node file (seed nodes get score 1.0, the rest 0.1)
        out_proteins = open(node_file,'w')
        translate={}
        for protein in nodes:
            score=0.1
            uE = session.get_user_entity(protein)
            for current_id in uE.get_attribute(attribute_identifier=targets_type_id):
                if current_id.value.lower() in seeds:
                    translate.setdefault(current_id.value.lower(),[])
                    translate[current_id.value.lower()].append(protein)
                    score=1.0
            if output_format == 'multi-fields':
                out_proteins.write("{0}\t{1:10.2f}\t{2:10.2f}\t{3:10.2f}\n".format(protein,1.,1.,score))
            elif output_format == 'netscore':
                out_proteins.write("{0}\t{1:10.2f}\t{2:10.2f}\t{3:10.2f}\n".format(protein,1.,1.,score))
            else:
                out_proteins.write("{0}\t{1:10.2f}\n".format(protein,score))
        out_proteins.close()
        # Get the IDS of single nodes that were not previously found in the network
        single=set()
        for uE_id in proteome.get_unconnected_nodes():
            single.add(uE_id)
        for protein in single:
            uE = session.get_user_entity(protein)
            for current_id in uE.get_attribute(attribute_identifier=targets_type_id):
                if current_id.value.lower() in seeds:
                    translate.setdefault(current_id.value.lower(),[])
                    translate[current_id.value.lower()].append(protein)
        # Get all IDS of SEEDS, defined as "proteome", and check missing codes to be
        # added for translation
        allseed=set()
        for uE_id in proteome.get_user_entity_ids():
            allseed.add(uE_id)
        for protein in allseed:
            if protein not in single and protein not in nodes:
                uE = session.get_user_entity(protein)
                for current_id in uE.get_attribute(attribute_identifier=targets_type_id):
                    if current_id.value.lower() in seeds:
                        translate.setdefault(current_id.value.lower(),[])
                        translate[current_id.value.lower()].append(protein)
        ################################# TRANSLATION ####################################
        # Map every seed back to the BIANA codes it matched (or 'Unknown').
        out_translation = open("translation_seeds_to_BIANA_codes.txt",'w')
        for s in seeds:
            if s == '': continue
            if s in translate:
                codes=set(translate[s])
                translation="','".join([str(x) for x in codes])
                #out_translation.write("%s\t'%s'\n" % (s.upper(),translation))
                out_translation.write("{0}\t'{1}'\n".format(s.upper(),translation))
            else:
                out_translation.write("{0}\t'Unknown'\n".format(s.upper()))
        out_translation.close()
        ####################################################################################
        # Output translation file
        out_translation = open(translation_file,'w')
        for protein in nodes:
            uE = session.get_user_entity(protein)
            translate=set()
            if translation_type_id == "proteinsequence":
                # For sequences, keep only the longest sequence(s) attached
                # to the user entity.
                maxlen=0;
                for current_id in uE.get_attribute(attribute_identifier=translation_type_id):
                    if maxlen < len(current_id.value.get_sequence().upper()):
                        maxlen=len(current_id.value.get_sequence().upper())
                translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=translation_type_id) if len(str(current_id.value.get_sequence().upper())) == maxlen ] )
            else:
                for current_id in uE.get_attribute(attribute_identifier=translation_type_id):
                    translate.add(current_id.value.upper())
                translation="','".join(["{0}".format(x) for x in translate])
            out_translation.write("{0}\t'{1}'\n".format(protein,translation))
        out_translation.close()
    return
def fileExist(file):  # Checks if a file exists AND is a file
    """Return True when *file* is a path to an existing regular file."""
    if file is None:
        return False
    return os.path.exists(file) and os.path.isfile(file)
def get_affinity_method_ids():
    """Return the PSI-MI method IDs (as strings) treated as affinity-based.

    The method names in the mapping below are kept purely as documentation;
    only the numeric ID keys are returned.
    """
    id_to_name = {
        '0': 'molecular interaction',
        '4': 'affinity chromatography technology',
        '6': 'anti bait coimmunoprecipitation',
        '7': 'anti tag coimmunoprecipitation',
        '8': 'array technology',
        '9': 'bacterial display',
        '19': 'coimmunoprecipitation',
        '28': 'cosedimentation in solution',
        '29': 'cosedimentation through density gradient',
        '34': 'display technology',
        '47': 'far western blotting',
        '48': 'filamentous phage display',
        '49': 'filter binding',
        '50': 'flag tag coimmunoprecipitation',
        '60': 'ha tag coimmunoprecipitation',
        '62': 'his tag coimmunoprecipitation',
        '66': 'lambda phage display',
        '71': 'molecular sieving',
        '73': 'mrna display',
        '75': 'myc tag coimmunoprecipitation',
        '81': 'peptide array',
        '84': 'phage display',
        '89': 'protein array',
        '92': 'protein in situ array',
        '95': 'proteinchip(r) on a surface-enhanced laser desorption/ionization',
        '96': 'pull down',
        '98': 'ribosome display',
        '108': 't7 phage display',
        '109': 'tap tag coimmunoprecipitation',
        '115': 'yeast display',
        '225': 'chromatin immunoprecipitation array',
        '400': 'affinity technology',
        '402': 'chromatin immunoprecipitation assay',
        '405': 'competition binding',
        '411': 'enzyme linked immunosorbent assay',
        '412': 'electrophoretic mobility supershift assay',
        '413': 'electrophoretic mobility shift assay',
        '440': 'saturation binding',
        '492': 'in vitro',
        '493': 'in vivo',
        '657': 'systematic evolution of ligands by exponential enrichment',
        '676': 'tandem affinity purification',
        '678': 'antibody array',
        '695': 'sandwich immunoassay',
        '729': 'luminescence based mammalian interactome mapping',
        '858': 'immunodepleted coimmunoprecipitation',
        '892': 'solid phase assay',
        '899': 'p3 filamentous phage display',
        '900': 'p8 filamentous phage display',
        '921': 'surface plasmon resonance array',
        '946': 'miniaturized immunoprecipitation',
        '947': 'bead aggregation assay',
        '963': 'interactome parallel affinity capture',
        '1017': 'rna immunoprecipitation',
        '1028': 'modified chromatin immunoprecipitation',
        '1029': 'proteomics of isolated chromatin segments',
        '1031': 'protein folding/unfolding',
        '1087': 'monoclonal antibody blockade',
    }
    return set(id_to_name)
def get_complementation_method_ids():
    """Return the set of PSI-MI method ids considered complementation-based.

    The mapping below keeps the human-readable method name for each id as
    in-code documentation; only the set of id strings is returned.
    """
    id_to_name = {
        '0': 'molecular interaction',
        '10': 'beta galactosidase complementation',
        '11': 'beta lactamase complementation',
        '14': 'adenylate cyclase complementation',
        '18': 'two hybrid',
        '80': 'partial DNA sequence identification by hybridization',
        '90': 'protein complementation assay',
        '97': 'reverse ras recruitment system',
        '111': 'dihydrofolate reductase reconstruction',
        '112': 'ubiquitin reconstruction',
        '228': 'cytoplasmic complementation assay',
        '230': 'membrane bound complementation assay',
        '231': 'mammalian protein protein interaction trap',
        '232': 'transcriptional complementation assay',
        '369': 'lex-a dimerization assay',
        '370': 'tox-r dimerization assay',
        '397': 'two hybrid array',
        '398': 'two hybrid pooling approach',
        '399': 'two hybrid fragment pooling approach',
        '432': 'one hybrid',
        '437': 'protein three hybrid',
        '438': 'rna three hybrid',
        '492': 'in vitro',
        '493': 'in vivo',
        '588': 'three hybrid',
        '655': 'lambda repressor two hybrid',
        '726': 'reverse two hybrid',
        '727': 'lexa b52 complementation',
        '728': 'gal4 vp16 complementation',
        '809': 'bimolecular fluorescence complementation',
        '895': 'protein kinase A complementation',
        '916': 'lexa vp16 complementation',
        '1037': 'Split renilla luciferase complementation',
        '1111': 'two hybrid bait or prey pooling approach',
        '1112': 'two hybrid prey pooling approach',
        '1113': 'two hybrid bait and prey pooling approach',
        '1203': 'split luciferase complementation',
        '1204': 'split firefly luciferase complementation',
        '1320': 'membrane yeast two hybrid',
    }
    # Iterating a dict yields its keys, so set(...) gives the id set.
    return set(id_to_name)
def check_restriction(restriction):
    """
    Check that the requested network restriction is valid.

    Returns the flag list:
    [restricted_to_TAP, restricted_to_Y2H, restricted_to_user,
     except_TAP, except_Y2H, except_user]

    Raises IncorrectRestrictionType for an unrecognised value.
    """
    # No restriction at all (None / empty string): everything False.
    if not restriction:
        return [False, False, None, False, False, None]
    flag_table = {
        'aff':  [True, False, None, False, False, None],
        'y2h':  [False, True, None, False, False, None],
        'eaff': [False, False, None, True, False, None],
        'ey2h': [False, False, None, False, True, None],
    }
    key = restriction.lower()
    if key in flag_table:
        # Return a fresh list so callers can mutate it safely.
        return list(flag_table[key])
    raise IncorrectRestrictionType(key)
class IncorrectRestrictionType(Exception):
    """
    Raised when the restriction supplied for network generation is not one
    of the values admitted by DIANA ('aff', 'y2h', 'eaff', 'ey2h').
    """

    def __init__(self, restriction):
        # Keep both the offending value and the admitted values available
        # to callers that want to build their own error message.
        self.restriction = restriction
        self.restriction_types = ['aff', 'y2h', 'eaff', 'ey2h']

    def __str__(self):
        admitted = ', '.join(self.restriction_types)
        return ('The restriction of the network introduced ({}) is not '
                'admitted.\nThe types of restriction admitted in DIANA '
                'are: {}\n'.format(self.restriction, admitted))
|
|
""" test with the .transform """
import numpy as np
import pytest
from pandas._libs import groupby
from pandas.compat import StringIO
from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, Timestamp, concat, date_range
from pandas.core.config import option_context
from pandas.core.groupby.groupby import DataError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
def assert_fp_equal(a, b):
    """Assert that *a* and *b* are elementwise equal within 1e-12."""
    diff = np.abs(a - b)
    assert (diff < 1e-12).all()
def test_transform():
    """Core .transform behavior: within-group scaling, input-order
    preservation (GH 8046), Grouper-based groups (GH 8430) and transform
    with a unique key (GH 9700)."""
    data = Series(np.arange(9) // 3, index=np.arange(9))
    index = np.arange(9)
    np.random.shuffle(index)
    data = data.reindex(index)
    grouped = data.groupby(lambda x: x // 3)
    transformed = grouped.transform(lambda x: x * x.sum())
    assert transformed[7] == 12
    # GH 8046
    # make sure that we preserve the input order
    df = DataFrame(
        np.arange(6, dtype='int64').reshape(
            3, 2), columns=["a", "b"], index=[0, 2, 1])
    key = [0, 0, 1]
    expected = df.sort_index().groupby(key).transform(
        lambda x: x - x.mean()).groupby(key).mean()
    result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
        key).mean()
    assert_frame_equal(result, expected)
    def demean(arr):
        return arr - arr.mean()
    people = DataFrame(np.random.randn(5, 5),
                       columns=['a', 'b', 'c', 'd', 'e'],
                       index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
    key = ['one', 'two', 'one', 'two', 'one']
    result = people.groupby(key).transform(demean).groupby(key).mean()
    expected = people.groupby(key).apply(demean).groupby(key).mean()
    assert_frame_equal(result, expected)
    # GH 8430
    df = tm.makeTimeDataFrame()
    g = df.groupby(pd.Grouper(freq='M'))
    g.transform(lambda x: x - 1)
    # GH 9700
    df = DataFrame({'a': range(5, 10), 'b': range(5)})
    result = df.groupby('a').transform(max)
    expected = DataFrame({'b': range(5)})
    tm.assert_frame_equal(result, expected)
def test_transform_fast():
    """The fast path ('mean'/'first') must match the generic path, keep
    column order (GH 12737), honor selection, and handle dup columns."""
    df = DataFrame({'id': np.arange(100000) / 3,
                    'val': np.random.randn(100000)})
    grp = df.groupby('id')['val']
    values = np.repeat(grp.mean().values,
                       ensure_platform_int(grp.count().values))
    expected = pd.Series(values, index=df.index, name='val')
    result = grp.transform(np.mean)
    assert_series_equal(result, expected)
    result = grp.transform('mean')
    assert_series_equal(result, expected)
    # GH 12737
    df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
                       'd': pd.date_range('2014-1-1', '2014-1-4'),
                       'i': [1, 2, 3, 4]},
                      columns=['grouping', 'f', 'i', 'd'])
    result = df.groupby('grouping').transform('first')
    dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
             pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
    expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
                             'd': dates,
                             'i': [1, 2, 2, 4]},
                            columns=['f', 'i', 'd'])
    assert_frame_equal(result, expected)
    # selection
    result = df.groupby('grouping')[['f', 'i']].transform('first')
    expected = expected[['f', 'i']]
    assert_frame_equal(result, expected)
    # dup columns
    df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
    result = df.groupby('g').transform('first')
    expected = df.drop('g', axis=1)
    assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
    """An aggregating transform must broadcast group means back over the
    original index for Series, DataFrame, and axis=1 grouping."""
    grouped = ts.groupby(lambda x: x.month)
    result = grouped.transform(np.mean)
    tm.assert_index_equal(result.index, ts.index)
    for _, gp in grouped:
        assert_fp_equal(result.reindex(gp.index), gp.mean())
    grouped = tsframe.groupby(lambda x: x.month)
    result = grouped.transform(np.mean)
    tm.assert_index_equal(result.index, tsframe.index)
    for _, gp in grouped:
        agged = gp.mean()
        res = result.reindex(gp.index)
        for col in tsframe:
            assert_fp_equal(res[col], agged[col])
    # group columns
    grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
                              axis=1)
    result = grouped.transform(np.mean)
    tm.assert_index_equal(result.index, tsframe.index)
    tm.assert_index_equal(result.columns, tsframe.columns)
    for _, gp in grouped:
        agged = gp.mean(1)
        res = result.reindex(columns=gp.columns)
        for idx in gp.index:
            assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(tsframe):
    """GH 12713: axes must be set correctly on axis=0/1 transforms even
    with a non-monotonic indexer."""
    # make sure that we are setting the axes
    # correctly when on axis=0 or 1
    # in the presence of a non-monotonic indexer
    # GH12713
    base = tsframe.iloc[0:5]
    r = len(base.index)
    c = len(base.columns)
    tso = DataFrame(np.random.randn(r, c),
                    index=base.index,
                    columns=base.columns,
                    dtype='float64')
    # monotonic
    ts = tso
    grouped = ts.groupby(lambda x: x.weekday())
    result = ts - grouped.transform('mean')
    expected = grouped.apply(lambda x: x - x.mean())
    assert_frame_equal(result, expected)
    ts = ts.T
    grouped = ts.groupby(lambda x: x.weekday(), axis=1)
    result = ts - grouped.transform('mean')
    expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
    assert_frame_equal(result, expected)
    # non-monotonic
    ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
    grouped = ts.groupby(lambda x: x.weekday())
    result = ts - grouped.transform('mean')
    expected = grouped.apply(lambda x: x - x.mean())
    assert_frame_equal(result, expected)
    ts = ts.T
    grouped = ts.groupby(lambda x: x.weekday(), axis=1)
    result = ts - grouped.transform('mean')
    expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
    assert_frame_equal(result, expected)
def test_transform_dtype():
    """GH 9807: transform must upcast to the aggregation's dtype
    (float mean from an int frame)."""
    frame = DataFrame([[1, 3], [2, 3]])
    observed = frame.groupby(1).transform('mean')
    assert_frame_equal(observed, DataFrame([[1.5], [1.5]]))
def test_transform_bug():
    """GH 5712: transforming grouped-by-datetime data (rank on an int
    column) must keep values aligned to the original order."""
    # GH 5712
    # transforming on a datetime column
    df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
    result = df.groupby('A')['B'].transform(
        lambda x: x.rank(ascending=False))
    expected = Series(np.arange(5, 0, step=-1), name='B')
    assert_series_equal(result, expected)
def test_transform_numeric_to_boolean():
    """GH 16875: transforming to a boolean must behave the same whether
    the source column is float or int."""
    expected = pd.Series([True, True], name='A')
    # Same expectation for a float column and an int column.
    for a_values in ([1.1, 2.2], [1, 2]):
        frame = pd.DataFrame({'A': a_values, 'B': [1, 2]})
        observed = frame.groupby('B').A.transform(lambda x: True)
        assert_series_equal(observed, expected)
def test_transform_datetime_to_timedelta():
    """GH 15429: date math in a UDF transform must yield timedelta results
    whether or not the intermediate stays datetime-typed."""
    # GH 15429
    # transforming a datetime to timedelta
    df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
    expected = pd.Series([
        Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
    # this does date math without changing result type in transform
    base_time = df['A'][0]
    result = df.groupby('A')['A'].transform(
        lambda x: x.max() - x.min() + base_time) - base_time
    assert_series_equal(result, expected)
    # this does date math and causes the transform to return timedelta
    result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
    assert_series_equal(result, expected)
def test_transform_datetime_to_numeric():
    """GH 10972: a UDF may convert datetimes to float or int results."""
    # GH 10972
    # convert dt to float
    df = DataFrame({
        'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
    result = df.groupby('a').b.transform(
        lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
    expected = Series([-0.5, 0.5], name='b')
    assert_series_equal(result, expected)
    # convert dt to int
    df = DataFrame({
        'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
    result = df.groupby('a').b.transform(
        lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
    expected = Series([0, 1], name='b')
    assert_series_equal(result, expected)
def test_transform_casting():
    """GH 13046: datetime.diff() through transform must produce a
    timedelta64 dtype for both Series- and frame-level transforms."""
    # 13046
    data = """
    idx A ID3 DATETIME
    0 B-028 b76cd912ff "2014-10-08 13:43:27"
    1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
    2 B-076 1a682034f8 "2014-10-08 14:29:01"
    3 B-023 b76cd912ff "2014-10-08 18:39:34"
    4 B-023 f88g8d7sds "2014-10-08 18:40:18"
    5 B-033 b76cd912ff "2014-10-08 18:44:30"
    6 B-032 b76cd912ff "2014-10-08 18:46:00"
    7 B-037 b76cd912ff "2014-10-08 18:52:15"
    8 B-046 db959faf02 "2014-10-08 18:59:59"
    9 B-053 b76cd912ff "2014-10-08 19:17:48"
    10 B-065 b76cd912ff "2014-10-08 19:21:38"
    """
    df = pd.read_csv(StringIO(data), sep=r'\s+',
                     index_col=[0], parse_dates=['DATETIME'])
    result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
    assert is_timedelta64_dtype(result.dtype)
    result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
        lambda x: x.diff())
    assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(ts):
    """Smoke test: transform works with multiple grouping key functions."""
    grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
    grouped.transform(lambda x: x * 2)
    grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
    """Method dispatch (grouped.fillna) must equal an explicit transform
    of the same fill function."""
    df = tsframe[::5].reindex(tsframe.index)
    grouped = df.groupby(lambda x: x.month)
    filled = grouped.fillna(method='pad')
    fillit = lambda x: x.fillna(method='pad')
    expected = df.groupby(lambda x: x.month).transform(fillit)
    assert_frame_equal(filled, expected)
def test_transform_select_columns(df):
    """Selecting columns on the groupby must equal selecting them before
    grouping."""
    f = lambda x: x.mean()
    result = df.groupby('A')['C', 'D'].transform(f)
    selection = df[['C', 'D']]
    expected = selection.groupby(df['A']).transform(f)
    assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(df):
    """A whole-frame numeric transform must drop nuisance columns and
    order results the same as per-column transforms."""
    # this also tests orderings in transform between
    # series/frame to make sure it's consistent
    expected = {}
    grouped = df.groupby('A')
    expected['C'] = grouped['C'].transform(np.mean)
    expected['D'] = grouped['D'].transform(np.mean)
    expected = DataFrame(expected)
    result = df.groupby('A').transform(np.mean)
    assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
    """String aliases ('mean') must match the underlying numpy function."""
    result = df.groupby('A').transform('mean')
    expected = df.groupby('A').transform(np.mean)
    assert_frame_equal(result, expected)
    result = df.groupby('A')['C'].transform('mean')
    expected = df.groupby('A')['C'].transform(np.mean)
    assert_series_equal(result, expected)
def test_series_fast_transform_date():
    """GH 13191: fast-path 'first' on dates must emit NaT for the
    NaN-keyed rows."""
    # GH 13191
    df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
                       'd': pd.date_range('2014-1-1', '2014-1-4')})
    result = df.groupby('grouping')['d'].transform('first')
    dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
             pd.Timestamp('2014-1-4')]
    expected = pd.Series(dates, name='d')
    assert_series_equal(result, expected)
def test_transform_length():
    """GH 9697: all transform spellings must preserve the input length,
    even with NaNs in a group."""
    # GH 9697
    df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
    expected = pd.Series([3.0] * 4)
    def nsum(x):
        return np.nansum(x)
    results = [df.groupby('col1').transform(sum)['col2'],
               df.groupby('col1')['col2'].transform(sum),
               df.groupby('col1').transform(nsum)['col2'],
               df.groupby('col1')['col2'].transform(nsum)]
    for result in results:
        assert_series_equal(result, expected, check_names=False)
def test_transform_coercion():
    """GH 14457: a lambda mean must not coerce dtypes differently from
    the direct np.mean transform."""
    # 14457
    # when we are transforming be sure to not coerce
    # via assignment
    df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
    g = df.groupby('A')
    expected = g.transform(np.mean)
    result = g.transform(lambda x: np.mean(x))
    assert_frame_equal(result, expected)
def test_groupby_transform_with_int():
    """GH 3740: item-by-item transform must upcast ints to floats where
    needed and downcast back when values stay integral."""
    # GH 3740, make sure that we might upcast on item-by-item transform
    # floats
    df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
                        C=Series(
                            [1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
    with np.errstate(all='ignore'):
        result = df.groupby('A').transform(
            lambda x: (x - x.mean()) / x.std())
    expected = DataFrame(dict(B=np.nan, C=Series(
        [-1, 0, 1, -1, 0, 1], dtype='float64')))
    assert_frame_equal(result, expected)
    # int case
    df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
                        C=[1, 2, 3, 1, 2, 3], D='foo'))
    with np.errstate(all='ignore'):
        result = df.groupby('A').transform(
            lambda x: (x - x.mean()) / x.std())
    expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
    assert_frame_equal(result, expected)
    # int that needs float conversion
    s = Series([2, 3, 4, 10, 5, -1])
    df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
    with np.errstate(all='ignore'):
        result = df.groupby('A').transform(
            lambda x: (x - x.mean()) / x.std())
    s1 = s.iloc[0:3]
    s1 = (s1 - s1.mean()) / s1.std()
    s2 = s.iloc[3:6]
    s2 = (s2 - s2.mean()) / s2.std()
    expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
    assert_frame_equal(result, expected)
    # int downcasting
    result = df.groupby('A').transform(lambda x: x * 2 / 2)
    expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
    assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
    """GH 9941: rows whose group key is NaN must get NaN in the result."""
    # GH 9941
    df = pd.DataFrame({'a': range(10),
                       'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
    result = df.groupby(df.b)['a'].transform(max)
    expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
                         name='a')
    assert_series_equal(result, expected)
def test_transform_mixed_type():
    """apply() with a mutating UDF on mixed dtypes must keep float dtype
    and match re-applying the UDF per group."""
    index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
                                    ])
    df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
                    'c': np.tile(['a', 'b', 'c'], 2),
                    'v': np.arange(1., 7.)}, index=index)
    def f(group):
        # deliberately mutates its input, see note below
        group['g'] = group['d'] * 2
        return group[:1]
    grouped = df.groupby('c')
    result = grouped.apply(f)
    assert result['d'].dtype == np.float64
    # this is by definition a mutating operation!
    with option_context('mode.chained_assignment', None):
        for key, group in grouped:
            res = f(group)
            assert_frame_equal(res, result.loc[key])
def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
pd_op : callable
The pandas cumulative function.
np_op : callable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
def test_cython_group_transform_cumsum(any_real_dtype):
    """gh-4095: Cython group_cumsum matches np.cumsum for all real dtypes."""
    # see gh-4095
    dtype = np.dtype(any_real_dtype).type
    pd_op, np_op = groupby.group_cumsum, np.cumsum
    _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_cumprod():
    """gh-4095: Cython group_cumprod_float64 matches np.cumprod."""
    # see gh-4095
    dtype = np.float64
    # np.cumproduct was a deprecated alias of np.cumprod (removed in
    # NumPy 2.0); use the canonical name.
    pd_op, np_op = groupby.group_cumprod_float64, np.cumprod
    _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_algos():
    """gh-4095: cumprod/cumsum kernels propagate NaN and handle the
    timedelta (datetimelike) path."""
    # see gh-4095
    is_datetimelike = False
    # with nans
    labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
    data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
    actual = np.zeros_like(data)
    actual.fill(np.nan)
    groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
    expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
    tm.assert_numpy_array_equal(actual[:, 0], expected)
    actual = np.zeros_like(data)
    actual.fill(np.nan)
    groupby.group_cumsum(actual, data, labels, is_datetimelike)
    expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
    tm.assert_numpy_array_equal(actual[:, 0], expected)
    # timedelta
    is_datetimelike = True
    data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
    actual = np.zeros_like(data, dtype='int64')
    groupby.group_cumsum(actual, data.view('int64'), labels,
                         is_datetimelike)
    expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
        2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
        np.timedelta64(5, 'ns')])
    tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
@pytest.mark.parametrize(
    "op, args, targop",
    [('cumprod', (), lambda x: x.cumprod()),
     ('cumsum', (), lambda x: x.cumsum()),
     ('shift', (-1, ), lambda x: x.shift(-1)),
     ('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_series(op, args, targop):
    """GH 4095: string-dispatched cython transforms on Series must match
    the equivalent lambda transform and the direct method call."""
    # GH 4095
    s = Series(np.random.randn(1000))
    s_missing = s.copy()
    s_missing.iloc[2:10] = np.nan
    labels = np.random.randint(0, 50, size=1000).astype(float)
    # series
    for data in [s, s_missing]:
        # print(data.head())
        expected = data.groupby(labels).transform(targop)
        tm.assert_series_equal(
            expected,
            data.groupby(labels).transform(op, *args))
        tm.assert_series_equal(expected, getattr(
            data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize('input, exp', [
    # When everything is NaN
    ({'key': ['b'] * 10, 'value': np.nan},
     pd.Series([np.nan] * 10, name='value')),
    # When there is a single NaN
    ({'key': ['b'] * 10 + ['a'] * 2,
      'value': [3] * 3 + [np.nan] + [3] * 8},
     {('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
      ('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
                          2187., 6561., 19683., 3.0, 9.0],
      ('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
      ('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
                         21., 24., 27., 3.0, 6.0]})])
def test_groupby_cum_skipna(op, skipna, input, exp):
    """cumprod/cumsum must honor skipna: NaN either poisons the rest of
    the group (skipna=False) or is skipped over (skipna=True)."""
    df = pd.DataFrame(input)
    result = df.groupby('key')['value'].transform(op, skipna=skipna)
    if isinstance(exp, dict):
        expected = exp[(op, skipna)]
    else:
        expected = exp
    expected = pd.Series(expected, name='value')
    tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
    "op, args, targop",
    [('cumprod', (), lambda x: x.cumprod()),
     ('cumsum', (), lambda x: x.cumsum()),
     ('shift', (-1, ), lambda x: x.shift(-1)),
     ('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_frame(op, args, targop):
    """Cython transforms on a mixed-dtype DataFrame (single and multi
    index, several grouping targets) must match the apply() equivalent,
    both frame-wide and per column; non-numeric columns must raise
    DataError for numeric-only ops."""
    s = Series(np.random.randn(1000))
    s_missing = s.copy()
    s_missing.iloc[2:10] = np.nan
    labels = np.random.randint(0, 50, size=1000).astype(float)
    strings = list('qwertyuiopasdfghjklz')
    strings_missing = strings[:]
    strings_missing[5] = np.nan
    df = DataFrame({'float': s,
                    'float_missing': s_missing,
                    'int': [1, 1, 1, 1, 2] * 200,
                    'datetime': pd.date_range('1990-1-1', periods=1000),
                    'timedelta': pd.timedelta_range(1, freq='s',
                                                    periods=1000),
                    'string': strings * 50,
                    'string_missing': strings_missing * 50},
                   columns=['float', 'float_missing', 'int', 'datetime',
                            'timedelta', 'string', 'string_missing'])
    df['cat'] = df['string'].astype('category')
    df2 = df.copy()
    df2.index = pd.MultiIndex.from_product([range(100), range(10)])
    # DataFrame - Single and MultiIndex,
    # group by values, index level, columns
    for df in [df, df2]:
        for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
                          ]:  # dict(by='string_missing')]:
            # dict(by=['int','string'])]:
            gb = df.groupby(**gb_target)
            # whitelisted methods set the selection before applying
            # bit a of hack to make sure the cythonized shift
            # is equivalent to pre 0.17.1 behavior
            if op == 'shift':
                gb._set_group_selection()
            if op != 'shift' and 'int' not in gb_target:
                # numeric apply fastpath promotes dtype so have
                # to apply separately and concat
                i = gb[['int']].apply(targop)
                f = gb[['float', 'float_missing']].apply(targop)
                expected = pd.concat([f, i], axis=1)
            else:
                expected = gb.apply(targop)
            expected = expected.sort_index(axis=1)
            tm.assert_frame_equal(expected,
                                  gb.transform(op, *args).sort_index(
                                      axis=1))
            tm.assert_frame_equal(
                expected,
                getattr(gb, op)(*args).sort_index(axis=1))
            # individual columns
            for c in df:
                if c not in ['float', 'int', 'float_missing'
                             ] and op != 'shift':
                    msg = "No numeric types to aggregate"
                    with pytest.raises(DataError, match=msg):
                        gb[c].transform(op)
                    with pytest.raises(DataError, match=msg):
                        getattr(gb[c], op)()
                else:
                    expected = gb[c].apply(targop)
                    expected.name = c
                    tm.assert_series_equal(expected,
                                           gb[c].transform(op, *args))
                    tm.assert_series_equal(expected,
                                           getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
    """GH 10165: a transform returning a non-scalar per group must raise
    a clear ValueError."""
    # GH 10165
    cols = pd.MultiIndex.from_tuples([
        ('syn', 'A'), ('mis', 'A'), ('non', 'A'),
        ('syn', 'C'), ('mis', 'C'), ('non', 'C'),
        ('syn', 'T'), ('mis', 'T'), ('non', 'T'),
        ('syn', 'G'), ('mis', 'G'), ('non', 'G')])
    df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
                      columns=cols,
                      index=['A', 'C', 'G', 'T'])
    msg = 'transform must return a scalar value for each group.*'
    with pytest.raises(ValueError, match=msg):
        df.groupby(axis=1, level=1).transform(
            lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize('cols,exp,comp_func', [
    ('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
    (['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
     tm.assert_frame_equal)
])
@pytest.mark.parametrize('agg_func', [
    'count', 'rank', 'size'])
def test_transform_numeric_ret(cols, exp, comp_func, agg_func):
    """GH 19200: count/rank/size transforms on datetime/int selections
    must return numeric results (rank as float)."""
    if agg_func == 'size' and isinstance(cols, list):
        pytest.xfail("'size' transformation not supported with "
                     "NDFrameGroupy")
    # GH 19200
    df = pd.DataFrame(
        {'a': pd.date_range('2018-01-01', periods=3),
         'b': range(3),
         'c': range(7, 10)})
    result = df.groupby('b')[cols].transform(agg_func)
    if agg_func == 'rank':
        exp = exp.astype('float')
    comp_func(result, exp)
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [
    ('foo', 'bar'), (1, 2), (1., 2.)])
@pytest.mark.parametrize("fill_method,limit,exp_vals", [
    ("ffill", None,
     [np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
    ("ffill", 1,
     [np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
    ("bfill", None,
     ['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
    ("bfill", 1,
     [np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
])
def test_group_fill_methods(mix_groupings, as_series, val1, val2,
                            fill_method, limit, exp_vals):
    """Grouped ffill/bfill (with and without limit) must fill only within
    each group, for interleaved and contiguous group layouts."""
    vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
    _exp_vals = list(exp_vals)
    # Overwrite placeholder values
    for index, exp_val in enumerate(_exp_vals):
        if exp_val == 'val1':
            _exp_vals[index] = val1
        elif exp_val == 'val2':
            _exp_vals[index] = val2
    # Need to modify values and expectations depending on the
    # Series / DataFrame that we ultimately want to generate
    if mix_groupings:  # ['a', 'b', 'a, 'b', ...]
        keys = ['a', 'b'] * len(vals)
        def interweave(list_obj):
            temp = list()
            for x in list_obj:
                temp.extend([x, x])
            return temp
        _exp_vals = interweave(_exp_vals)
        vals = interweave(vals)
    else:  # ['a', 'a', 'a', ... 'b', 'b', 'b']
        keys = ['a'] * len(vals) + ['b'] * len(vals)
        _exp_vals = _exp_vals * 2
        vals = vals * 2
    df = DataFrame({'key': keys, 'val': vals})
    if as_series:
        result = getattr(
            df.groupby('key')['val'], fill_method)(limit=limit)
        exp = Series(_exp_vals, name='val')
        assert_series_equal(result, exp)
    else:
        result = getattr(df.groupby('key'), fill_method)(limit=limit)
        exp = DataFrame({'key': keys, 'val': _exp_vals})
        assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
def test_pad_stable_sorting(fill_method):
    """GH 21207: grouped fills must use a stable sort (no fill leaks
    across the NaN boundary within a single group)."""
    # GH 21207
    x = [0] * 20
    y = [np.nan] * 10 + [1] * 10
    if fill_method == 'bfill':
        y = y[::-1]
    df = pd.DataFrame({'x': x, 'y': y})
    expected = df.copy()
    result = getattr(df.groupby('x'), fill_method)()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("freq", [
    None,
    pytest.param('D', marks=pytest.mark.xfail(
        reason='GH#23918 before method uses freq in vectorized approach'))])
@pytest.mark.parametrize("periods,fill_method,limit", [
    (1, 'ffill', None), (1, 'ffill', 1),
    (1, 'bfill', None), (1, 'bfill', 1),
    (-1, 'ffill', None), (-1, 'ffill', 1),
    (-1, 'bfill', None), (-1, 'bfill', 1),
])
def test_pct_change(test_series, freq, periods, fill_method, limit):
    """GH 21200/21621: grouped pct_change must equal fill-then-shift math
    within each group for all periods/fill/limit combinations."""
    # GH 21200, 21621
    vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
    keys = ['a', 'b']
    key_v = np.repeat(keys, len(vals))
    df = DataFrame({'key': key_v, 'vals': vals * 2})
    df_g = getattr(df.groupby('key'), fill_method)(limit=limit)
    grp = df_g.groupby('key')
    expected = grp['vals'].obj / grp['vals'].shift(periods) - 1
    if test_series:
        result = df.groupby('key')['vals'].pct_change(
            periods=periods, fill_method=fill_method, limit=limit, freq=freq)
        tm.assert_series_equal(result, expected)
    else:
        result = df.groupby('key').pct_change(
            periods=periods, fill_method=fill_method, limit=limit, freq=freq)
        tm.assert_frame_equal(result, expected.to_frame('vals'))
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
    """GH 20653: np.any/np.all transforms must keep NaN for NaN keys."""
    # GH 20653
    df = pd.DataFrame([['foo', True],
                       [np.nan, True],
                       ['foo', True]], columns=['key', 'val'])
    exp = pd.Series([True, np.nan, True], name='val')
    res = df.groupby('key')['val'].transform(func)
    tm.assert_series_equal(res, exp)
def test_groupby_transform_rename():
    """GH 23461: a UDF that renames columns must not leak the renamed
    columns into the transform result."""
    # https://github.com/pandas-dev/pandas/issues/23461
    def demean_rename(x):
        result = x - x.mean()
        if isinstance(x, pd.Series):
            return result
        result = result.rename(
            columns={c: '{}_demeaned'.format(c) for c in result.columns})
        return result
    df = pd.DataFrame({'group': list('ababa'),
                       'value': [1, 1, 1, 2, 2]})
    expected = pd.DataFrame({'value': [-1. / 3, -0.5, -1. / 3, 0.5, 2. / 3]})
    result = df.groupby('group').transform(demean_rename)
    tm.assert_frame_equal(result, expected)
    result_single = df.groupby('group').value.transform(demean_rename)
    tm.assert_series_equal(result_single, expected['value'])
|
|
from dbmanager import *
from apicontrol import *
from extras import *
from joblib import Parallel,delayed
from ui import QCoreApplication
#TODO:Reviews,Image
def first_load():
    """Initialise the database schema on first run (no test.db yet)."""
    if os.path.isfile("test.db"):
        return
    SQLHandler().resetdb()
def add_folder(mov_dir):
    """Recursively scan *mov_dir* and register every video file.

    Each file whose extension is in ALLOWED_EXTENSIONS is inserted into
    the videoinfo table with its cleaned name, full path and seen=0.

    The original walked the directory tree twice to build two parallel
    lists; we now walk once, collecting (path, filename) pairs.
    """
    entries = []
    for root, dirs, files in os.walk(mov_dir):
        for fname in files:
            if fname.endswith(ALLOWED_EXTENSIONS):
                entries.append((os.path.join(root, fname), fname))
    my_handle = SQLHandler()
    try:
        for path, fname in entries:
            # 0 == not yet seen
            my_handle.insert_into_videoinfo(name_clean(fname), path, 0)
    finally:
        # always release the handle, even if an insert fails
        my_handle.close()
def add_file(file_path):
    """Register a single video file at *file_path* as unseen (seen=0)."""
    # Everything after the last '/' is the raw file name.
    raw_name = file_path[file_path.rfind("/") + 1:]
    handle = SQLHandler()
    handle.insert_into_videoinfo(name_clean(raw_name), file_path, 0)
    handle.close()
def name_clean(name):
    """Strip release/rip junk tokens from a raw video filename.

    Returns "Title YYYY" when a 4-digit year is found in the name,
    otherwise just the cleaned title.  e.g.
    "The.Matrix.1999.DVDRip.XviD.avi" -> "The Matrix 1999".

    Fix: the original token list was missing a comma after "}}", so
    Python's implicit string concatenation fused it with the following
    entry into the single token "}}x264"; the two tokens are now separate.
    """
    replace = [
        ".avi", ".webm", ".WEBM", ".mkv", ".MKV", ".AVI", "1.4", "5.1",
        "WEB-DL", "-", "DVDRip", "BRRip", "BRrip", "XviD", "1CDRip", "aXXo",
        "[", "]", "(", ")", "{", "}", "{{", "}}", "x264", "720p",
        "StyLishSaLH (StyLish Release)", "DvDScr", "MP3", "HDRip", "WebRip",
        "ETRG", "YIFY", "StyLishSaLH", "StyLish Release", "TrippleAudio",
        "EngHindiIndonesian", "385MB", "CooL GuY", "a2zRG", "x264", "Hindi",
        "AAC", "AC3", "MP3", " R6", "HDRip", "H264", "ESub", "AQOS",
        "ALLiANCE", "UNRATED", "ExtraTorrentRG", "BrRip", "mkv", "mpg",
        "DiAMOND", "UsaBitcom", "AMIABLE", "BRRIP", "XVID", "AbSurdiTy",
        "DVDRiP", "TASTE", "BluRay", "HR", "COCAIN", "_", ".", "BestDivX",
        "MAXSPEED", "Eng", "500MB", "FXG", "Ac3", "Feel", "Subs", "S4A",
        "BDRip", "FTW", "Xvid", "Noir", "1337x", "ReVoTT", "GlowGaze", "mp4",
        "Unrated", "hdrip", "ARCHiViST", "TheWretched", "www", "torrentfive",
        "com", "1080p", "1080", "SecretMyth", "Kingdom", "Release", "RISES",
        "DvDrip", "ViP3R", "RISES", "BiDA", "READNFO", "HELLRAZ0R", "tots",
        "BeStDivX", "UsaBit", "FASM", "NeroZ", "576p", "LiMiTED", "Series",
        "ExtraTorrent", "DVDRIP", "~", "BRRiP", "699MB", "700MB", "greenbud",
        "B89", "480p", "AMX", "007", "DVDrip", "h264", "phrax", "ENG", "TODE",
        "LiNE", "XVid", "sC0rp", "PTpower", "OSCARS", "DXVA", "MXMG", "3LT0N",
        "TiTAN", "4PlayHD", "HQ", "HDRiP", "MoH", "MP4", "BadMeetsEvil",
        "XViD", "3Li", "PTpOWeR", "3D", "HSBS", "CC", "RiPS", "WEBRip", "R5",
        "PSiG", "'GokU61", "GB", "GokU61", "NL", "EE", "Rel", "NL", "PSEUDO",
        "DVD", "Rip", "NeRoZ", "EXTENDED", "DVDScr", "xvid", "WarrLord",
        "SCREAM", "MERRY", "XMAS", "iMB", "7o9", "Exclusive", "171", "DiDee",
        "v2", "Imdb", "N@tive", "BR", "Dual Audio", "TheAaax9", "world4free",
        "LOKY",
    ]
    year = 0
    # Look for a 4-digit release year; everything after it is junk.
    # TODO(review): the 2017 upper bound is hard-coded and misses newer
    # releases; widening it would change behavior for names with later years.
    for y in range(1900, 2017):
        token = str(y)
        if token in name:
            year = token
            # keep the title up to and including the year, drop the rest
            name = name[:name.find(token) + 4]
            name = name.replace(token, " ")
            break
    for value in replace:
        name = name.replace(value, " ")
    name = name.strip()
    if year == 0:
        return "%s" % (name)
    else:
        return "%s %s" % (name, year)
def get_mov_names():
    """Return every stored movie name, sorted alphabetically."""
    handle = SQLHandler()
    cursor = handle.handle.execute(
        'SELECT videoname FROM videoinfo ORDER BY videoname')
    # each row is a 1-tuple (videoname,)
    names = [row[0] for row in cursor]
    handle.close()
    return names
def get_mov_ids():
    """Return the list of all videoinfo row ids."""
    handle = SQLHandler()
    cursor = handle.handle.execute('SELECT id FROM videoinfo')
    # each row is a 1-tuple (id,)
    mov_ids = [row[0] for row in cursor]
    handle.close()
    return mov_ids
def mov_to_path(mov):
    """Return the stored filesystem address for the movie named *mov*."""
    mov_id = mov_to_id(mov)
    my_handle = SQLHandler()
    try:
        cur_pathdetails = my_handle.handle.execute(
            "SELECT address FROM videoinfo WHERE id==%d" % mov_id)
        return cur_pathdetails.fetchone()[0]
    finally:
        # the original never closed the handle (resource leak)
        my_handle.close()
def id_to_details(id):
    """Return (imdb_rating, imdb_votes, rt_audience_rating, imdb_review)
    for the movie whose videoinfo id is *id*.

    Joins idstable against imdbtable and rttable.  Raises TypeError if the
    id has no entry in either table (fetchone() returns None).
    """
    my_handle = SQLHandler()
    cur_imdbdetails = \
        my_handle.handle.execute(
            """
            select imdbtable.rating,imdbtable.votes,
            imdbtable.review
            FROM imdbtable,idstable
            WHERE idstable.imdb_id = imdbtable.imdb_id
            AND idstable.id = %d
            """ % id)
    cur_rtdetails = \
        my_handle.handle.execute(
            """
            select rttable.rating_audience
            FROM rttable,idstable
            WHERE idstable.rt_id = rttable.rt_id
            AND idstable.id = %d
            """ % id)
    imdb_details = cur_imdbdetails.fetchone()
    rt_details = cur_rtdetails.fetchone()
    # the original leaked the handle; release it before returning
    my_handle.close()
    return imdb_details[0], imdb_details[1], rt_details[0], imdb_details[2]
def id_to_recodetails(mov_id):
    """Return the recommendation-relevant fields for *mov_id* as a dict.

    Keys: name, directors, genres, cast, writers, producers.  The
    list-like fields are stored ';'-separated in commontable and are
    split into lists here.
    """
    my_handle = SQLHandler()
    try:
        movdetails_cursor = my_handle.handle.execute(
            'SELECT * FROM commontable '
            'WHERE id == %d' % mov_id)
        row = movdetails_cursor.fetchone()
    finally:
        # the original never closed the handle (resource leak)
        my_handle.close()
    mov_dict = {}
    mov_dict["name"] = row[1]
    mov_dict["directors"] = row[3].split(";")
    mov_dict["genres"] = row[7].split(";")
    mov_dict["cast"] = row[8].split(";")
    mov_dict["writers"] = row[9].split(";")
    mov_dict["producers"] = row[14].split(";")
    return mov_dict
def prep_mov_details_dict(mov_id):
    """Build the full details dict for *mov_id* shown in the UI.

    Combines the commontable row with the ratings/review information
    returned by id_to_details().
    """
    my_handle = SQLHandler()
    try:
        movdetails_cursor = my_handle.handle.execute(
            'SELECT * FROM commontable '
            'WHERE id == %d' % mov_id)
        row = movdetails_cursor.fetchone()
    finally:
        # the original never closed the handle (resource leak)
        my_handle.close()
    mov_dict = {}
    mov_dict["name"] = row[1]
    mov_dict["year"] = row[2]
    mov_dict["directors"] = row[3]
    mov_dict["kind"] = row[4]
    mov_dict["plot"] = row[5]
    mov_dict["img"] = row[6]
    mov_dict["genres"] = row[7]
    mov_dict["cast"] = row[8]
    mov_dict["languages"] = row[13]
    mov_dict["producers"] = row[14]
    mov_dict["imdb_rating"], mov_dict["votes"], \
        mov_dict["rt_rating"], mov_dict["review"] = id_to_details(mov_id)
    return mov_dict
def mov_to_id(mov_name):
    """Return the videoinfo row id for the movie named *mov_name*.

    Raises TypeError (None[0]) if the name is unknown.
    """
    my_handle = SQLHandler()
    # Parameterized query: the old double-quoted %-interpolation was
    # vulnerable to SQL injection and broke on names containing '"'.
    id_cursor = my_handle.handle.execute(
        'SELECT id FROM videoinfo WHERE videoname = ?', (mov_name,))
    return id_cursor.fetchone()[0]
def id_to_mov(id):
    """Return the movie name for videoinfo row id *id*.

    Raises TypeError (None[0]) if the id does not exist.
    """
    my_handle = SQLHandler()
    # Parameterized query; the old version also wrapped the integer id in
    # double quotes and relied on SQLite's loose typing to compare it.
    movname_cursor = my_handle.handle.execute(
        'SELECT videoname FROM videoinfo WHERE id = ?', (id,))
    return movname_cursor.fetchone()[0]
def add_mov_details_to_db(mov):
    """Fetch metadata for movie *mov* and insert it into the four tables.

    Each table insert is attempted independently so one failure does not
    abort the others; errors are reported on stdout, never raised.
    """
    mov_id = mov_to_id(mov)
    try:
        idstable, commtable, imdbtable, rttable = VideoHandler(mov).infodump()
        idstable["id"] = mov_id
        commtable["id"] = mov_id
        sqlite_handle = SQLHandler()
        # str(e) instead of the deprecated/py3-removed e.message;
        # print(...) works on both Python 2 and 3 for a single argument.
        try:
            sqlite_handle.insert_into_commontable(commtable)
        except Exception as e:
            print("Error inserting in CommonTable : %s" % str(e))
        try:
            sqlite_handle.insert_into_idstable(idstable)
        except Exception as e:
            print("Error inserting in IdsTable : %s" % str(e))
        try:
            sqlite_handle.insert_into_imdbtable(imdbtable)
        except Exception as e:
            print("Error inserting in IMDBTable : %s" % str(e))
        try:
            sqlite_handle.insert_into_rt_table(rttable)
        except Exception as e:
            print("Error inserting in RottenTomato Table : %s" % str(e))
        print("ID %s" % commtable["id"])
        print("Inserted %s" % commtable["name"])
    except Exception as e:
        print("Problem in Insertion %s : %s" % (mov, str(e)))
###############
#New checkbox db queries
###############
def changeSeenValue(movie):
    """Toggle the 'seen' flag for *movie* in the videoinfo table."""
    handler = SQLHandler()
    mov_id = mov_to_id(movie)
    try:
        # Parameterized query; the old version %-interpolated a
        # double-quoted integer and relied on SQLite's loose typing.
        handler.handle.execute(
            'UPDATE videoinfo SET seen = NOT seen WHERE videoinfo.id = ?',
            (mov_id,))
        handler.handle.commit()
        print("value changed")
    except Exception as e:
        # str(e): e.message is deprecated and absent on Python 3.
        print(str(e))
def getSeenValue(movie):
    """Return the 'seen' flag (0/1) for *movie*, or None on error."""
    handler = SQLHandler()
    mov_id = mov_to_id(movie)
    try:
        # Parameterized query instead of %-interpolating a quoted integer.
        cursor = handler.handle.execute(
            'SELECT seen FROM videoinfo where id = ?', (mov_id,))
        return cursor.fetchone()[0]
    except Exception as e:
        # str(e): e.message is deprecated and absent on Python 3.
        print(str(e))
def getSeenMovies():
    """Return (id, imdb_id, name) tuples for every movie marked seen.

    Returns None if the query fails (the error is printed, not raised).
    """
    handler = SQLHandler()
    try:
        cursor = handler.handle.execute('SELECT idstable.id,idstable.imdb_id,commontable.name FROM idstable, commontable, videoinfo WHERE videoinfo.seen=1 and videoinfo.id = idstable.id and commontable.id = idstable.id')
        return cursor.fetchall()
    except Exception as e:
        print e.message
|
|
#!/usr/bin/env python
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
Taken from https://github.com/Sarcasm/run-clang-format
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
from __future__ import print_function, unicode_literals
import argparse
import codecs
import difflib
import fnmatch
import io
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
# File extensions treated as C/C++ sources by default (both letter cases).
DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx'
class ExitStatus:
    """Process exit codes: SUCCESS = clean, DIFF = reformatting needed,
    TROUBLE = an error occurred while running."""
    SUCCESS = 0
    DIFF = 1
    TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
    """Expand *files* into a flat list of paths to check.

    Directory arguments are walked only when *recursive* is set; walked
    entries are filtered by *extensions* and by glob-style *exclude*
    patterns. Non-directory arguments are passed through unfiltered.
    """
    extensions = extensions if extensions is not None else []
    exclude = exclude if exclude is not None else []
    selected = []
    for entry in files:
        if not (recursive and os.path.isdir(entry)):
            selected.append(entry)
            continue
        for dirpath, dnames, fnames in os.walk(entry):
            candidates = [os.path.join(dirpath, name) for name in fnames]
            for pattern in exclude:
                # Pruning dnames in place keeps os.walk from descending
                # into excluded directories at all.
                dnames[:] = [
                    d for d in dnames
                    if not fnmatch.fnmatch(os.path.join(dirpath, d), pattern)
                ]
                candidates = [
                    c for c in candidates if not fnmatch.fnmatch(c, pattern)
                ]
            selected.extend(
                c for c in candidates
                if os.path.splitext(c)[1][1:] in extensions)
    return selected
def make_diff(file, original, reformatted):
    """Return a unified diff (as a list of lines) between the *original*
    and *reformatted* contents of *file*; empty list when identical."""
    diff = difflib.unified_diff(
        original,
        reformatted,
        fromfile='{}\t(original)'.format(file),
        tofile='{}\t(reformatted)'.format(file),
        n=3)
    return list(diff)
class DiffError(Exception):
    """Raised when producing a clang-format diff for a file fails.

    ``errs`` holds any stderr lines captured from clang-format.
    """
    def __init__(self, message, errs=None):
        super(DiffError, self).__init__(message)
        self.errs = errs or []
class UnexpectedError(Exception):
    """Raised for any non-DiffError failure while processing a file.

    Captures the current traceback at construction time so it can be
    reported from the parent process even when raised in a pool worker.
    """
    def __init__(self, message, exc=None):
        super(UnexpectedError, self).__init__(message)
        self.formatted_traceback = traceback.format_exc()
        self.exc = exc
def run_clang_format_diff_wrapper(args, file):
    """Call run_clang_format_diff, letting DiffError propagate unchanged
    and converting every other exception into UnexpectedError."""
    try:
        return run_clang_format_diff(args, file)
    except DiffError:
        raise
    except Exception as e:
        detail = '{}: {}: {}'.format(file, e.__class__.__name__, e)
        raise UnexpectedError(detail, e)
def run_clang_format_diff(args, file):
    """Run clang-format on *file* and return (diff_lines, stderr_lines).

    Raises DiffError if the file cannot be read, clang-format cannot be
    started, or clang-format exits with a non-zero status.
    """
    try:
        with io.open(file, 'r', encoding='utf-8') as f:
            original = f.readlines()
    except IOError as exc:
        raise DiffError(str(exc))
    invocation = [args.clang_format_executable, file]
    # Use of utf-8 to decode the process output.
    #
    # Hopefully, this is the correct thing to do.
    #
    # It's done due to the following assumptions (which may be incorrect):
    # - clang-format will returns the bytes read from the files as-is,
    #   without conversion, and it is already assumed that the files use utf-8.
    # - if the diagnostics were internationalized, they would use utf-8:
    #   > Adding Translations to Clang
    #   >
    #   > Not possible yet!
    #   > Diagnostic strings should be written in UTF-8,
    #   > the client can translate to the relevant code page if needed.
    #   > Each translation completely replaces the format string
    #   > for the diagnostic.
    #   > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
    #
    # It's not pretty, due to Python 2 & 3 compatibility.
    encoding_py3 = {}
    if sys.version_info[0] >= 3:
        encoding_py3['encoding'] = 'utf-8'
    try:
        proc = subprocess.Popen(
            invocation,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            **encoding_py3)
    except OSError as exc:
        # Typically: clang-format executable not found.
        raise DiffError(str(exc))
    proc_stdout = proc.stdout
    proc_stderr = proc.stderr
    if sys.version_info[0] < 3:
        # make the pipes compatible with Python 3,
        # reading lines should output unicode
        encoding = 'utf-8'
        proc_stdout = codecs.getreader(encoding)(proc_stdout)
        proc_stderr = codecs.getreader(encoding)(proc_stderr)
    # hopefully the stderr pipe won't get full and block the process
    outs = list(proc_stdout.readlines())
    errs = list(proc_stderr.readlines())
    proc.wait()
    if proc.returncode:
        raise DiffError("clang-format exited with status {}: '{}'".format(
            proc.returncode, file), errs)
    return make_diff(file, original, outs), errs
def bold_red(s):
    """Wrap *s* in ANSI bold+red escape codes (reset at the end)."""
    return '\x1b[1m' + '\x1b[31m' + s + '\x1b[0m'
def colorize(diff_lines):
    """Yield *diff_lines* with ANSI colors applied per unified-diff role."""
    def tint(code, s):
        return '\x1b[' + code + 'm' + s + '\x1b[0m'

    for line in diff_lines:
        if line.startswith(('--- ', '+++ ')):
            # File headers: bold. Must be tested before the +/- prefixes.
            yield tint('1', line)
        elif line.startswith('@@ '):
            yield tint('36', line)  # cyan hunk header
        elif line.startswith('+'):
            yield tint('32', line)  # green addition
        elif line.startswith('-'):
            yield tint('31', line)  # red removal
        else:
            yield line
def print_diff(diff_lines, use_color):
    """Write diff lines to stdout, colorized when *use_color* is true
    (encoded to utf-8 bytes on Python 2)."""
    lines = colorize(diff_lines) if use_color else diff_lines
    if sys.version_info[0] >= 3:
        sys.stdout.writelines(lines)
    else:
        sys.stdout.writelines(l.encode('utf-8') for l in lines)
def print_trouble(prog, message, use_colors):
    """Report an error to stderr, prefixed with the program name."""
    prefix = bold_red('error:') if use_colors else 'error:'
    print("{}: {} {}".format(prog, prefix, message), file=sys.stderr)
def main():
    """Parse arguments, run clang-format over all files, print diffs.

    Returns an ExitStatus code, or None when no files matched (which
    sys.exit() treats as success).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--clang-format-executable',
        metavar='EXECUTABLE',
        help='path to the clang-format executable',
        default='clang-format')
    parser.add_argument(
        '--extensions',
        help='comma separated list of file extensions (default: {})'.format(
            DEFAULT_EXTENSIONS),
        default=DEFAULT_EXTENSIONS)
    parser.add_argument(
        '-r',
        '--recursive',
        action='store_true',
        help='run recursively over directories')
    parser.add_argument('files', metavar='file', nargs='+')
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_true')
    parser.add_argument(
        '-j',
        metavar='N',
        type=int,
        default=0,
        help='run N clang-format jobs in parallel'
        ' (default number of cpus + 1)')
    parser.add_argument(
        '--color',
        default='auto',
        choices=['auto', 'always', 'never'],
        help='show colored diff (default: auto)')
    parser.add_argument(
        '-e',
        '--exclude',
        metavar='PATTERN',
        action='append',
        default=[],
        help='exclude paths matching the given glob-like pattern(s)'
        ' from recursive search')
    args = parser.parse_args()
    # use default signal handling, like diff return SIGINT value on ^C
    # https://bugs.python.org/issue14229#msg156446
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        signal.SIGPIPE
    except AttributeError:
        # compatibility, SIGPIPE does not exist on Windows
        pass
    else:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    colored_stdout = False
    colored_stderr = False
    if args.color == 'always':
        colored_stdout = True
        colored_stderr = True
    elif args.color == 'auto':
        # Color only when attached to a terminal.
        colored_stdout = sys.stdout.isatty()
        colored_stderr = sys.stderr.isatty()
    retcode = ExitStatus.SUCCESS
    files = list_files(
        args.files,
        recursive=args.recursive,
        exclude=args.exclude,
        extensions=args.extensions.split(','))
    if not files:
        # Implicit None -> sys.exit(None) -> exit status 0.
        return
    njobs = args.j
    if njobs == 0:
        njobs = multiprocessing.cpu_count() + 1
    njobs = min(len(files), njobs)
    if njobs == 1:
        # execute directly instead of in a pool,
        # less overhead, simpler stacktraces
        it = (run_clang_format_diff_wrapper(args, file) for file in files)
        pool = None
    else:
        pool = multiprocessing.Pool(njobs)
        it = pool.imap_unordered(
            partial(run_clang_format_diff_wrapper, args), files)
    while True:
        try:
            outs, errs = next(it)
        except StopIteration:
            break
        except DiffError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            retcode = ExitStatus.TROUBLE
            sys.stderr.writelines(e.errs)
        except UnexpectedError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            sys.stderr.write(e.formatted_traceback)
            retcode = ExitStatus.TROUBLE
            # stop at the first unexpected error,
            # something could be very wrong,
            # don't process all files unnecessarily
            if pool:
                pool.terminate()
            break
        else:
            sys.stderr.writelines(errs)
            if outs == []:
                continue
            if not args.quiet:
                print_diff(outs, use_color=colored_stdout)
            if retcode == ExitStatus.SUCCESS:
                # A non-empty diff downgrades SUCCESS to DIFF, but never
                # overwrites TROUBLE.
                retcode = ExitStatus.DIFF
    return retcode
if __name__ == '__main__':
    # Propagate the ExitStatus (or None for "no files") as the exit code.
    sys.exit(main())
|
|
from functools import wraps
from random import *
from flask import render_template, flash, redirect, request, url_for, make_response
from sqlalchemy import desc
from sqlalchemy.exc import *
from vuln_corp import app
from vuln_corp import utils
from vuln_corp.choices import ISSUE_ASSIGNEES
from vuln_corp.forms import LoginForm, SignupForm, EditUserForm, EditIssueForm, IssueForm
from vuln_corp.models import db, User, Session, Groups, Issues
def get_user(f):
    """Decorator: resolve the 'session_id' cookie to a (user, session) pair.

    Injects ``user`` and ``session`` keyword arguments into the wrapped
    view; both are None when there is no matching session.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        session = None
        user = None
        try:
            session = Session.query.filter(Session.session_id == request.cookies.get('session_id')).first()
            if session is not None:
                user = session.get_user()
        except NoSuchTableError:
            # Database not initialized yet; treat the visitor as anonymous.
            pass
        return f(user=user, session=session, *args, **kwargs)
    return decorated_function
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    # NOTE(review): ``user`` here is the raw session-id cookie, not a User
    # object — presumably the template only truth-tests it; confirm.
    user = request.cookies.get('session_id')
    return render_template('index.html', title='Home', user=user, group=request.cookies.get('group'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user; an already-logged-in visitor is logged out first."""
    # Logs the user out if they are already logged in.
    if request.cookies.get('session_id') is not None:
        response = make_response(redirect('/login'))
        response.set_cookie('session_id', '', expires=0)
        response.set_cookie('group', '', expires=0)
        return response
    form = LoginForm()
    if request.method == 'POST':
        if not form.validate_on_submit():
            # NOTE(review): echoing the submitted password in a flash message
            # leaks credentials — kept because this app is an intentionally
            # vulnerable training target.
            flash(
                'login request Failed for username= "%s", password=%s' % (form.username.data, str(form.password.data)),
                'danger')
            return render_template('login.html', form=form, group=request.cookies.get('group'))
        # Form validated. (The old ``elif form.validate_on_submit:`` tested
        # the bound method object, which is always truthy.)
        user = User.query.filter(User.username == request.form.get('username')).first()
        password = request.form.get('password')
        # Fix: the old ``if user.exists`` raised AttributeError for an
        # unknown username because .first() returns None.
        if user is not None:
            if user.password == password:
                session_id = request.cookies.get('session_id', password + str(randint(1, 999)))
                new_session = Session(user.username, session_id, True)
                db.session.add(new_session)
                db.session.commit()
                response = make_response(redirect('/profile'))
                response.set_cookie('session_id', value=session_id)
                group = Groups.query.filter(Groups.id == user.group).first().groupname
                response.set_cookie('group', value=group)
                return response
            flash('Password "%s" is incorrect' % form.password.data, 'danger')
        else:
            flash('User "%s" does not exist' % form.username.data, 'danger')
        # Fix: was render_template('/login', ...) — '/login' is a route,
        # not a template file, and the form was not passed back.
        return render_template('login.html', title='Login', form=form, group=request.cookies.get('group'))
    elif request.method == 'GET':
        return render_template('login.html', title='Sign in', form=form, group=request.cookies.get('group'))
@app.route("/logout", methods=['GET'])
@get_user
def logout(*args, **kwargs):
    """Deactivate the current session and clear the session cookies."""
    session = kwargs.get('session')
    # Fix: visiting /logout without a valid session crashed with
    # AttributeError on ``session.active``.
    if session is not None:
        session.active = False
        db.session.commit()
    response = make_response(redirect('/index'))
    response.set_cookie('session_id', '', expires=0)
    # Also clear the cached group cookie, mirroring the re-login path.
    response.set_cookie('group', '', expires=0)
    return response
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Register a new user account and redirect to the login page."""
    form = SignupForm()
    form.group.choices = [(g.id, g.groupname) for g in Groups.query.all()]
    if request.method == 'POST':
        if not form.validate_on_submit():
            flash('signup FAILED for requested username="{}", email="{}"'.format(form.username.data,
                                                                                 str(form.email.data)), 'danger')
            return render_template('signup.html', title='Signup', form=form, group=request.cookies.get('group'))
        else:
            # NOTE(review): the password is stored exactly as submitted —
            # presumably plaintext by design in this deliberately
            # vulnerable app; confirm.
            newuser = User(request.form.get('username'), request.form.get('firstname'), request.form.get('lastname'),
                           request.form.get('email'), request.form.get('password'), request.form.get('group'),
                           request.form.get('bio'))
            db.session.add(newuser)
            db.session.commit()
            flash('Signup successful for requested username="{}", email="{}"'.format(form.username.data,
                                                                                     str(form.email.data)), 'success')
            return redirect(url_for('login'))
    elif request.method == 'GET':
        return render_template('signup.html', form=form, group=request.cookies.get('group'))
@app.route('/profile')
@get_user
def profile(*args, **kwargs):
    """Render the logged-in user's profile page."""
    return render_template('profile.html',
                           user=kwargs.get('user'),
                           session=kwargs.get('session'),
                           group=request.cookies.get('group'))
@app.route('/testdb')
def testdb():
    """Connectivity check: run ``SELECT 1`` against the database."""
    # NOTE(review): passing a raw string to from_statement() is deprecated
    # in modern SQLAlchemy (wrap in text()) — confirm the pinned version.
    if db.session.query("1").from_statement("SELECT 1").all():
        return 'It works.'
    else:
        return 'Something is broken.'
@app.route('/users')
@get_user
def users(*args, **kwargs):
    """List every registered user."""
    all_users = User.query.all()
    return render_template('show_users.html',
                           users=all_users,
                           user=kwargs.get('user'),
                           group=request.cookies.get('group'))
@app.route('/sessions')
@get_user
def sessions(*args, **kwargs):
    """List every login session (active and inactive)."""
    session_rows = Session.query.all()
    return render_template('show_sessions.html',
                           sessions=session_rows,
                           user=kwargs.get('user'),
                           group=request.cookies.get('group'))
@app.route('/about')
@get_user
def about(*args, **kwargs):
    """Render the static about page."""
    return render_template('about.html',
                           user=kwargs.get('user'),
                           group=request.cookies.get('group'))
@app.route('/users/<username>')
@get_user
def viewuser(username, *args, **kwargs):
    """Show the public profile page for *username*."""
    target = User.query.filter(User.username == username).first()
    return render_template('user.html',
                           session=kwargs.get('session'),
                           user=kwargs.get('user'),
                           viewuser=target,
                           group=request.cookies.get('group'))
@app.route('/settings', methods=['GET', 'POST'])
@get_user
def settings(*args, **kwargs):
    """Show and save the logged-in user's account settings.

    NOTE(review): crashes with AttributeError when visited without a
    session (``user`` is None) — confirm whether an auth guard is intended.
    """
    user = kwargs.get('user')
    session = kwargs.get('session')
    form = EditUserForm(request.form)
    form.group.choices = [(g.id, g.groupname) for g in Groups.query.all()]
    # initialize form with current data
    form.firstname.default = user.firstname.title()
    form.lastname.default = user.lastname.title()
    form.email.default = user.email
    # NOTE(review): the stored password is pushed back into the form —
    # plaintext handling presumably by design in this vulnerable app.
    form.password.default = user.password
    form.group.default = int(user.group)
    form.bio.default = user.bio
    form.process()
    if request.method == 'POST':
        if not form.validate_on_submit():
            flash('Error Validating form', 'danger')
            return render_template('/settings.html', form=form, user=user, session=session,
                                   group=request.cookies.get('group'))
        elif form.validate_on_submit():
            user.firstname = request.form.get('firstname')
            user.lastname = request.form.get('lastname')
            user.password = request.form.get('password')
            user.email = request.form.get('email')
            user.group = request.form.get('group')
            user.bio = request.form.get('bio')
            db.session.commit()
            return redirect(url_for('profile'))
    elif request.method == 'GET':
        return render_template('/settings.html', user=user, session=session, form=form,
                               group=request.cookies.get('group'))
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 template with a 404 status code."""
    body = render_template('404.html')
    return body, 404
@app.route('/issues')
@get_user
def issues(*args, **kwargs):
    """Issue board: three columns bucketed by status, newest first."""
    def newest_first(status):
        # One column of the board, ordered by issue date, descending.
        return (Issues.query.filter(Issues.status == status)
                .order_by(desc(Issues.issued_date)).all())

    return render_template('issues.html',
                           user=kwargs.get('user'),
                           session=kwargs.get('session'),
                           issues_new=newest_first('New'),
                           issues_in_progress=newest_first('In Progress'),
                           issues_done=newest_first('Closed'),
                           group=request.cookies.get('group'))
@app.route('/issues/<id>', methods=['GET', 'POST'])
@get_user
def issue(id, *args, **kwargs):
    """Display a single issue and allow editing its fields."""
    user = kwargs.get('user')
    # Fix: the previous version concatenated the URL fragment into a raw
    # SQL string and executed every ';'-separated statement in it — a
    # textbook SQL injection — only to recover the same id it started
    # with (and it crashed on an unbound variable when no row matched).
    # Resolve the issue through the existing helper instead.
    issue = utils.get_issue_from_id(id)
    form = EditIssueForm()
    # initialize form with current data
    form.summary.default = issue.summary
    form.title.default = issue.title
    form.assignee.default = issue.assignee
    form.status.default = issue.status
    form.assignee.choices = ISSUE_ASSIGNEES
    form.process()
    if request.method == 'POST':
        if not form.validate_on_submit():
            flash('The issue was unable to be updated', 'danger')
            return render_template('issue.html', issue=issue, form=form, user=user, group=request.cookies.get('group'))
        else:
            issue.title = request.form.get('title')
            issue.summary = request.form.get('summary')
            issue.assignee = request.form.get('assignee')
            issue.status = request.form.get('status')
            db.session.commit()
            return redirect('issues/' + str(issue.id))
    elif request.method == 'GET':
        return render_template('issue.html', issue=issue, form=form, user=user, group=request.cookies.get('group'))
@app.route('/issues/create', methods=['GET', 'POST'])
@get_user
def create_issue(*args, **kwargs):
    """Create a new issue authored by the logged-in user."""
    user = kwargs.get('user')
    session = kwargs.get('session')
    form = IssueForm()
    # NOTE(review): ``username`` is never used below, and this line raises
    # AttributeError when visited anonymously (user is None) — confirm.
    username = user.username
    if request.method == 'POST':
        if not form.validate_on_submit():
            return render_template('newissue.html', form=form, group=request.cookies.get('group'))
        elif form.validate_on_submit():
            newissue = Issues(form.summary.data, form.title.data, user.username)
            db.session.add(newissue)
            db.session.commit()
            return redirect(url_for('issues'))
    elif request.method == 'GET':
        return render_template('newissue.html', form=form, user=user, group=request.cookies.get('group'))
    return render_template('newissue.html', group=request.cookies.get('group'))
|
|
# Copyright 2020 Mark Taylor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of Project monotable.
"""Responsible for scanning callers format string.
Classes:
MonoTableConfig Copy of selected MonoTable instance and class variables.
FormatScanner Format Python objects to ASCII table for monospaced font.
"""
import collections
import fnmatch
from typing import List, Tuple, Optional
import monotable.plugin
import monotable.alignment
# Immutable bundle of the MonoTable settings that FormatScanner needs;
# passed by value so the scanner stays decoupled from MonoTable itself.
MonoTableConfig = collections.namedtuple('MonoTableConfig',
                                         ['align_spec_chars',
                                          'sep',
                                          'format_func',
                                          'format_func_map',
                                          'option_spec_delimiters'
                                          ]
                                         )
"""Copy of selected MonoTable instance and class variables."""
class FormatScanner:
    """Scans a format string for align, format_spec, and format directives.

    The format string takes the form [align_spec][directives][format_spec].
    See formats description in MonoTable.__init__().
    The option_spec may select a format function and may select other options.
    The format functions in monotable.plugin.format_functions
    are selectable using an option_spec in addition to any supplied by
    format_func_map.

    An example usage:

    >>> import monotable
    >>> def my_format_func(value, format_spec):
    ...     pass
    >>> align_spec = '<'
    >>> directives = '(width=17;wrap;sep= | ;my_format)'
    >>> format_spec = '.0f'
    >>> format_str = align_spec + directives + format_spec
    >>> config = monotable.scanner.MonoTableConfig(
    ...     align_spec_chars='<^>',
    ...     sep='  ',
    ...     format_func=format,    # <built-in function format>
    ...     format_func_map= {'my_format': my_format_func},
    ...     option_spec_delimiters='(;)')
    >>> formatobj = monotable.scanner.FormatScanner(format_str, config)

    # formatobj has these attributes:
    >>> assert formatobj.error_text == ''    # indicates no scanning errors

    # enumeration value for '<'
    >>> assert formatobj.align == monotable.alignment.LEFT

    # overrides config.format_func
    >>> assert formatobj.format_func == my_format_func
    >>> assert formatobj.format_spec == '.0f'
    >>> assert formatobj.width == 17
    >>> assert formatobj.fixed is False
    >>> assert formatobj.wrap is True
    >>> assert formatobj.sep == ' | '    # overrides config.sep

    Instance variables for user read access:

    error_text
        Describes what was wrong with option_format_spec.
        Empty string indicates a good option_format_spec.

    align
        Value scanned from [align_spec].  It is one of _LEFT,
        _CENTER, _RIGHT, or _NOT_SPECIFIED.

    format_func
        Function with signature of <built-in function format>.
        Reference to a format function associated with a
        scanned option-name or a default value.

    format_spec
        format_spec part of format_str.

    width
        Specifies the maximum number of horizontal columns of the
        formatted text.

    fixed
        When True, indicates the formatted text is exactly width columns.

    wrap
        When True, indicates the formatted text is text wrapped.

    sep
        Specifies separator string to be placed after the formatted
        text.

    none
        Specifies the formatted text for None cell value.

    zero
        Specifies the string to replace numbers that format to
        all digits of 0.

    parentheses
        When formatted text starts with '-', enclose in parentheses.
    """

    def __init__(self, format_str: str, config: MonoTableConfig) -> None:
        """
        Scan the string per delimiters, return results as instance vars.

        format_str
            String: [align_spec][directives][format_spec]
            See formats description in MonoTable.__init__().

        config
            Instance of MonoTableConfig that contains copies of a subset
            of MonoTable instance and class variables.  See MonoTable
            doc string for detailed descriptions.
                align_spec_chars
                sep
                format_func
                format_func_map
                option_spec_delimiters
        """
        # A design choice was made to keep all the format string
        # and format option handling in a separate class.  This was done
        # to reduce the size of MonoTable class.  The downside is that
        # the functions in this class need several MonoTable class and
        # instance variables.  A copy of these variables are passed
        # here by parameter 'config' of type MonoTableConfig.
        # The design choice not taken was to move the
        # FormatScanner member functions into MonoTable.

        # renames to shorten long lines
        align_spec_chars = config.align_spec_chars

        # Since v2.1.0 formatting options are called format directives.
        # The option_spec_* variable names below refer to format
        # directives.
        option_spec_delimiters = config.option_spec_delimiters

        # Verify that the start delimiter of option_spec_delimiters cannot
        # be mis-interpreted as one of the align_spec_chars.
        # Do test here after instance is created to allow overriding the
        # class variables on an instance.  For example:
        #     mt = MonoTable()
        #     mt.align_spec_chars = 'lcr'
        if align_spec_chars and option_spec_delimiters:
            t = option_spec_delimiters[0]
            assert t not in align_spec_chars, 'ambiguous'

        if option_spec_delimiters:
            d = option_spec_delimiters
            assert len(d) == 3, 'one for start, between, end'
            # start char and end char can be the same.
            # between char must be unique.
            assert d[0] != d[1], 'between char different than start char'
            assert d[1] != d[2], 'between char different than end char'

        # Combine hard coded format function options with user supplied
        # format functions.  Note that a user name will hide a hard coded
        # name.
        self._format_functions = dict()
        self._format_functions.update(monotable.plugin.format_functions)
        if config.format_func_map is not None:
            self._format_functions.update(config.format_func_map)

        self.error_text = ''
        # if an arg is expected and there is no default value, set to None,
        self.width = None    # type: Optional[int]
        self.fixed = False
        self.wrap = False
        self.lsep = None
        self.rsep = None
        self.sep = config.sep
        self.zero = None
        self.none = None
        self.parentheses = False
        self.format_func = config.format_func

        self.align, option_format_spec = monotable.alignment.split_up(
            format_str, align_spec_chars)

        if not option_spec_delimiters:
            # no delimiters disables option_spec scanning
            self.format_spec = option_format_spec    # type: str
            return

        self._start, self._between, self._end = option_spec_delimiters
        option_spec, self.format_spec = (
            self._parse(option_format_spec))
        self._scan(option_spec)

    def _parse(self, option_format_spec: str) -> Tuple[str, str]:
        """Split option_format_spec into option_spec and format_spec.

        option_format_spec
            [option_spec][format_spec]
            option_spec == (*) where * is 0 or more characters
            See directives description in MonoTable.__init__().

        Returns a tuple consisting of:
            The option_spec including the enclosing delimiters or empty string.
            The rest of the string after closing delimiter or entire string.

        Since v2.1.0 option_spec refers to format directives.
        """
        startswith_match = self._start + '*' + self._end + '*'
        if fnmatch.fnmatchcase(option_format_spec, startswith_match):
            # look for self._end starting char after self._start
            option_spec_end = option_format_spec.find(self._end, 1)
            option_spec = option_format_spec[:option_spec_end + 1]
            format_spec = option_format_spec[option_spec_end + 1:]
            return option_spec, format_spec
        return '', option_format_spec

    def _scan(self, option_spec: str) -> None:
        """Scan option_spec string for options and values.

        Updates instance variables align, error_text, format_func,
        format_spec, width, fixed, wrap, sep, zero, none, and parentheses
        per scan results.

        Since v2.1.0 option_spec refers to format directives.

        option_spec
            (*) where * is one or more option names separated by ;.
            See option_spec description in MonoTable.__init__().
        """
        if not option_spec:
            return
        # assumes option_spec starts and ends with correct delimiters
        option_spec_copy_for_error_text = option_spec[:]
        option_spec = option_spec[1:-1]    # drop start and end delimiters
        if not option_spec:    # anything left to scan?
            return
        option_list = option_spec.split(self._between)    # type: List[str]

        # scan for each option, process, and remove from option_list
        self._scan_int_arg('width', option_list)
        self._scan_no_arg('fixed', option_list)
        self._scan_no_arg('wrap', option_list)
        self._scan_str_arg('lsep', option_list)
        self._scan_str_arg('rsep', option_list)
        self._scan_str_arg('sep', option_list)
        self._scan_str_arg('none', option_list)
        self._scan_str_arg('zero', option_list)
        self._scan_no_arg('parentheses', option_list)
        self._scan_format_func(option_list)

        # silently ignore fixed or wrap options if no width=N option
        if self.width is None:
            self.wrap = False
            self.fixed = False

        # rsep is an alias for sep since version 2.1.0
        # unconditionally replace sep with rsep if rsep is specified.
        if self.rsep is not None:
            self.sep = self.rsep

        if len(option_list) > 0:
            # All the allowed option expressions have been removed from
            # option_list.  So option_list contains only invalid values or
            # duplicates.  Duplicates can be the same option or more than
            # one format function name.  Show them in the error message.
            error_messages = ['In option_spec "{}"'.format(
                option_spec_copy_for_error_text)]
            for opt in option_list:
                message = ('    unrecognized option "{}",'
                           ' bad/duplicate name or bad "=value".').format(opt)
                error_messages.append(message)
            error_messages.extend(self._allowed_options())
            self.error_text = '\n'.join(error_messages)

    def _scan_no_arg(self, option_name: str, option_list: List[str]) -> None:
        """Scan option_list for option_name option, remove if found."""
        for option in option_list:
            name, arg = self._option_and_arg(option)
            if name == option_name:
                if arg is None:
                    setattr(self, option_name, True)
                    option_list.remove(option)
                    break

    def _scan_int_arg(self, option_name: str, option_list: List[str]) -> None:
        """Scan option_list for option_name option + int arg and remove."""
        for option in option_list:
            name, arg = self._option_and_arg(option)
            if name == option_name:
                value = self._scan_gt_value(arg)
                if value is not None:
                    setattr(self, option_name, value)
                    option_list.remove(option)
                    break

    def _scan_str_arg(self, option_name: str, option_list: List[str]) -> None:
        """Scan option_list for option_name option + string arg and remove."""
        for option in option_list:
            name, arg = self._option_and_arg(option)
            if name == option_name:
                # Keep rest after '='.  OK if empty string after '='.
                if arg is not None:
                    setattr(self, option_name, arg)
                    option_list.remove(option)
                    break

    def _scan_format_func(self, option_list: List[str]) -> None:
        """Scan option_list for a format function, remove if found."""
        for option in option_list:
            name, arg = self._option_and_arg(option)
            if name is not None and name in self._format_functions:
                if arg is None:
                    self.format_func = self._format_functions[name]
                    option_list.remove(option)
                    break

    @staticmethod
    def _option_and_arg(option: str) -> Tuple[Optional[str], Optional[str]]:
        """Split up a format option to an option name and arg."""
        split_option = option.split('=')
        if len(split_option) == 1:
            return split_option[0].strip(), None
        elif len(split_option) == 2:
            # Only the name is stripped; the arg keeps its whitespace.
            return split_option[0].strip(), split_option[1]
        else:
            return None, None

    @staticmethod
    def _scan_gt_value(text: Optional[str]) -> Optional[int]:
        """
        Scan text for integer value N.  Returns N if an int > 0, else None.

        text can be None.  If so return None.
        """
        if text is None:
            return None
        try:
            int_value = int(text)
        except ValueError:
            return None
        if int_value < 1:
            return None
        else:
            return int_value

    def _allowed_format_functions(self) -> List[str]:
        """Return ' name - function.' description lines, sorted by name."""
        lines = []
        fmt = ' {} - {}.'
        for name in sorted(self._format_functions):
            lines.append(fmt.format(name, self._format_functions[name]))
        return lines

    def _allowed_options(self) -> List[str]:
        """Return help text lines describing all legal directives."""
        lines = ['Directives are enclosed by "{}" and "{}", '
                 'and are separated by "{}".'.format(
                     self._start, self._end, self._between),
                 'For example: "{}width=22{}sep= {}"'.format(
                     self._start, self._between, self._end),
                 'Case is significant. Whitespace is not significant except',
                 'after the "=" in "sep =". Allowed options are:',
                 ' width=N - column width is at most N columns. N > 0.',
                 ' fixed - column width is exactly width=N columns.',
                 ' Use to qualify width=N option.',
                 ' wrap - wrap/re-wrap to width=N.',
                 ' Use to qualify width=N option.',
                 ' lsep=ccc - characters after lsep= go to left of column.',
                 ' rsep=ccc - characters after rsep= go to right of column.',
                 ' none=ccc - None formats as the characters after none=.',
                 ' zero=ccc - if all digits are zero replace with ccc.',
                 ' parentheses if minus sign, enclose in parentheses.',
                 ]
        lines.extend(self._allowed_format_functions())
        return lines
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore.exceptions import ClientError
import copy
from datetime import datetime
import functools
import json
import itertools
import logging
import random
import threading
import time
import ipaddress
# Try to place nice in lambda exec environment
# where we don't require yaml
try:
import yaml
except ImportError:
yaml = None
else:
try:
from yaml import CSafeLoader
SafeLoader = CSafeLoader
except ImportError:
try:
from yaml import SafeLoader
except ImportError:
SafeLoader = None
from StringIO import StringIO
class Bag(dict):
    """A dict whose entries can also be read as attributes."""

    def __getattr__(self, name):
        # Mirror normal attribute semantics: a missing key surfaces as
        # AttributeError so getattr()/hasattr() behave as expected.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
def yaml_load(value):
    """Deserialize a YAML document from *value* using the safest loader.

    Raises RuntimeError when PyYAML could not be imported in this
    environment (e.g. a slim lambda runtime).
    """
    if yaml is not None:
        return yaml.load(value, Loader=SafeLoader)
    raise RuntimeError("Yaml not available")
def loads(body):
    """Deserialize a JSON document from *body*."""
    return json.loads(body)
def dumps(data, fh=None, indent=0):
    """Serialize *data* to JSON (datetimes become ISO strings).

    Writes to *fh* when a file handle is given, otherwise returns the
    serialized string.
    """
    if not fh:
        return json.dumps(data, cls=DateTimeEncoder, indent=indent)
    return json.dump(data, fh, cls=DateTimeEncoder, indent=indent)
def format_event(evt):
    """Pretty-print an event structure as indented JSON text."""
    buf = StringIO()
    json.dump(evt, buf, indent=2)
    return buf.getvalue()
def type_schema(
        type_name, inherits=None, rinherit=None,
        aliases=None, required=None, **props):
    """jsonschema generation helper

    params:
     - type_name: name of the type
     - inherits: list of document fragments that are required via anyOf[$ref]
     - rinherit: use another schema as a base for this, basically work around
       inherits issues with additionalProperties and type enums.
     - aliases: additional names this type maybe called
     - required: list of required properties, by default 'type' is required
     - props: additional key value properties
    """
    type_names = [type_name]
    if aliases:
        type_names.extend(aliases)

    if rinherit:
        schema = copy.deepcopy(rinherit)
        schema['properties']['type'] = {'enum': type_names}
    else:
        schema = {
            'type': 'object',
            'properties': {
                'type': {'enum': type_names}}}

    # Ref based inheritance and additional properties don't mix well.
    # http://goo.gl/8UyRvQ
    if not inherits:
        schema['additionalProperties'] = False

    schema['properties'].update(props)
    required = required or []
    if isinstance(required, list):
        required.append('type')
    schema['required'] = required

    if inherits:
        extended = schema
        schema = {'allOf': [{'$ref': ref} for ref in inherits]}
        schema['allOf'].append(extended)
    return schema
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime values as ISO-8601 strings."""

    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        # Anything else falls back to the stock encoder (which raises).
        return json.JSONEncoder.default(self, obj)
def group_by(resources, key):
    """Bucket resource dicts by the value each holds under *key*.

    Resources missing the key are grouped under None.
    """
    grouped = {}
    for resource in resources:
        grouped.setdefault(resource.get(key), []).append(resource)
    return grouped
def chunks(iterable, size=50):
    """Break an iterable into lists of at most *size* items each."""
    batch = []
    for item in iterable:
        batch.append(item)
        # batch is reset whenever it fills, so len(batch) never exceeds size
        if len(batch) % size == 0:
            yield batch
            batch = []
    if batch:
        yield batch
def camelResource(obj):
    """Some sources from apis return lowerCased where as describe calls
    always return TitleCase, this function turns the former to the later

    Mutates *obj* in place (recursively) and returns it; non-dict values
    are returned unchanged.
    """
    if not isinstance(obj, dict):
        return obj
    for k in list(obj.keys()):
        v = obj.pop(k)
        obj["%s%s" % (k[0].upper(), k[1:])] = v
        if isinstance(v, dict):
            camelResource(v)
        elif isinstance(v, list):
            # Bug fix: map() is lazy on Python 3, so the previous
            # `map(camelResource, v)` never actually converted nested
            # dicts inside lists. Iterate explicitly.
            for item in v:
                camelResource(item)
    return obj
def get_account_id(session):
    """Resolve the AWS account id by inspecting the arn of any IAM role."""
    # The account id is the 5th colon-separated field of a role arn:
    # arn:aws:iam::ACCOUNT:role/name
    arn = session.client('iam').list_roles(MaxItems=1)['Roles'][0]['Arn']
    return arn.split(":")[4]
def query_instances(session, client=None, **query):
    """Return a list of ec2 instances for the query.

    Flattens all reservations across every page of describe_instances.
    """
    if client is None:
        client = session.client('ec2')
    paginator = client.get_paginator('describe_instances')
    instances = []
    for page in paginator.paginate(**query):
        for reservation in page['Reservations']:
            instances.extend(reservation['Instances'])
    return instances
# Per-thread cache for the boto session (threads must not share sessions).
CONN_CACHE = threading.local()


def local_session(factory):
    """Cache a session thread local for up to 45m"""
    cached = getattr(CONN_CACHE, 'session', None)
    created_at = getattr(CONN_CACHE, 'time', 0)
    now = time.time()
    # Reuse the cached session while it is younger than 45 minutes.
    if cached is not None and created_at + 45 * 60 > now:
        return cached
    session = factory()
    CONN_CACHE.session = session
    CONN_CACHE.time = now
    return session
def annotation(i, k):
    """Return the annotation stored on *i* under *k* (empty tuple if absent)."""
    return i.get(k, ())
def set_annotation(i, k, v):
    """
    >>> x = {}
    >>> set_annotation(x, 'marker', 'a')
    >>> annotation(x, 'marker')
    ['a']
    """
    if not isinstance(i, dict):
        raise ValueError("Can only annotate dictionaries")

    if not isinstance(v, list):
        v = [v]

    if k not in i:
        i[k] = v
        return
    # Existing list annotations accumulate; non-list values are left alone.
    existing = i.get(k)
    if isinstance(existing, list):
        existing.extend(v)
def parse_s3(s3_path):
    """Split an s3:// url into (normalized path, bucket, key prefix).

    The returned path has any trailing slash removed; the key prefix
    keeps its leading '/' and is empty when the url names only a bucket.

    Raises:
        ValueError: if *s3_path* does not start with 's3://'.
    """
    if not s3_path.startswith('s3://'):
        raise ValueError("invalid s3 path")
    # Bug fix: normalize before locating the bucket/key split. Previously
    # the split index was computed on the raw path, so a bucket-only url
    # with a trailing slash ('s3://bucket/') produced a bogus one-character
    # key prefix sliced from the end of the stripped path.
    s3_path = s3_path.rstrip('/')
    ridx = s3_path.find('/', 5)
    if ridx == -1:
        bucket = s3_path[5:]
        key_prefix = ""
    else:
        bucket = s3_path[5:ridx]
        key_prefix = s3_path[ridx:]
    return s3_path, bucket, key_prefix
def generate_arn(
        service, resource, partition='aws',
        region=None, account_id=None, resource_type=None, separator='/'):
    """Generate an Amazon Resource Name.
    See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html.
    """
    prefix = 'arn:%s:%s:%s:%s:' % (
        partition, service, region or '', account_id or '')
    if resource_type:
        return prefix + '%s%s%s' % (resource_type, separator, resource)
    return prefix + resource
def snapshot_identifier(prefix, db_identifier):
    """Return an identifier for a snapshot of a database or cluster.

    Format: '<prefix>-<db_identifier>-<YYYY-MM-DD>' using the current date.
    """
    date_suffix = datetime.now().strftime('%Y-%m-%d')
    return '%s-%s-%s' % (prefix, db_identifier, date_suffix)
def get_retry(codes=(), max_attempts=8):
    """Retry a boto3 api call on transient errors.

    https://www.awsarchitectureblog.com/2015/03/backoff.html
    https://en.wikipedia.org/wiki/Exponential_backoff

    :param codes: A sequence of retryable error codes

    returns a function for invoking aws client calls that
    retries on retryable error codes.
    """
    max_delay = 2 ** max_attempts

    def _retry(func, *args, **kw):
        for attempt, delay in enumerate(
                backoff_delays(1, max_delay, jitter=True)):
            try:
                return func(*args, **kw)
            except ClientError as e:
                # Non-retryable codes propagate immediately; retryable
                # ones propagate once the attempt budget is exhausted.
                if e.response['Error']['Code'] not in codes:
                    raise
                if attempt == max_attempts - 1:
                    raise
                time.sleep(delay)
    return _retry
def backoff_delays(start, stop, factor=2.0, jitter=False):
    """Yield a geometric backoff sequence from *start* up to *stop*.

    With jitter enabled each delay is reduced by a random fraction so
    concurrent clients decorrelate their retries.
    """
    delay = start
    while delay <= stop:
        if jitter:
            yield delay - (delay * random.random())
        else:
            yield delay
        delay = delay * factor
def parse_cidr(value):
    """Process cidr ranges.

    Values containing '/' parse as networks, bare values as host
    addresses. Returns None when *value* does not parse.
    """
    klass = ipaddress.ip_address if '/' not in value else IPv4Network
    try:
        return klass(unicode(value))
    except (ipaddress.AddressValueError, ValueError):
        return None
class IPv4Network(ipaddress.IPv4Network):
    """IPv4Network whose containment test also covers whole subnets."""

    # Override for net 2 net containment comparison
    def __contains__(self, other):
        if not isinstance(other, ipaddress._BaseNetwork):
            # Plain addresses use the stock membership test.
            return super(IPv4Network, self).__contains__(other)
        return self.supernet_of(other)
worker_log = logging.getLogger('c7n.worker')


def worker(f):
    """Generic wrapper to log uncaught exceptions in a function.

    When we cross concurrent.futures executor boundaries we lose our
    traceback information, and when doing bulk operations we may tolerate
    transient failures on a partial subset. However we still want to have
    full accounting of the error in the logs, in a format that our error
    collection (cwl subscription) can still pickup.
    """
    @functools.wraps(f)
    def _f(*args, **kw):
        try:
            return f(*args, **kw)
        except Exception:
            # Removed the unused `as e` binding; logger.exception already
            # records the active traceback. The error still propagates.
            worker_log.exception(
                'Error invoking %s',
                "%s.%s" % (f.__module__, f.__name__))
            raise
    return _f
|
|
# System libs
import os
import argparse
from distutils.version import LooseVersion
from multiprocessing import Queue, Process
# Numerical libs
import numpy as np
import math
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from config import cfg
from dataset import ValDataset
from models import ModelBuilder, SegmentationModule
from utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices, setup_logger
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy
from PIL import Image
from tqdm import tqdm
# ADE20K 150-class color palette, loaded once at import time.
colors = loadmat('data/color150.mat')['colors']


def visualize_result(data, pred, dir_result):
    """Save a side-by-side (image | ground truth | prediction) png."""
    img, seg, info = data

    # Color-encode the ground-truth segmentation and the prediction.
    seg_color = colorEncode(seg, colors)
    pred_color = colorEncode(pred, colors)

    # Stitch the three panels horizontally and write alongside the input name.
    im_vis = np.concatenate((img, seg_color, pred_color),
                            axis=1).astype(np.uint8)
    img_name = info.split('/')[-1]
    out_path = os.path.join(dir_result, img_name.replace('.jpg', '.png'))
    Image.fromarray(im_vis).save(out_path)
def evaluate(segmentation_module, loader, cfg, gpu_id, result_queue):
    """Run multi-scale inference over *loader* on one GPU.

    Pushes one (acc, pix, intersection, union) tuple per image into
    *result_queue* for the master process to aggregate.

    NOTE(review): assumes each loader batch holds a single sample
    (``batch_data[0]``) — confirm against the DataLoader in `worker`.
    """
    segmentation_module.eval()
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            # Accumulate class scores averaged over the resized scales.
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu_id)
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                # Drop fields the network does not consume.
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu_id)
                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)
            # Final prediction: argmax over the class dimension.
            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
        # calculate accuracy and SEND THEM TO MASTER
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)
        result_queue.put_nowait((acc, pix, intersection, union))
        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                pred,
                os.path.join(cfg.DIR, 'result')
            )
def worker(cfg, gpu_id, start_idx, end_idx, result_queue):
    """Per-GPU evaluation process.

    Builds the dataset shard [start_idx, end_idx), constructs the
    segmentation model on *gpu_id*, and streams per-image metrics into
    *result_queue* via `evaluate`.
    """
    torch.cuda.set_device(gpu_id)
    # Dataset and Loader
    dataset_val = ValDataset(
        cfg.DATASET.root_dataset,
        cfg.DATASET.list_val,
        cfg.DATASET,
        start_idx=start_idx, end_idx=end_idx)
    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=cfg.VAL.batch_size,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=2)
    # Network Builders
    net_encoder = ModelBuilder.build_encoder(
        arch=cfg.MODEL.arch_encoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(
        arch=cfg.MODEL.arch_decoder.lower(),
        fc_dim=cfg.MODEL.fc_dim,
        num_class=cfg.DATASET.num_class,
        weights=cfg.MODEL.weights_decoder,
        # use_softmax=True puts the decoder in inference mode.
        use_softmax=True)
    crit = nn.NLLLoss(ignore_index=-1)
    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    segmentation_module.cuda()
    # Main loop
    evaluate(segmentation_module, loader_val, cfg, gpu_id, result_queue)
def main(cfg, gpus):
    """Shard the validation list across *gpus*, run one worker process per
    GPU, and aggregate the metrics they report through a shared queue."""
    with open(cfg.DATASET.list_val, 'r') as f:
        lines = f.readlines()
        num_files = len(lines)

    num_files_per_gpu = math.ceil(num_files / len(gpus))
    pbar = tqdm(total=num_files)

    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    result_queue = Queue(500)
    procs = []
    for idx, gpu_id in enumerate(gpus):
        start_idx = idx * num_files_per_gpu
        end_idx = min(start_idx + num_files_per_gpu, num_files)
        proc = Process(target=worker, args=(cfg, gpu_id, start_idx, end_idx, result_queue))
        print('gpu:{}, start_idx:{}, end_idx:{}'.format(gpu_id, start_idx, end_idx))
        proc.start()
        procs.append(proc)

    # Master fetches results. Bug fix: block on Queue.get() instead of the
    # previous `if result_queue.empty(): continue` busy-wait, which spun a
    # full CPU core while waiting on the workers.
    processed_counter = 0
    while processed_counter < num_files:
        (acc, pix, intersection, union) = result_queue.get()
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        processed_counter += 1
        pbar.update(1)

    for p in procs:
        p.join()

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))

    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'
          .format(iou.mean(), acc_meter.average()*100))
    print('Evaluation Done!')
if __name__ == '__main__':
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
        'PyTorch>=0.4.0 is required'

    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Validation"
    )
    parser.add_argument(
        "--cfg",
        default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--gpus",
        default="0-3",
        help="gpus to use, e.g. 0-3 or 0,1,2,3"
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    logger = setup_logger(distributed_rank=0)  # TODO
    logger.info("Loaded configuration file {}".format(args.cfg))
    logger.info("Running with config:\n{}".format(cfg))

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(
        cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(
        cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)
    # Fail fast with a clear message (fixed typo: "exitst" -> "exist").
    assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"

    # Ensure the output directory for visualizations exists.
    if not os.path.isdir(os.path.join(cfg.DIR, "result")):
        os.makedirs(os.path.join(cfg.DIR, "result"))

    # Parse gpu ids
    gpus = parse_devices(args.gpus)
    gpus = [x.replace('gpu', '') for x in gpus]
    gpus = [int(x) for x in gpus]

    main(cfg, gpus)
|
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import time
from typing import Callable, Dict, List, Optional, Sequence, Set, TypeVar, Tuple, Union
import warnings
from google.api_core.exceptions import GoogleAPICallError, NotFound
from google.protobuf.timestamp_pb2 import Timestamp
from cirq_google.engine.client import quantum
from cirq_google.engine.client.quantum import types as qtypes
# Generic return type for requests routed through EngineClient._make_request.
_R = TypeVar('_R')

# HTTP-style status codes that _make_request treats as transient.
RETRYABLE_ERROR_CODES = [500, 503]


class EngineException(Exception):
    """Raised for non-retryable Quantum Engine API failures."""

    def __init__(self, message):
        # Call the base class constructor with the parameters it needs
        super().__init__(message)
class EngineClient:
"""Client for the Quantum Engine API that deals with the engine protos and
the gRPC client but not cirq protos or objects. All users are likely better
served by using the Engine, EngineProgram, EngineJob, EngineProcessor, and
Calibration objects instead of using this directly.
"""
def __init__(
self,
service_args: Optional[Dict] = None,
verbose: Optional[bool] = None,
max_retry_delay_seconds: int = 3600, # 1 hour
) -> None:
"""Engine service client.
Args:
service_args: A dictionary of arguments that can be used to
configure options on the underlying gRPC client.
verbose: Suppresses stderr messages when set to False. Default is
true.
max_retry_delay_seconds: The maximum number of seconds to retry when
a retryable error code is returned.
"""
self.max_retry_delay_seconds = max_retry_delay_seconds
if verbose is None:
verbose = True
self.verbose = verbose
if not service_args:
service_args = {}
# Suppress warnings about using Application Default Credentials.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.grpc_client = quantum.QuantumEngineServiceClient(**service_args)
    def _make_request(self, request: Callable[[], _R]) -> _R:
        """Executes *request*, retrying retryable API errors with backoff.

        Args:
            request: Zero-argument callable performing the RPC.

        Returns:
            Whatever *request* returns.

        Raises:
            EngineException: on a non-retryable `GoogleAPICallError`.
            TimeoutError: once the backoff delay exceeds
                `max_retry_delay_seconds`.
        """
        # Start with a 100ms retry delay with exponential backoff to
        # max_retry_delay_seconds
        current_delay = 0.1
        while True:
            try:
                return request()
            except GoogleAPICallError as err:
                message = err.message
                # Raise EngineException for errors that are not retryable.
                # Otherwise, pass through to retry.
                if err.code.value not in RETRYABLE_ERROR_CODES:
                    raise EngineException(message) from err
                if current_delay > self.max_retry_delay_seconds:
                    raise TimeoutError(f'Reached max retry attempts for error: {message}')
                if self.verbose:
                    print(message, file=sys.stderr)
                    print('Waiting ', current_delay, 'seconds before retrying.', file=sys.stderr)
                # Delay doubles each attempt: 0.1s, 0.2s, 0.4s, ...
                time.sleep(current_delay)
                current_delay *= 2
def create_program(
self,
project_id: str,
program_id: Optional[str],
code: qtypes.any_pb2.Any,
description: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
) -> Tuple[str, qtypes.QuantumProgram]:
"""Creates a Quantum Engine program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
code: Properly serialized program code.
description: An optional description to set on the program.
labels: Optional set of labels to set on the program.
Returns:
Tuple of created program id and program
"""
parent_name = _project_name(project_id)
program_name = _program_name_from_ids(project_id, program_id) if program_id else ''
request = qtypes.QuantumProgram(name=program_name, code=code)
if description:
request.description = description
if labels:
request.labels.update(labels)
program = self._make_request(
lambda: self.grpc_client.create_quantum_program(parent_name, request, False)
)
return _ids_from_program_name(program.name)[1], program
def get_program(
self, project_id: str, program_id: str, return_code: bool
) -> qtypes.QuantumProgram:
"""Returns a previously created quantum program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
return_code: If True returns the serialized program code.
"""
return self._make_request(
lambda: self.grpc_client.get_quantum_program(
_program_name_from_ids(project_id, program_id), return_code
)
)
    def list_programs(
        self,
        project_id: str,
        created_before: Optional[Union[datetime.datetime, datetime.date]] = None,
        created_after: Optional[Union[datetime.datetime, datetime.date]] = None,
        has_labels: Optional[Dict[str, str]] = None,
    ):
        """Returns a list of previously executed quantum programs.

        Args:
            project_id: the id of the project
            created_after: retrieve programs that were created after this date
                or time.
            created_before: retrieve programs that were created before this date
                or time.
            has_labels: retrieve programs that have labels on them specified by
                this dict. If the value is set to `*`, programs having the label
                regardless of the label value will be returned. For example, to
                query programs that have the shape label and have the color
                label with value red can be queried using
                {'color': 'red', 'shape':'*'}
        """
        filters = []
        if created_after is not None:
            val = _date_or_time_to_filter_expr('created_after', created_after)
            filters.append(f"create_time >= {val}")
        if created_before is not None:
            val = _date_or_time_to_filter_expr('created_before', created_before)
            filters.append(f"create_time <= {val}")
        if has_labels is not None:
            for (k, v) in has_labels.items():
                # Label filters use ':' (has) semantics, not equality.
                filters.append(f"labels.{k}:{v}")
        return self._make_request(
            lambda: self.grpc_client.list_quantum_programs(
                _project_name(project_id), filter_=" AND ".join(filters)
            )
        )
def set_program_description(
self, project_id: str, program_id: str, description: str
) -> qtypes.QuantumProgram:
"""Sets the description for a previously created quantum program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
description: The new program description.
Returns:
The updated quantum program.
"""
program_resource_name = _program_name_from_ids(project_id, program_id)
return self._make_request(
lambda: self.grpc_client.update_quantum_program(
program_resource_name,
qtypes.QuantumProgram(name=program_resource_name, description=description),
qtypes.field_mask_pb2.FieldMask(paths=['description']),
)
)
def _set_program_labels(
self, project_id: str, program_id: str, labels: Dict[str, str], fingerprint: str
) -> qtypes.QuantumProgram:
program_resource_name = _program_name_from_ids(project_id, program_id)
return self._make_request(
lambda: self.grpc_client.update_quantum_program(
program_resource_name,
qtypes.QuantumProgram(
name=program_resource_name, labels=labels, label_fingerprint=fingerprint
),
qtypes.field_mask_pb2.FieldMask(paths=['labels']),
)
)
def set_program_labels(
self, project_id: str, program_id: str, labels: Dict[str, str]
) -> qtypes.QuantumProgram:
"""Sets (overwriting) the labels for a previously created quantum
program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
labels: The entire set of new program labels.
Returns:
The updated quantum program.
"""
program = self.get_program(project_id, program_id, False)
return self._set_program_labels(project_id, program_id, labels, program.label_fingerprint)
def add_program_labels(
self, project_id: str, program_id: str, labels: Dict[str, str]
) -> qtypes.QuantumProgram:
"""Adds new labels to a previously created quantum program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
labels: New labels to add to the existing program labels.
Returns:
The updated quantum program.
"""
program = self.get_program(project_id, program_id, False)
old_labels = program.labels
new_labels = dict(old_labels)
new_labels.update(labels)
if new_labels != old_labels:
fingerprint = program.label_fingerprint
return self._set_program_labels(project_id, program_id, new_labels, fingerprint)
return program
def remove_program_labels(
self, project_id: str, program_id: str, label_keys: List[str]
) -> qtypes.QuantumProgram:
"""Removes labels with given keys from the labels of a previously
created quantum program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
label_keys: Label keys to remove from the existing program labels.
Returns:
The updated quantum program.
"""
program = self.get_program(project_id, program_id, False)
old_labels = program.labels
new_labels = dict(old_labels)
for key in label_keys:
new_labels.pop(key, None)
if new_labels != old_labels:
fingerprint = program.label_fingerprint
return self._set_program_labels(project_id, program_id, new_labels, fingerprint)
return program
def delete_program(self, project_id: str, program_id: str, delete_jobs: bool = False) -> None:
"""Deletes a previously created quantum program.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
delete_jobs: If True will delete all the program's jobs, other this
will fail if the program contains any jobs.
"""
self._make_request(
lambda: self.grpc_client.delete_quantum_program(
_program_name_from_ids(project_id, program_id), delete_jobs
)
)
    def create_job(
        self,
        project_id: str,
        program_id: str,
        job_id: Optional[str],
        processor_ids: Sequence[str],
        run_context: qtypes.any_pb2.Any,
        priority: Optional[int] = None,
        description: Optional[str] = None,
        labels: Optional[Dict[str, str]] = None,
    ) -> Tuple[str, qtypes.QuantumJob]:
        """Creates and runs a job on Quantum Engine.

        Args:
            project_id: A project_id of the parent Google Cloud Project.
            program_id: Unique ID of the program within the parent project.
            job_id: Unique ID of the job within the parent program.
            run_context: Properly serialized run context.
            processor_ids: List of processor id for running the program.
            priority: Optional priority to run at. Must satisfy
                0 <= priority < 1000 (the upper bound is exclusive).
            description: Optional description to set on the job.
            labels: Optional set of labels to set on the job.

        Returns:
            Tuple of created job id and job.

        Raises:
            ValueError: If the priority is not in the range [0, 1000).
        """
        # Check program to run and program parameters.
        # NOTE(review): `if priority` skips validation for priority=0, which
        # is valid anyway; 1000 is rejected despite the message wording.
        if priority and not 0 <= priority < 1000:
            raise ValueError('priority must be between 0 and 1000')

        # Create job.
        # An empty name lets the service assign a job id.
        job_name = _job_name_from_ids(project_id, program_id, job_id) if job_id else ''
        request = qtypes.QuantumJob(
            name=job_name,
            scheduling_config=qtypes.SchedulingConfig(
                processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
                    processor_names=[
                        _processor_name_from_ids(project_id, processor_id)
                        for processor_id in processor_ids
                    ]
                )
            ),
            run_context=run_context,
        )
        if priority:
            request.scheduling_config.priority = priority
        if description:
            request.description = description
        if labels:
            request.labels.update(labels)
        job = self._make_request(
            lambda: self.grpc_client.create_quantum_job(
                _program_name_from_ids(project_id, program_id), request, False
            )
        )
        return _ids_from_job_name(job.name)[2], job
    def list_jobs(
        self,
        project_id: str,
        program_id: Optional[str] = None,
        created_before: Optional[Union[datetime.datetime, datetime.date]] = None,
        created_after: Optional[Union[datetime.datetime, datetime.date]] = None,
        has_labels: Optional[Dict[str, str]] = None,
        execution_states: Optional[Set[quantum.enums.ExecutionStatus.State]] = None,
        executed_processor_ids: Optional[List[str]] = None,
        scheduled_processor_ids: Optional[List[str]] = None,
    ):
        """Returns the list of jobs for a given program.

        Args:
            project_id: A project_id of the parent Google Cloud Project.
            program_id: Optional, a unique ID of the program within the parent
                project. If None, jobs will be listed across all programs within
                the project.
            created_after: retrieve jobs that were created after this date
                or time.
            created_before: retrieve jobs that were created before this date
                or time.
            has_labels: retrieve jobs that have labels on them specified by
                this dict. If the value is set to `*`, filters having the label
                regardless of the label value will be filtered. For example, to
                query programs that have the shape label and have the color
                label with value red can be queried using
                {'color': 'red', 'shape':'*'}
            execution_states: retrieve jobs that have an execution state that
                is contained in `execution_states`. See
                `quantum.enums.ExecutionStatus.State` enum for accepted values.
            executed_processor_ids: filters jobs by processor ID used for
                execution. Matches any of provided IDs.
            scheduled_processor_ids: filters jobs by any of provided
                scheduled processor IDs.
        """
        filters = []
        if created_after is not None:
            val = _date_or_time_to_filter_expr('created_after', created_after)
            filters.append(f"create_time >= {val}")
        if created_before is not None:
            val = _date_or_time_to_filter_expr('created_before', created_before)
            filters.append(f"create_time <= {val}")
        if has_labels is not None:
            for (k, v) in has_labels.items():
                filters.append(f"labels.{k}:{v}")
        if execution_states is not None:
            # Multiple states are OR'd together inside one filter clause.
            state_filter = []
            for execution_state in execution_states:
                state_filter.append(f"execution_status.state = {execution_state.name}")
            filters.append(f"({' OR '.join(state_filter)})")
        if executed_processor_ids is not None:
            ids_filter = []
            for processor_id in executed_processor_ids:
                ids_filter.append(f"executed_processor_id = {processor_id}")
            filters.append(f"({' OR '.join(ids_filter)})")
        if scheduled_processor_ids is not None:
            ids_filter = []
            for processor_id in scheduled_processor_ids:
                ids_filter.append(f"scheduled_processor_ids: {processor_id}")
            filters.append(f"({' OR '.join(ids_filter)})")
        # '-' is the API wildcard for "all programs in the project".
        if program_id is None:
            program_id = "-"
        parent = _program_name_from_ids(project_id, program_id)
        return self._make_request(
            lambda: self.grpc_client.list_quantum_jobs(parent, filter_=" AND ".join(filters))
        )
def get_job(
self, project_id: str, program_id: str, job_id: str, return_run_context: bool
) -> qtypes.QuantumJob:
"""Returns a previously created job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
return_run_context: If true then the run context will be loaded
from the job's run_context_location and set on the returned
QuantumJob.
"""
return self._make_request(
lambda: self.grpc_client.get_quantum_job(
_job_name_from_ids(project_id, program_id, job_id), return_run_context
)
)
def set_job_description(
self, project_id: str, program_id: str, job_id: str, description: str
) -> qtypes.QuantumJob:
"""Sets the description for a previously created quantum job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
description: The new job description.
Returns:
The updated quantum job.
"""
job_resource_name = _job_name_from_ids(project_id, program_id, job_id)
return self._make_request(
lambda: self.grpc_client.update_quantum_job(
job_resource_name,
qtypes.QuantumJob(name=job_resource_name, description=description),
qtypes.field_mask_pb2.FieldMask(paths=['description']),
)
)
def _set_job_labels(
self,
project_id: str,
program_id: str,
job_id: str,
labels: Dict[str, str],
fingerprint: str,
) -> qtypes.QuantumJob:
job_resource_name = _job_name_from_ids(project_id, program_id, job_id)
return self._make_request(
lambda: self.grpc_client.update_quantum_job(
job_resource_name,
qtypes.QuantumJob(
name=job_resource_name, labels=labels, label_fingerprint=fingerprint
),
qtypes.field_mask_pb2.FieldMask(paths=['labels']),
)
)
def set_job_labels(
self, project_id: str, program_id: str, job_id: str, labels: Dict[str, str]
) -> qtypes.QuantumJob:
"""Sets (overwriting) the labels for a previously created quantum job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
labels: The entire set of new job labels.
Returns:
The updated quantum job.
"""
job = self.get_job(project_id, program_id, job_id, False)
return self._set_job_labels(project_id, program_id, job_id, labels, job.label_fingerprint)
def add_job_labels(
self, project_id: str, program_id: str, job_id: str, labels: Dict[str, str]
) -> qtypes.QuantumJob:
"""Adds new labels to a previously created quantum job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
labels: New labels to add to the existing job labels.
Returns:
The updated quantum job.
"""
job = self.get_job(project_id, program_id, job_id, False)
old_labels = job.labels
new_labels = dict(old_labels)
new_labels.update(labels)
if new_labels != old_labels:
fingerprint = job.label_fingerprint
return self._set_job_labels(project_id, program_id, job_id, new_labels, fingerprint)
return job
def remove_job_labels(
self, project_id: str, program_id: str, job_id: str, label_keys: List[str]
) -> qtypes.QuantumJob:
"""Removes labels with given keys from the labels of a previously
created quantum job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
label_keys: Label keys to remove from the existing job labels.
Returns:
The updated quantum job.
"""
job = self.get_job(project_id, program_id, job_id, False)
old_labels = job.labels
new_labels = dict(old_labels)
for key in label_keys:
new_labels.pop(key, None)
if new_labels != old_labels:
fingerprint = job.label_fingerprint
return self._set_job_labels(project_id, program_id, job_id, new_labels, fingerprint)
return job
def delete_job(self, project_id: str, program_id: str, job_id: str) -> None:
"""Deletes a previously created quantum job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
"""
self._make_request(
lambda: self.grpc_client.delete_quantum_job(
_job_name_from_ids(project_id, program_id, job_id)
)
)
def cancel_job(self, project_id: str, program_id: str, job_id: str) -> None:
"""Cancels the given job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
"""
self._make_request(
lambda: self.grpc_client.cancel_quantum_job(
_job_name_from_ids(project_id, program_id, job_id)
)
)
def get_job_results(
self, project_id: str, program_id: str, job_id: str
) -> qtypes.QuantumResult:
"""Returns the results of a completed job.
Args:
project_id: A project_id of the parent Google Cloud Project.
program_id: Unique ID of the program within the parent project.
job_id: Unique ID of the job within the parent program.
Returns:
The quantum result.
"""
return self._make_request(
lambda: self.grpc_client.get_quantum_result(
_job_name_from_ids(project_id, program_id, job_id)
)
)
def list_processors(self, project_id: str) -> List[qtypes.QuantumProcessor]:
"""Returns a list of Processors that the user has visibility to in the
current Engine project. The names of these processors are used to
identify devices when scheduling jobs and gathering calibration metrics.
Args:
project_id: A project_id of the parent Google Cloud Project.
Returns:
A list of metadata of each processor.
"""
response = self._make_request(
lambda: self.grpc_client.list_quantum_processors(_project_name(project_id), filter_='')
)
return list(response)
def get_processor(self, project_id: str, processor_id: str) -> qtypes.QuantumProcessor:
"""Returns a quantum processor.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
Returns:
The quantum processor.
"""
return self._make_request(
lambda: self.grpc_client.get_quantum_processor(
_processor_name_from_ids(project_id, processor_id)
)
)
def list_calibrations(
self, project_id: str, processor_id: str, filter_str: str = ''
) -> List[qtypes.QuantumCalibration]:
"""Returns a list of quantum calibrations.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
filter_str: Filter string current only supports 'timestamp' with values
of epoch time in seconds or short string 'yyyy-MM-dd'. For example:
'timestamp > 1577960125 AND timestamp <= 1578241810'
'timestamp > 2020-01-02 AND timestamp <= 2020-01-05'
Returns:
A list of calibrations.
"""
response = self._make_request(
lambda: self.grpc_client.list_quantum_calibrations(
_processor_name_from_ids(project_id, processor_id), filter_=filter_str
)
)
return list(response)
def get_calibration(
self, project_id: str, processor_id: str, calibration_timestamp_seconds: int
) -> qtypes.QuantumCalibration:
"""Returns a quantum calibration.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
calibration_timestamp_seconds: The timestamp of the calibration in
seconds.
Returns:
The quantum calibration.
"""
return self._make_request(
lambda: self.grpc_client.get_quantum_calibration(
_calibration_name_from_ids(project_id, processor_id, calibration_timestamp_seconds)
)
)
def get_current_calibration(
self, project_id: str, processor_id: str
) -> Optional[qtypes.QuantumCalibration]:
"""Returns the current quantum calibration for a processor if it has one.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
Returns:
The quantum calibration or None if there is no current calibration.
Raises:
EngineException: If the request for calibration fails.
"""
try:
return self._make_request(
lambda: self.grpc_client.get_quantum_calibration(
_processor_name_from_ids(project_id, processor_id) + '/calibrations/current'
)
)
except EngineException as err:
if isinstance(err.__cause__, NotFound):
return None
raise
def create_reservation(
self,
project_id: str,
processor_id: str,
start: datetime.datetime,
end: datetime.datetime,
whitelisted_users: Optional[List[str]] = None,
):
"""Creates a quantum reservation and returns the created object.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
reservation_id: Unique ID of the reservation in the parent project,
or None if the engine should generate an id
start: the starting time of the reservation as a datetime object
end: the ending time of the reservation as a datetime object
whitelisted_users: a list of emails that can use the reservation.
"""
parent = _processor_name_from_ids(project_id, processor_id)
reservation = qtypes.QuantumReservation(
name='',
start_time=Timestamp(seconds=int(start.timestamp())),
end_time=Timestamp(seconds=int(end.timestamp())),
)
if whitelisted_users:
reservation.whitelisted_users.extend(whitelisted_users)
return self._make_request(
lambda: self.grpc_client.create_quantum_reservation(
parent=parent, quantum_reservation=reservation
)
)
def cancel_reservation(self, project_id: str, processor_id: str, reservation_id: str):
"""Cancels a quantum reservation.
This action is only valid if the associated [QuantumProcessor]
schedule not been frozen. Otherwise, delete_reservation should
be used.
The reservation will be truncated to end at the time when the request is
serviced and any remaining time will be made available as an open swim
period. This action will only succeed if the reservation has not yet
ended and is within the processor's freeze window. If the reservation
has already ended or is beyond the processor's freeze window, then the
call will return an error.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
reservation_id: Unique ID of the reservation in the parent project,
"""
name = _reservation_name_from_ids(project_id, processor_id, reservation_id)
return self._make_request(lambda: self.grpc_client.cancel_quantum_reservation(name=name))
def delete_reservation(self, project_id: str, processor_id: str, reservation_id: str):
"""Deletes a quantum reservation.
This action is only valid if the associated [QuantumProcessor]
schedule has not been frozen. Otherwise, cancel_reservation
should be used.
If the reservation has already ended or is within the processor's
freeze window, then the call will return a `FAILED_PRECONDITION` error.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
reservation_id: Unique ID of the reservation in the parent project,
"""
name = _reservation_name_from_ids(project_id, processor_id, reservation_id)
return self._make_request(lambda: self.grpc_client.delete_quantum_reservation(name=name))
def get_reservation(self, project_id: str, processor_id: str, reservation_id: str):
"""Gets a quantum reservation from the engine.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
reservation_id: Unique ID of the reservation in the parent project.
Raises:
EngineException: If the request to get the reservation failed.
"""
try:
name = _reservation_name_from_ids(project_id, processor_id, reservation_id)
return self._make_request(lambda: self.grpc_client.get_quantum_reservation(name=name))
except EngineException as err:
if isinstance(err.__cause__, NotFound):
return None
raise
def list_reservations(
self, project_id: str, processor_id: str, filter_str: str = ''
) -> List[qtypes.QuantumReservation]:
"""Returns a list of quantum reservations.
Only reservations owned by this project will be returned.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
filter_str: A string for filtering quantum reservations.
The fields eligible for filtering are start_time and end_time
Examples:
`start_time >= 1584385200`: Reservation began on or after
the epoch time Mar 16th, 7pm GMT.
`end_time >= 1483370475`: Reservation ends on
or after Jan 2nd 2017 15:21:15
Returns:
A list of QuantumReservation objects.
"""
response = self._make_request(
lambda: self.grpc_client.list_quantum_reservations(
_processor_name_from_ids(project_id, processor_id), filter_=filter_str
)
)
return list(response)
def update_reservation(
self,
project_id: str,
processor_id: str,
reservation_id: str,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
whitelisted_users: Optional[List[str]] = None,
):
"""Updates a quantum reservation.
This will update a quantum reservation's starting time, ending time,
and list of whitelisted users. If any field is not filled, it will
not be updated.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
reservation_id: Unique ID of the reservation in the parent project,
start: the new starting time of the reservation as a datetime object
end: the new ending time of the reservation as a datetime object
whitelisted_users: a list of emails that can use the reservation.
The empty list, [], will clear the whitelisted_users while None
will leave the value unchanged.
"""
name = (
_reservation_name_from_ids(project_id, processor_id, reservation_id)
if reservation_id
else ''
)
reservation = qtypes.QuantumReservation(
name=name,
)
paths = []
if start:
reservation.start_time.seconds = int(start.timestamp())
paths.append('start_time')
if end:
reservation.end_time.seconds = int(end.timestamp())
paths.append('end_time')
if whitelisted_users is not None:
reservation.whitelisted_users.extend(whitelisted_users)
paths.append('whitelisted_users')
return self._make_request(
lambda: self.grpc_client.update_quantum_reservation(
name=name,
quantum_reservation=reservation,
update_mask=qtypes.field_mask_pb2.FieldMask(paths=paths),
)
)
def list_time_slots(
self, project_id: str, processor_id: str, filter_str: str = ''
) -> List[qtypes.QuantumTimeSlot]:
"""Returns a list of quantum time slots on a processor.
Args:
project_id: A project_id of the parent Google Cloud Project.
processor_id: The processor unique identifier.
filter_str: A string expression for filtering the quantum
time slots returned by the list command. The fields
eligible for filtering are `start_time`, `end_time`.
Returns:
A list of QuantumTimeSlot objects.
"""
response = self._make_request(
lambda: self.grpc_client.list_quantum_time_slots(
_processor_name_from_ids(project_id, processor_id), filter_=filter_str
)
)
return list(response)
def _project_name(project_id: str) -> str:
return f'projects/{project_id}'
def _program_name_from_ids(project_id: str, program_id: str) -> str:
return f'projects/{project_id}/programs/{program_id}'
def _job_name_from_ids(project_id: str, program_id: str, job_id: str) -> str:
return f'projects/{project_id}/programs/{program_id}/jobs/{job_id}'
def _processor_name_from_ids(project_id: str, processor_id: str) -> str:
return f'projects/{project_id}/processors/{processor_id}'
def _calibration_name_from_ids(
project_id: str, processor_id: str, calibration_time_seconds: int
) -> str:
return (
f'projects/{project_id}/processors/{processor_id}/calibrations/{calibration_time_seconds}'
)
def _reservation_name_from_ids(project_id: str, processor_id: str, reservation_id: str) -> str:
return f'projects/{project_id}/processors/{processor_id}/reservations/{reservation_id}'
def _ids_from_program_name(program_name: str) -> Tuple[str, str]:
parts = program_name.split('/')
return parts[1], parts[3]
def _ids_from_job_name(job_name: str) -> Tuple[str, str, str]:
parts = job_name.split('/')
return parts[1], parts[3], parts[5]
def _ids_from_processor_name(processor_name: str) -> Tuple[str, str]:
parts = processor_name.split('/')
return parts[1], parts[3]
def _ids_from_calibration_name(calibration_name: str) -> Tuple[str, str, int]:
parts = calibration_name.split('/')
return parts[1], parts[3], int(parts[5])
def _date_or_time_to_filter_expr(param_name: str, param: Union[datetime.datetime, datetime.date]):
"""Formats datetime or date to filter expressions.
Args:
param_name: The name of the filter parameter (for error messaging).
param: The value of the parameter.
Raises:
ValueError: If the supplied param is not a datetime or date.
"""
if isinstance(param, datetime.datetime):
return f"{int(param.timestamp())}"
elif isinstance(param, datetime.date):
return f"{param.isoformat()}"
raise ValueError(
f"Unsupported date/time type for {param_name}: got {param} of "
f"type {type(param)}. Supported types: datetime.datetime and"
f"datetime.date"
)
|
|
#!/usr/bin/python3
from netsnmp._api import get_async
from async_devtypes import SNMP_DEVTYPES
#import cx_Oracle
import zmq
import redis, hiredis
import logging, logging.handlers
import random, sys, threading, time
import multiprocessing as mp
# Python 2 support
try:
import queue
except:
import Queue as queue
try:
time.perf_counter()
except:
time.perf_counter = time.time
# command name (argv[0]); used to derive unique-per-command IPC socket paths
cmdname = sys.argv[0]
# configure short loglevel names
# NOTE(review): logging._levelToName is a private CPython attribute -- this
# rewrites the global level->name map and may break on other interpreters.
logging._levelToName = {
    logging.CRITICAL: 'CRI',
    logging.ERROR: 'ERR',
    logging.WARNING: 'WAR',
    logging.INFO: 'INF',
    logging.DEBUG: 'DBG',
    logging.NOTSET: 'NOSET',
}
# DB definitions
# NOTE(review): placeholder Oracle credentials; unused while the cx_Oracle
# import/connect lines remain commented out below.
DB = {
    "db": {
        'server': 'host',
        'port'  : '1521',
        'name'  : 'name',
        'user'  : 'user',
        'pass'  : 'pass',
    },
}
### Global definitions
#BUG: udp sendto failures when 49116 > MAX_WORKERS*MAX_PER_WORKER > 49112
# limitation lies somewhere between 49112 and 49116(??) - getting EPERM (1)... kernel dropping?
#http://comments.gmane.org/gmane.comp.security.firewalls.netfilter.devel/29993
# Ceiling is around 8 * 4096 = 32768 max requests at any given time
# or 4096*12 = 3072*16 = 49152
# Max concurrent poller processes, and max hosts handed to each one.
MAX_WORKERS = 4
MAX_PER_WORKER = 4096
# Number of ZMQ PULL processes to spawn
ZMQ_PROCESSORS = 2
# Time to pause for ZMQ initialization (Seconds)
ZMQ_PAUSE=0.01
# ZMQ High water mark
ZMQ_HWM=10000000
# ZMQ endpoints (per-command IPC paths so parallel invocations do not collide)
ZMQ_IN = "ipc:///tmp/%s_in" % cmdname
ZMQ_OUT = "ipc:///tmp/%s_out" % cmdname
## SNMP
# SNMP timeout is in milliseconds
SNMP_TIMEOUT=1250
SNMP_RETRIES=1
SNMP_TIMEOUT_DELTA=MAX_WORKERS
# Response callback message intake/processing
def ZMQProcessor(success, timeout, oidcount):
    """
    Intake work via ZeroMQ IPC socket and queue for processing after _sentinel is signaled

    Runs in a child process: pulls response frames from the streamer's PUSH
    socket, parses successful SNMP responses and writes them to local Redis
    via a pipeline, and finally rolls local counters up into the shared
    multiprocessing.Value counters passed in as arguments.

    Args:
        success: mp.Value('i') -- incremented by the number of OP=='1' frames.
        timeout: mp.Value('i') -- incremented by the number of OP=='2' frames.
        oidcount: mp.Value('i') -- incremented by the total parsed OID count.
    """
    # ZeroMQ Message Frame pointers ([OP, HOST, DEVTYPE, OIDS..])
    OP=0
    HOST=1
    DEVTYPE=2
    OIDS=3
    # Local counters, rolled up into mp.Value at end
    _type_count = {}
    _success = 0
    _timeout = 0
    _oidcount = 0
    # Processor ZMQ PULL socket
    incoming = zmq.Context().socket(zmq.PULL)
    incoming.setsockopt(zmq.RCVHWM, ZMQ_HWM)
    incoming.connect(ZMQ_OUT)
    log.debug('Starting up...')
    ## Intake
    _redis = redis.Redis(host='127.0.0.1')
    with _redis.pipeline() as _redispipe:
        i=0
        # Pull messages via ZMQ, process and ship to Redis
        while True:
            response = [frame.decode() for frame in incoming.recv_multipart()]
            if response[OP] == '_sentinel':
                # Main process signals end-of-work with a '_sentinel' frame.
                log.debug('Shutting down ZMQ_PULL socket...')
                incoming.close()
                break
            elif response[OP] == '1':
                # OP '1' == successful SNMP response for this host.
                _success+=1
                try:
                    _type_count[response[DEVTYPE]]+=1
                except KeyError:
                    # Need to define
                    _type_count[response[DEVTYPE]]=1
                # Parse OIDs
                _vars = SNMP_DEVTYPES[response[DEVTYPE]].parse_oids(response[OIDS:])
                _oidcount+=len(_vars)
                #log.debug(_vars)
                try:
                    #log.debug("%s [%s] %s", response[HOST], response[DEVTYPE], vars)
                    # splice if ipv6
                    # ("udp6:[addr]" -> "addr"; drop the prefix and trailing ']')
                    _redispipe.hmset(response[HOST] if not response[HOST][:4]=="udp6" else response[HOST][6:][:-1],
                                     _vars)
                    i+=1
                except redis.exceptions.RedisError as e:
                    log.debug('redis exception: %s %s:%s', str(e).strip(), response[HOST], _vars)
                    continue
                # Flush redis pipeline periodically
                if i > 4096:
                    _redispipe.execute()
                    i=0
            elif response[OP] == '2':
                # OP '2' == SNMP timeout; counted but nothing stored.
                _timeout+=1
        # Final flush of any commands still buffered in the pipeline.
        _redispipe.execute()
    # Counter rollup
    with success.get_lock():
        success.value+=_success
    with timeout.get_lock():
        timeout.value+=_timeout
    with oidcount.get_lock():
        oidcount.value+=_oidcount
    #log.info('Finished processing %d responses in %.3fs' % (qsize, elapsed))
    log.info('Processed %d responses', (_success+_timeout))
    log.debug('%s', _type_count)
# Messaging Pipeline
def ZMQStreamer(running):
    """
    Proxy ZeroMQ via pipeline (zmq.STREAMER): relay frames from the
    worker-facing PULL endpoint (ZMQ_IN) to the processor-facing PUSH
    endpoint (ZMQ_OUT). Sets running.value to 1 once both endpoints are
    bound so the parent can confirm initialization, then blocks forever
    inside zmq.device().
    """
    pull_side = zmq.Context().socket(zmq.PULL)
    pull_side.setsockopt(zmq.RCVHWM, ZMQ_HWM*MAX_WORKERS)
    pull_side.bind(ZMQ_IN)
    push_side = zmq.Context().socket(zmq.PUSH)
    push_side.setsockopt(zmq.SNDHWM, ZMQ_HWM*ZMQ_PROCESSORS)
    push_side.bind(ZMQ_OUT)
    log.debug('Starting up...')
    # Signal successful socket setup before entering the blocking proxy.
    with running.get_lock():
        running.value = 1
    zmq.device(zmq.STREAMER, pull_side, push_side)
if __name__ == '__main__':
    #db = DB['db']
    #dbh = cx_Oracle.connect('%s/%s@%s/%s' % (db['user'], db['pass'], db['server'], db['name']))
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s.%(msecs)03dZ [%(processName)s/%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%dT%H:%M:%S')
    log = logging.getLogger(__name__)
    # Route logging through a queue so records from threads do not interleave;
    # QueueHandler/QueueListener only exist on Python 3 (hence AttributeError).
    try:
        _log_queue = queue.Queue()
        log_async = logging.handlers.QueueHandler(_log_queue)
        log_queue = logging.handlers.QueueListener(_log_queue, *log.handlers)
        log_queue.start()
        # Overwrite handlers to only utilize QueueHandler()
        log.handlers = [log_async,]
    except AttributeError:
        pass
    #select = dbh.cursor()
    #select.arraysize = 4096
    query = "select something from somewhere"
    community = "public"
    # Column indexes into each host row: (ip/hostname, device model)
    CM_IP = 0
    MODEL = 1
    # Absolute start timer
    _start = time.time()
    # Step timer
    start = time.perf_counter()
    #list(tuple(peername str, community str, devtype str, devtype class instance)..)
    # NOTE(review): host[CM_IP][4]==':' is an IPv6 heuristic keyed to a fixed
    # character position -- confirm against the real peername formats.
    hosts = [(
        "udp6:["+host[CM_IP]+"]" if host[CM_IP][4]==':' else host[CM_IP],
        community,
        host[MODEL] if host[MODEL] in SNMP_DEVTYPES else '__other__',
        SNMP_DEVTYPES[host[MODEL]] if host[MODEL] in SNMP_DEVTYPES else SNMP_DEVTYPES['__other__']
    ) for host in (('archt01', 'other'), ('archt02', 'other'), ('archt03', 'other'), ('archt04', 'other'), ('archt05', 'other'))*200]
    #select.close()
    #dbh.close()
    total = len(hosts)
    end = time.perf_counter()
    log.info('got %d hosts from DB in %.3fms' % (total, (end-start)*1000))
    try:
        ## Global multiprocessing-safe counters
        success = mp.Value('i', 0)
        timeout = mp.Value('i', 0)
        oidcount = mp.Value('i', 0)
        ## ZMQStreamer switch (set to 1 after successful initialization)
        zmq_streamer_running = mp.Value('i', 0)
        # Start ZeroMQ Streamer
        zmq_streamer = mp.Process(target=ZMQStreamer,
                                  args=(zmq_streamer_running,),
                                  name='ZMQStreamer')#,
        zmq_streamer.daemon=True
        zmq_streamer.start()
        # Let the ZMQ sockets start up
        time.sleep(ZMQ_PAUSE)
        if not zmq_streamer_running.value == 1:
            raise RuntimeError("ZMQStreamer failed to initialize")
        # Spin up ZeroMQ Processors
        zmq_processors = []
        for i in range(ZMQ_PROCESSORS):
            zmq_processors.append(
                mp.Process(target=ZMQProcessor,
                           args=(success, timeout, oidcount),
                           name='ZMQProc-%03d' % (i+1))
            )
            zmq_processors[-1].daemon=True
            zmq_processors[-1].start()
        time.sleep(ZMQ_PAUSE)
        # List of multiprocessing.Process() objects (worker processes)
        active_workers = []
        # Worker/process id iterator
        p=0
        # Host index range iterator
        i=0
        # While any hosts or workers exist
        start = time.perf_counter()
        _timeout = SNMP_TIMEOUT
        while hosts or active_workers:
            remaining = total-i
            if hosts and len(active_workers) < MAX_WORKERS:
                #_timeout = SNMP_TIMEOUT+random.randint(p%2, SNMP_TIMEOUT_DELTA)
                _upper = i+MAX_PER_WORKER if remaining > MAX_PER_WORKER else i+remaining
                log.debug('Defining process for range %d:%d (%d*%dms)', i, _upper, (SNMP_RETRIES+1), _timeout)
                # Define process(es) which call get_async C function
                # get_async([(str hostname, str community, [str oid,..])..], int timeout_ms, int retries, int ZMQ_HWM, str ZMQ_IN)
                active_workers.append(
                    mp.Process(target=get_async,
                               args=(hosts[:MAX_PER_WORKER],
                                     _timeout,
                                     SNMP_RETRIES,
                                     ZMQ_HWM,
                                     ZMQ_IN))
                )
                active_workers[-1].daemon=True
                active_workers[-1].start()
                # Consume the slice just handed to the worker.
                del hosts[:MAX_PER_WORKER]
                i+=MAX_PER_WORKER
                if len(active_workers) < MAX_WORKERS:
                    p+=1
                    # test, slow initial startup
                    time.sleep((SNMP_TIMEOUT/MAX_WORKERS)/1000.0)
                    continue
                else:
                    p=0
                    pids = [proc.pid for proc in active_workers]
                    log.debug('Process PIDs: %s' % pids)
            # Continually monitor progress and re-loop when all workers are finished, or maintain process pool with force_reloop
            while active_workers:
                force_reloop = False
                for proc in active_workers:
                    if proc.is_alive():
                        continue
                    else:
                        active_workers.remove(proc)
                        if proc.exitcode == 0:
                            log.debug('%d - Process finished' % proc.pid)
                        else:
                            log.error('%d - Process failed (%d)' % (proc.pid, proc.exitcode))
                            # if processes take variable amount of time, might want to break and loop to start a new process
                            # this ensures MAX_WORKERS processes active at all times
                        force_reloop = True
                        break
                if force_reloop:
                    break
        # Finally, raise successful completion
        raise SystemExit
    except RuntimeError as e:
        log.critical("%s", e)
    except SystemExit:
        # Exit with success
        end = time.perf_counter()
        elapsed = end-start
        log.info('Polling completed in %.3fs' % elapsed)
        __start = time.perf_counter()
        log.debug('Waiting for ZMQProcessors...')
        # Signal end to ZMQProcessor(s)
        zmq_sentinel = zmq.Context().socket(zmq.PUSH)
        zmq_sentinel.connect(ZMQ_IN)
        for _ in range(ZMQ_PROCESSORS):
            zmq_sentinel.send(b'_sentinel')
        zmq_sentinel.close()
        # Wait for ZMQProcessor(s) to complete
        # NOTE(review): removing from zmq_processors while iterating it skips
        # entries; harmless here only because the outer while re-scans.
        while zmq_processors:
            for zmq_processor in zmq_processors:
                zmq_processor.join()
                zmq_processors.remove(zmq_processor)
        __elapsed = time.perf_counter()-__start
        log.debug('ZMQProcessors took an additional %.3fs', __elapsed)
        log.info('from %d total hosts got %d timeouts (%.1f%%)' % (total, timeout.value, ((timeout.value/total)*100)))
        log.info('got %d oid responses from %d hosts' % (oidcount.value, success.value))
        log.info('lost %d hosts' % (total-(success.value+timeout.value)))
        log.info('%.2f oids/sec' % (oidcount.value/elapsed))
        log.info('%.2f reqs/sec' % ((success.value+timeout.value)/elapsed))
    finally:
        _end = time.time()
        _elapsed = _end-_start
        log.info('total time taken %.3fs' % _elapsed)
        # Ensure logging is flushed
        try:
            log_queue.stop()
        except:
            pass
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from layer_function_generator import autodoc
from tensor import assign, fill_constant
from .. import core
from ..framework import Program, Variable, Operator
from ..layer_helper import LayerHelper, unique_name
from ..initializer import force_init_on_cpu
from ops import logical_and, logical_not, logical_or
# Public API of this module: the control-flow layers and guards re-exported
# via `from ... import *`.
__all__ = [
    'split_lod_tensor',
    'merge_lod_tensor',
    'BlockGuard',
    'BlockGuardWithCompletion',
    'StaticRNNMemoryLink',
    'WhileGuard',
    'While',
    'Switch',
    'lod_rank_table',
    'max_sequence_len',
    'lod_tensor_to_array',
    'array_to_lod_tensor',
    'increment',
    'array_write',
    'create_array',
    'less_than',
    'equal',
    'array_read',
    'shrink_memory',
    'array_length',
    'IfElse',
    'DynamicRNN',
    'ConditionalBlock',
    'StaticRNN',
    'reorder_lod_tensor_by_rank',
    'ParallelDo',
    'Print',
    'is_empty',
]
def split_lod_tensor(input, mask, level=0):
    """
    **split_lod_tensor**

    Splits the input LoD tensor into a true branch and a false branch
    according to a boolean column vector mask, applied at the given lod
    level.

    Args:
        input(tuple|list|None): The input tensor that contains complete
            lod information needed to construct the output.
        mask(list): A bool column vector which masks the input.
        level(int): The specific lod level to rank.

    Returns:
        Variable: The true branch of tensor as per the mask applied to input.
        Variable: The false branch of tensor as per the mask applied to input.

    Examples:
        .. code-block:: python

          x = layers.data(name='x', shape=[1])
          x.persistable = True

          y = layers.data(name='y', shape=[1])
          y.persistable = True

          out_true, out_false = layers.split_lod_tensor(
                input=x, mask=y, level=level)
    """
    # Must be the first statement: LayerHelper consumes locals() as kwargs.
    helper = LayerHelper('split_lod_tensor', **locals())
    branches = [helper.create_tmp_variable(dtype=input.dtype) for _ in range(2)]
    helper.append_op(
        type='split_lod_tensor',
        inputs={'X': input, 'Mask': mask},
        outputs={'OutTrue': branches[0], 'OutFalse': branches[1]},
        attrs={'level': level})
    return branches[0], branches[1]
def merge_lod_tensor(in_true, in_false, x, mask, level=0):
    """
    **merge_lod_tensor**

    This function takes in an input :math:`x`, the True branch, the False
    branch and a binary :math:`mask`. Using this information, this function
    merges the True and False branches of the tensor into a single output
    at the lod level indicated by :math:`level`.

    Args:
        in_true(tuple|list|None): The True branch to be merged.
        in_false(tuple|list|None): The False branch to be merged.
        x(tuple|list|None): The input tensor that contains complete
            lod information needed to construct the output.
        mask(list): A bool column vector which masks the input.
        level(int): The specific lod level to rank.

    Returns:
        Variable: The merged output tensor.

    Examples:
        .. code-block:: python

          x = layers.data(
                name='x', shape=[1], dtype='float32', stop_gradient=False)
          y = layers.data(
                name='y', shape=[1], dtype='bool', stop_gradient=False)

          level = 0

          out_true, out_false = layers.split_lod_tensor(
                input=x, mask=y, level=level)
          out = layers.merge_lod_tensor(
                in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
    """
    # Must be the first statement: LayerHelper consumes locals() as kwargs.
    helper = LayerHelper('merge_lod_tensor', **locals())
    merged = helper.create_tmp_variable(dtype=in_true.dtype)
    op_inputs = {
        'X': x,
        'Mask': mask,
        'InTrue': in_true,
        'InFalse': in_false,
    }
    helper.append_op(
        type='merge_lod_tensor',
        inputs=op_inputs,
        outputs={'Out': merged},
        attrs={'level': level})
    return merged
def Print(input,
          first_n=-1,
          message=None,
          summarize=-1,
          print_tensor_name=True,
          print_tensor_type=True,
          print_tensor_shape=True,
          print_tensor_lod=True,
          print_phase='both'):
    '''
    **Print operator**

    Creates a print op so that whenever the wrapped tensor is accessed,
    the prefix `message` is printed together with the tensor's current
    value.

    Args:
        input (Variable): A Tensor to print.
        summarize (int): Print this number of elements in the tensor; a
            negative value prints all elements.
        message (str): A string message to print as a prefix.
        first_n (int): Only log `first_n` number of times.
        print_tensor_name (bool): Print the tensor name.
        print_tensor_type (bool): Print the tensor type.
        print_tensor_shape (bool): Print the tensor shape.
        print_tensor_lod (bool): Print the tensor lod.
        print_phase (str): Which phase to displace, including 'forward',
            'backward' and 'both'. If set to 'backward' or 'both', will
            print the gradients of input tensor.

    Returns:
        Variable: Output tensor, same data with input tensor.

    Examples:
        .. code-block:: python

           value = some_layer(...)
           Print(value, summarize=10,
               message="The content of some_layer: ")
    '''
    # Must be the first statement: LayerHelper consumes locals() as kwargs.
    helper = LayerHelper('print', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    op_attrs = {
        'first_n': first_n,
        'summarize': summarize,
        'message': message or "",
        'print_tensor_name': print_tensor_name,
        'print_tensor_type': print_tensor_type,
        'print_tensor_shape': print_tensor_shape,
        'print_tensor_lod': print_tensor_lod,
        'print_phase': print_phase.upper(),
    }
    helper.append_op(
        type='print', inputs={'In': input}, attrs=op_attrs, outputs={'Out': out})
    return out
class BlockGuard(object):
    """
    BlockGuard class.

    Opens a new sub-block in `main_program` on `with` entry and rolls
    back to the parent block on exit. Exceptions raised inside the
    `with` body are propagated to the caller.
    """

    def __init__(self, main_program):
        if not isinstance(main_program, Program):
            raise TypeError("BlockGuard takes a program")
        self.main_program = main_program

    def __enter__(self):
        self.main_program.create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always pop back to the parent block; returning False when an
        # exception is in flight re-raises it.
        self.main_program.rollback()
        return exc_type is None
class ParallelDo(object):
    """
    ParallelDo class.

    ParallelDo class is used to create a ParallelDo: it records the
    inputs/outputs declared inside its `do()` block and, on block
    completion, appends a single `parallel_do` op to the parent block
    that replicates the sub-block across `places`.
    """

    def __init__(self, places, use_nccl=False, name=None):
        self.helper = LayerHelper("parallel_do", name=name)
        self.inputs = []
        self.places = places
        self.outputs = []
        # Reuses the StaticRNN block-lifecycle state machine.
        self.status = StaticRNN.BEFORE_RNN_BLOCK
        self.use_nccl = use_nccl

    def do(self):
        # Context manager that opens the sub-block and, on clean exit,
        # calls complete_op() (see BlockGuardWithCompletion).
        return BlockGuardWithCompletion(self)

    def parent_block(self):
        # Block that encloses the currently open sub-block.
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def __call__(self, *args, **kwargs):
        # Retrieve the declared outputs; only legal after the block closed.
        if self.status != StaticRNN.AFTER_RNN_BLOCK:
            raise ValueError("RNN output can only be retrieved after rnn block")
        if len(self.outputs) == 0:
            raise ValueError("RNN has no output")
        elif len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs

    def read_input(self, var):
        # Declare `var` as an input replicated to every place.
        self.inputs.append(var)
        return var

    def write_output(self, var):
        # Declare `var` as an output gathered from every place.
        self.outputs.append(var)

    def get_parameters(self):
        # Parameters are names read inside the sub-block that were neither
        # declared via read_input nor produced by an op in the sub-block.
        main_program = self.helper.main_program
        current_block = main_program.current_block()
        parent_block = self.parent_block()

        local_inputs = set()
        params = list()
        for var in self.inputs:
            local_inputs.add(var.name)

        for op in current_block.ops:
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)

            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)

        # Deduplicate; NOTE(review): set() makes the parameter order
        # nondeterministic -- confirm downstream code does not rely on it.
        params = list(set(params))

        return [parent_block.var(name) for name in params]

    def complete_op(self):
        # Called by BlockGuardWithCompletion when the `do()` block closes:
        # mirrors the declared outputs into the parent block and appends
        # the parallel_do op there.
        main_program = self.helper.main_program
        current_block = main_program.current_block()
        parent_block = self.parent_block()

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)

        self.outputs = [
            parent_block.create_var(
                name=o.name,
                shape=o.shape,
                dtype=o.dtype,
                lod_level=o.lod_level,
                persistable=o.persistable,
                stop_gradient=o.stop_gradient) for o in self.outputs
        ]

        inputs = [parent_block.var(i.name) for i in self.inputs]
        outputs = [parent_block.var(o.name) for o in self.outputs]

        parent_block.append_op(
            type='parallel_do',
            inputs={
                'inputs': inputs,
                'parameters': self.get_parameters(),
                'places': self.places
            },
            outputs={'outputs': outputs,
                     'parallel_scopes': [step_scope]},
            attrs={'sub_block': current_block,
                   'use_nccl': self.use_nccl})
class BlockGuardWithCompletion(BlockGuard):
    """
    BlockGuardWithCompletion class.

    A BlockGuard that additionally drives the owner's block-lifecycle
    status and calls its complete_op() when the block exits cleanly,
    so the enclosing op gets appended to the parent block.
    """

    def __init__(self, rnn):
        if not isinstance(rnn, (StaticRNN, ParallelDo)):
            raise TypeError(
                "BlockGuardWithCompletion takes a StaticRNN or ParallelDo")
        super(BlockGuardWithCompletion, self).__init__(rnn.helper.main_program)
        self.rnn = rnn

    def __enter__(self):
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super(BlockGuardWithCompletion, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Skip op completion when the block body failed; re-raise.
            return False
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        self.rnn.complete_op()
        return super(BlockGuardWithCompletion, self).__exit__(
            exc_type, exc_val, exc_tb)
class StaticRNNMemoryLink(object):
    """
    StaticRNNMemoryLink class.

    Records the link between two memory cells of a StaticRNN.

    Args:
        init (Variable): the initial variable for the memory.
        pre_mem (Variable): the memory variable in the previous time step.
        mem (Variable): the memory variable in the current time step;
            may be filled in later (defaults to None).
    """

    def __init__(self, init, pre_mem, mem=None):
        self.init, self.pre_mem, self.mem = init, pre_mem, mem
class StaticRNN(object):
    """Build a ``recurrent`` op over fixed-length (static) sequences.

    Step inputs/outputs and memories are declared inside ``with rnn.step():``;
    when that block exits, the guard calls :meth:`complete_op`, which emits a
    single ``recurrent`` op in the parent block wrapping the step sub-block.
    """
    # Builder status flags.
    BEFORE_RNN_BLOCK = 0
    IN_RNN_BLOCK = 1
    AFTER_RNN_BLOCK = 2

    def __init__(self, name=None):
        self.helper = LayerHelper("static_rnn", name=name)
        self.memories = {}  # memory map, from pre_mem.name --> MemoryLink
        self.inputs = []  # input variable list in current block
        self.outputs = []  # output variable list in parent block
        self.status = StaticRNN.BEFORE_RNN_BLOCK  # status flag.
        # sequence length, since it is a static RNN, sequence length are fixed.
        self.seq_len = None

    def step(self):
        """Return a context manager delimiting the RNN's step block."""
        return BlockGuardWithCompletion(self)

    def _assert_in_rnn_block_(self, method):
        # Guard helper: many APIs are only legal inside the step block.
        if self.status != StaticRNN.IN_RNN_BLOCK:
            raise ValueError("You must invoke {0} in rnn block".format(method))

    def memory(self,
               init=None,
               shape=None,
               batch_ref=None,
               init_value=0.0,
               init_batch_dim_idx=0,
               ref_batch_dim_idx=1):
        """Create a recurrent memory for this RNN.

        Args:
            init: boot memory; if not set, shape and batch_ref must be provided
            shape: shape of the boot memory
            batch_ref: batch size reference variable
            init_value: the init value of boot memory
            init_batch_dim_idx: the index of batch size in init's dimension
            ref_batch_dim_idx: the index of batch size in batch_ref's dimension

        Returns:
            Variable: the previous-step memory variable to use in the step.
        """
        self._assert_in_rnn_block_('memory')
        if init is None:
            if shape is None or batch_ref is None:
                raise ValueError(
                    "if init is None, memory at least need shape and batch_ref")
            parent_block = self.parent_block()
            var_name = unique_name.generate("@".join(
                [self.helper.name, "memory_boot"]))
            boot_var = parent_block.create_var(
                name=var_name,
                shape=shape,
                dtype=batch_ref.dtype,
                persistable=False)

            # Fill the boot memory with init_value, borrowing the batch size
            # from batch_ref so the memory matches the runtime batch.
            parent_block.append_op(
                type="fill_constant_batch_size_like",
                inputs={'Input': [batch_ref]},
                outputs={'Out': [boot_var]},
                attrs={
                    'value': init_value,
                    'shape': boot_var.shape,
                    'dtype': boot_var.dtype,
                    'input_dim_idx': ref_batch_dim_idx,
                    'output_dim_idx': init_batch_dim_idx
                })

            # Recurse into the init-provided branch with the created boot var.
            return self.memory(init=boot_var)
        else:
            pre_mem = self.helper.create_variable(
                name=unique_name.generate("@".join([self.helper.name, "mem"])),
                dtype=init.dtype,
                shape=init.shape)
            self.memories[pre_mem.name] = StaticRNNMemoryLink(
                init=init, pre_mem=pre_mem)
            return pre_mem

    def step_input(self, x):
        """Declare sequence input ``x``; returns its per-step slice."""
        self._assert_in_rnn_block_('step_input')
        if not isinstance(x, Variable):
            raise TypeError("step input takes a Variable")
        # All step inputs must share the same (static) sequence length,
        # taken from the leading dimension.
        if self.seq_len is None:
            self.seq_len = x.shape[0]
        elif self.seq_len != x.shape[0]:
            raise ValueError("Static RNN only take fix seq_len input")

        ipt = self.helper.create_variable(
            name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
        self.inputs.append(ipt)
        return ipt

    def step_output(self, o):
        """Declare per-step variable ``o`` as an output of the RNN."""
        self._assert_in_rnn_block_('step_output')
        if not isinstance(o, Variable):
            raise TypeError("step output takes a Variable")

        tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
        self.helper.append_op(
            type='rnn_memory_helper',
            inputs={'X': [o]},
            outputs={'Out': tmp_o},
            attrs={'dtype': o.dtype})

        # The stacked output in the parent block gains a leading time axis.
        out_var = self.parent_block().create_var(
            name=tmp_o.name,
            shape=[self.seq_len] + list(tmp_o.shape),
            dtype=tmp_o.dtype)

        self.outputs.append(out_var)

    def output(self, *outputs):
        """Convenience wrapper: declare several step outputs at once."""
        for each in outputs:
            self.step_output(each)

    def update_memory(self, mem, var):
        """Bind ``var`` as the next-step value of memory ``mem``."""
        if not isinstance(mem, Variable) or not isinstance(var, Variable):
            raise TypeError("update memory should take variables")
        self.memories[mem.name].mem = var

    def parent_block(self):
        """Return the block enclosing the current (step) block."""
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def __call__(self, *args, **kwargs):
        """Return the RNN's output(s); only valid after the step block."""
        if self.status != StaticRNN.AFTER_RNN_BLOCK:
            raise ValueError("RNN output can only be retrieved after rnn block")
        if len(self.outputs) == 0:
            raise ValueError("RNN has no output")
        elif len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs

    def complete_op(self):
        """Append the ``recurrent`` op to the parent block.

        Collects the step block's external reads as parameters, wires
        memories and in/out links, and emits one ``recurrent`` op whose
        sub_block is the step block.
        """
        main_program = self.helper.main_program
        rnn_block = main_program.current_block()
        parent_block = self.parent_block()

        local_inputs = set()

        # Every op output plus declared step inputs and memories are local to
        # the step block; any other name an op reads must be a parameter.
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)

        for var in self.inputs:
            local_inputs.add(var.name)
        for m in self.memories:
            local_inputs.add(m)

        params = list()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)

        parameters = [parent_block.var(name) for name in params]

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)

        inlinks = [parent_block.var(i.name) for i in self.inputs]
        outlinks = self.outputs

        boot_memories = []
        pre_memories = []
        memories = []
        # NOTE: dict.iteritems() is Python-2-only; items() works on 2 and 3.
        for _, mem in self.memories.items():
            boot_memories.append(mem.init)
            pre_memories.append(mem.pre_mem.name)
            mem_var = rnn_block.var(mem.mem.name)
            assert isinstance(mem_var, Variable)
            new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)

            rnn_block.append_op(
                type='rnn_memory_helper',
                inputs={'X': [mem_var]},
                outputs={'Out': [new_mem]},
                attrs={'dtype': mem_var.dtype})

            memories.append(new_mem.name)

        parent_block.append_op(
            type='recurrent',
            inputs={
                'inputs': inlinks,
                'initial_states': boot_memories,
                'parameters': parameters
            },
            outputs={'outputs': outlinks,
                     'step_scopes': [step_scope]},
            attrs={
                'ex_states': pre_memories,
                'states': memories,
                'sub_block': rnn_block
            })
class WhileGuard(BlockGuard):
    """BlockGuard that finalizes a While op when its body block exits."""
    def __init__(self, while_op):
        if not isinstance(while_op, While):
            raise TypeError("WhileGuard takes a while op")
        super(WhileGuard, self).__init__(while_op.helper.main_program)
        self.while_op = while_op
    def __enter__(self):
        # Flag that subsequent layer calls happen inside the while body.
        self.while_op.status = While.IN_WHILE_BLOCK
        return super(WhileGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # If the body raised, return False so the exception propagates and
        # the while op is never emitted.
        if exc_type is not None:
            return False
        self.while_op.status = While.AFTER_WHILE_BLOCK
        # Emit the actual `while` op now that the body block is complete.
        self.while_op.complete()
        return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
    """Build a ``while`` op driven by a boolean scalar condition.

    Layers created inside ``with while_op.block():`` form the loop body; on
    exit a single ``while`` op referencing that sub-block is appended to the
    parent block.

    Args:
        cond (Variable): Boolean scalar Variable controlling the loop.
        name (str|None): Optional name prefix for the layer helper.

    Raises:
        TypeError: If ``cond`` is not a boolean scalar Variable.
    """
    # Builder status flags.
    BEFORE_WHILE_BLOCK = 0
    IN_WHILE_BLOCK = 1
    AFTER_WHILE_BLOCK = 2

    def __init__(self, cond, name=None):
        self.helper = LayerHelper("while", name=name)
        self.status = While.BEFORE_WHILE_BLOCK
        if not isinstance(cond, Variable):
            raise TypeError("condition should be a variable")
        if cond.dtype != core.VarDesc.VarType.BOOL:
            raise TypeError("condition should be a bool variable")
        # The condition must hold exactly one element (a scalar).  The product
        # is computed with a plain loop instead of the Python-2-only builtin
        # `reduce`, keeping this module importable under Python 3.
        numel = 1
        for dim in cond.shape:
            numel *= dim
        if numel != 1:
            raise TypeError("condition should be a bool scalar")
        self.cond_var = cond

    def block(self):
        """Return a context manager delimiting the loop body block."""
        return WhileGuard(self)

    def complete(self):
        """Append the ``while`` op for the finished body block."""
        main_program = self.helper.main_program
        while_block = main_program.current_block()
        parent_block = main_program.block(main_program.current_block()
                                          .parent_idx)

        # Names produced inside the body (plus the condition itself); any
        # other name an op reads must be fed in from outside via 'X'.
        inner_outputs = {self.cond_var.name}
        x_name_list = set()
        for op in while_block.ops:
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in inner_outputs:
                        x_name_list.add(in_var_name)

            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)

        # Body-produced names that already exist in the parent scope become
        # visible outputs of the while op.
        out_vars = []
        for inner_out_name in inner_outputs:
            if inner_out_name in parent_block.vars:
                out_vars.append(parent_block.var(inner_out_name))

        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)

        parent_block.append_op(
            type='while',
            inputs={
                'X':
                [parent_block.var_recursive(x_name) for x_name in x_name_list],
                'Condition': [self.cond_var]
            },
            outputs={'Out': out_vars,
                     'StepScopes': [step_scope]},
            attrs={'sub_block': while_block})
def lod_rank_table(x, level=0):
    """LoD Rank Table Operator.

    Given an input variable **x** and an LoD level, creates a LoDRankTable
    object: a list of (index, length) int pairs for the specified LoD level,
    ranked by length in descending order.

    .. code-block:: text

        x is a LoDTensor:
            x.lod = [[0,                2,                3],
                     [0,             5,    6,             7]]
            x.data = [a, b, c, d, e, f, g]

        1. set level to 0:
            lod_rank_table_obj = lod_rank_table(x, level=0)
            Get:
                lod_rank_table_obj.items() = [(0, 2), (1, 1)]

        2. set level to 1:
            lod_rank_table_obj = lod_rank_table(x, level=1)
            Get:
                lod_rank_table_obj.items() = [(0, 5), (1, 1), (2, 1)]

    Args:
        x (Variable): Input LoDTensor from which to build the rank table.
        level (int): The LoD level on which to build the rank table.

    Returns:
        Variable: The created LoDRankTable object.

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[10],
                                  dtype='float32', lod_level=1)
            out = layers.lod_rank_table(x=x, level=0)
    """
    helper = LayerHelper("lod_rank_table", **locals())
    rank_table = helper.create_variable(
        type=core.VarDesc.VarType.LOD_RANK_TABLE,
        name=unique_name.generate("lod_rank_table"))
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
        outputs={'Out': rank_table},
        attrs={'level': level})
    return rank_table
def max_sequence_len(rank_table):
    """Max Sequence Len Operator.

    Returns the maximum sequence length of a batch, read from a LoDRankTable.
    The table's (index, length) tuples are already sorted by length in
    descending order, so the op simply returns the first tuple's length.

    Args:
        rank_table (Variable): Input variable which is a LoDRankTable object.

    Returns:
        Variable: An int64 scalar holding the max sequence length.

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[10],
                                  dtype='float32', lod_level=1)
            rank_table = layers.lod_rank_table(x=x, level=0)
            max_seq_len = layers.max_sequence_len(rank_table)
    """
    helper = LayerHelper("max_seqence_len", **locals())
    result = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="max_sequence_len",
        inputs={"RankTable": rank_table},
        outputs={"Out": result})
    return result
def lod_tensor_to_array(x, table):
    """Convert a LOD_TENSOR into a LOD_TENSOR_ARRAY.

    Args:
        x (Variable|list): The LoD tensor to be converted to a tensor array.
        table (ParamAttr|list): The variable that stores the level of lod
                                which is ordered by sequence length in
                                descending order.

    Returns:
        Variable: The array-typed variable converted from the input tensor.

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[10])
            table = fluid.layers.lod_rank_table(x, level=0)
            array = fluid.layers.lod_tensor_to_array(x, table)
    """
    helper = LayerHelper("lod_tensor_to_array", **locals())
    tensor_array = helper.create_variable(
        name=unique_name.generate("lod_tensor_to_array"),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=x.dtype)
    helper.append_op(
        type='lod_tensor_to_array',
        inputs={'X': x,
                'RankTable': table},
        outputs={'Out': tensor_array})
    return tensor_array
def array_to_lod_tensor(x, table):
    """Convert a LoDTensorArray back into a LoDTensor.

    Args:
        x (Variable|list): The lod tensor array to be converted to a tensor.
        table (ParamAttr|list): The variable that stores the level of lod
                                which is ordered by sequence length in
                                descending order.

    Returns:
        Variable: The tensor-typed variable converted from the input array.

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[10])
            table = fluid.layers.lod_rank_table(x, level=0)
            array = fluid.layers.lod_tensor_to_array(x, table)
            lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
    """
    helper = LayerHelper("array_to_lod_tensor", **locals())
    result = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type="array_to_lod_tensor",
        inputs={'X': x,
                'RankTable': table},
        outputs={'Out': result})
    return result
def increment(x, value=1.0, in_place=True):
    """Increment each value in the input tensor ``x`` by ``value``.

    By default the operation is performed in place on ``x``.

    Args:
        x (Variable|list): The tensor that has the input values.
        value (float): The amount by which the values should be incremented.
        in_place (bool): If the increment should be performed in-place.

    Returns:
        Variable: The tensor variable storing the transformation of
                  element-wise increment of each value in the input.

    Examples:
        .. code-block:: python

          data = fluid.layers.data(name='data', shape=[32, 32], dtype='float32')
          data = fluid.layers.increment(x=data, value=3.0, in_place=True)
    """
    helper = LayerHelper("increment", **locals())
    # Either write back into x or allocate a fresh output tensor.
    out = x if in_place else helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type='increment',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'step': float(value)})
    return out
def array_write(x, i, array=None):
    """Write ``x`` into position ``i`` of a LOD_TENSOR_ARRAY.

    If ``array`` is None, a new LOD_TENSOR_ARRAY is created and returned;
    otherwise the given array is written to and returned.

    Args:
        x (Variable|list): The input tensor from which the data will be read.
        i (Variable|list): The index of the output LOD_TENSOR_ARRAY, pointing
                           to the position to which the input tensor will be
                           written.
        array (Variable|list): The output LOD_TENSOR_ARRAY to which the input
                               tensor will be written. If this parameter is
                               NONE, a new LOD_TENSOR_ARRAY will be created
                               and returned.

    Returns:
        Variable: The output LOD_TENSOR_ARRAY where the input tensor is
                  written.

    Examples:
        .. code-block:: python

          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
          arr = layers.array_write(tmp, i=i)
    """
    helper = LayerHelper('array_write', **locals())
    if array is None:
        # Allocate a fresh tensor array when the caller did not supply one.
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x],
                'I': [i]},
        outputs={'Out': [array]})
    return array
def create_array(dtype):
    """Create an empty array of type :math:`LOD_TENSOR_ARRAY`.

    Args:
        dtype (int|float): The data type of the elements in the array.

    Returns:
        Variable: The tensor variable storing the elements of data type.

    Examples:
        .. code-block:: python

          data = fluid.layers.create_array(dtype='float32')
    """
    helper = LayerHelper("array", **locals())
    tensor_array = helper.create_variable(
        name="{0}.out".format(helper.name),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=dtype)
    return tensor_array
def less_than(x, y, force_cpu=True, cond=None, **ignored):
    """
    **Less than**

    Returns the elementwise truth value of :math:`x < y`.

    Args:
        x(Variable): First operand of *less_than*
        y(Variable): Second operand of *less_than*
        force_cpu(Bool|True): The output data will be on CPU if set true.
        cond(Variable|None): Optional output variable to store the result of *less_than*

    Returns:
        Variable: The tensor variable storing the output of *less_than*.

    Examples:
        .. code-block:: python

          less = fluid.layers.less_than(x=label, y=limit)
    """
    helper = LayerHelper("less_than", **locals())
    if cond is None:
        # Allocate a boolean output when the caller did not provide one.
        cond = helper.create_tmp_variable(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='less_than',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]},
        attrs={'force_cpu': force_cpu or force_init_on_cpu()})
    return cond
def equal(x, y, cond=None, **ignored):
    """
    **equal**

    Returns the elementwise truth value of :math:`x == y`.

    Args:
        x(Variable): First operand of *equal*
        y(Variable): Second operand of *equal*
        cond(Variable|None): Optional output variable to store the result of *equal*

    Returns:
        Variable: The tensor variable storing the output of *equal*.

    Examples:
        .. code-block:: python

          less = fluid.layers.equal(x=label, y=limit)
    """
    helper = LayerHelper("equal", **locals())
    if cond is None:
        # Allocate a boolean output when the caller did not provide one.
        cond = helper.create_tmp_variable(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]})
    return cond
def array_read(array, i):
    """Read the element at position ``i`` from a LOD_TENSOR_ARRAY.

    Args:
        array (Variable|list): The input LOD_TENSOR_ARRAY to read from.
        i (Variable|list): The subscript index in tensor array, that points
                           to the position where the data will be read from.

    Returns:
        Variable: The tensor stored at position ``i`` of the array.

    Raises:
        TypeError: If ``array`` is not a LOD_TENSOR_ARRAY Variable.

    Examples:
        .. code-block:: python

          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
          arr = layers.array_read(tmp, i=i)
    """
    helper = LayerHelper('array_read', **locals())
    if not isinstance(
            array,
            Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # Fixed typo in the error message ("vairable" -> "variable").
        raise TypeError("array should be tensor array variable")
    out = helper.create_tmp_variable(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array],
                'I': [i]},
        outputs={'Out': [out]})
    return out
def shrink_memory(x, i, table):
    """
    This function creates a shrink_rnn_memory operator driven by the given
    RankTable, as mentioned in the input parameters.
    """
    helper = LayerHelper('shrink_memory', **locals())
    out = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type='shrink_rnn_memory',
        inputs={'X': [x],
                'I': [i],
                'RankTable': [table]},
        outputs={'Out': [out]},
        attrs={})
    return out
def array_length(array):
    """Compute the length of the input LOD_TENSOR_ARRAY.

    Args:
        array (LOD_TENSOR_ARRAY): The input array whose length is computed.

    Returns:
        Variable: The length of the input LoDTensorArray (int64 scalar).

    Examples:
        .. code-block:: python

          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
          arr = fluid.layers.array_write(tmp, i=i)
          arr_len = fluid.layers.array_length(arr)
    """
    helper = LayerHelper('array_length', **locals())
    out = helper.create_tmp_variable(dtype='int64')
    # The length is a bookkeeping value; it carries no gradient.
    out.stop_gradient = True
    helper.append_op(
        type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [out]})
    return out
class ConditionalBlockGuard(BlockGuard):
    """BlockGuard that finalizes a ConditionalBlock when its scope exits."""
    def __init__(self, block):
        if not isinstance(block, ConditionalBlock):
            raise TypeError("block should be conditional block")
        super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
        self.block = block
    def __enter__(self):
        return super(ConditionalBlockGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Emit the conditional_block op once the body has been fully built.
        self.block.complete()
        return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
                                                           exc_tb)
class ConditionalBlock(object):
    """Builds a ``conditional_block`` op whose sub-block only runs when the
    given condition inputs hold.

    Args:
        inputs (list[Variable]): Condition input variables of the block.
        is_scalar_condition (bool): Whether the condition is a scalar.
        name (str|None): Optional name prefix for the layer helper.
    """
    def __init__(self, inputs, is_scalar_condition=False, name=None):
        for each_input in inputs:
            if not isinstance(each_input, Variable):
                raise TypeError("Each input should be variable")
        self.inputs = inputs
        self.is_scalar_condition = is_scalar_condition
        self.helper = LayerHelper('conditional_block', name=name)
    def block(self):
        # Context manager: ops created inside become the conditional body.
        return ConditionalBlockGuard(self)
    def complete(self):
        """Append the ``conditional_block`` op for the finished body block."""
        inside_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(inside_block.parent_idx)
        # `intermediate` collects names produced inside the body; `params`
        # collects names read before being produced, i.e. external inputs.
        intermediate = set()
        params = set()
        for each_op in inside_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)
            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)
        input_set = set([ipt.name for ipt in self.inputs])
        # External reads, excluding the declared condition inputs.
        param_list = [
            parent_block.var_recursive(each_name) for each_name in params
            if each_name not in input_set
        ]
        # Body-produced names that exist in the parent scope become outputs.
        out_list = [
            parent_block.var(var_name) for var_name in parent_block.vars
            if var_name in intermediate
        ]
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        parent_block.append_op(
            type='conditional_block',
            inputs={
                'X': self.inputs,
                'Params': param_list,
            },
            outputs={'Out': out_list,
                     'Scope': [step_scope]},
            attrs={
                'sub_block': inside_block,
                'is_scalar_condition': self.is_scalar_condition
            })
class Switch(object):
    """Emulates a switch/case construct with chained ConditionalBlocks.

    Each ``case(cond)`` body runs only when ``cond`` holds and no earlier
    case matched; ``default()`` runs when no case matched.  Must be used as
    a context manager (``with`` statement).
    """

    def __init__(self, name=None):
        self.helper = LayerHelper('switch', name=name)
        self.inside_scope = False
        # Conjunction of the negations of every condition seen so far.
        self.pre_not_conditions = []

    def case(self, condition):
        """create a new block for this condition
        """
        if not self.inside_scope:
            raise ValueError("case should be called inside with")

        if not self.pre_not_conditions:
            # First case: guarded by the condition alone.
            cond_block = ConditionalBlock([condition], is_scalar_condition=True)
            self.pre_not_conditions.append(logical_not(x=condition))
        else:
            # Later cases additionally require every earlier case to have
            # failed, so chain the accumulated negation in.
            prev_not = self.pre_not_conditions[-1]
            self.pre_not_conditions.append(
                logical_and(
                    x=prev_not, y=logical_not(x=condition)))
            cond_block = ConditionalBlock(
                [logical_and(
                    x=prev_not, y=condition)],
                is_scalar_condition=True)

        return ConditionalBlockGuard(cond_block)

    def default(self):
        """create a default case for this switch
        """
        if not self.pre_not_conditions:
            raise ValueError("there should be at least one condition")
        # The default body runs when the last accumulated negation holds,
        # i.e. when no previous case matched.
        return ConditionalBlockGuard(
            ConditionalBlock(
                [self.pre_not_conditions[-1]], is_scalar_condition=True))

    def __enter__(self):
        """
        set flag that now is inside switch.block {}
        :return:
        """
        self.inside_scope = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.inside_scope = False
        if exc_type is not None:
            return False  # re-raise exception
        return True
class IfElseBlockGuard(object):
    """Context manager for one branch (true or false) of an IfElse."""
    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")
        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")
        self.is_true = is_true
        self.ie = ifelse
        if is_true:
            self.cond_block = ifelse.conditional_true_block
        else:
            self.cond_block = ifelse.conditional_false_block
        if not isinstance(self.cond_block, ConditionalBlock):
            raise TypeError("Unexpected situation")
        # Replace the ConditionalBlock with its guard; entered in __enter__.
        self.cond_block = self.cond_block.block()
    def __enter__(self):
        self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
        self.cond_block.__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # re-raise inside exception
            return False
        # output_table index 1 holds true-branch outputs, index 0 false-branch
        # (see IfElse.output); each branch must register at least one output.
        if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
    """Builds a conditional true-branch/false-branch program construct.

    Two ConditionalBlocks are created up front; ``true_block()`` and
    ``false_block()`` return guards that route layer calls into them, and
    ``output()`` registers each branch's results.  Calling the finished
    IfElse object merges the two branches' outputs via merge_lod_tensor.
    """
    # Builder status flags.
    OUT_IF_ELSE_BLOCKS = 0
    IN_IF_ELSE_TRUE_BLOCKS = 1
    IN_IF_ELSE_FALSE_BLOCKS = 2
    def __init__(self, cond, name=None):
        if not isinstance(cond, Variable):
            raise TypeError("cond must be a Variable")
        self.helper = LayerHelper('ifelse', name=name)
        self.cond = cond
        self.input_table = {}
        self.status = IfElse.OUT_IF_ELSE_BLOCKS
        self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
        self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
        # NOTE: index 0 holds false-branch outputs and index 1 true-branch
        # outputs (see output() and IfElseBlockGuard.__exit__), i.e. the
        # layout is (false_outs, true_outs) — the previous comment here had
        # the order reversed.
        self.output_table = ([], [])  # (false_outs, true_outs)
    def input(self, x):
        # Split the outer variable into per-branch views; cached by object
        # id so repeated input(x) calls within the branches reuse the split.
        if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("input must in true/false blocks")
        if id(x) not in self.input_table:
            parent_block = self.parent_block()
            out_true = parent_block.create_var(
                name=unique_name.generate('ifelse_input' + self.helper.name),
                dtype=x.dtype)
            out_false = parent_block.create_var(
                name=unique_name.generate('ifelse_input' + self.helper.name),
                dtype=x.dtype)
            parent_block.append_op(
                type='split_lod_tensor',
                inputs={
                    'X': x,
                    'Mask': self.cond,
                },
                outputs={'OutTrue': out_true,
                         'OutFalse': out_false},
                attrs={'level': 0})
            self.input_table[id(x)] = (out_true, out_false)
        else:
            out_true, out_false = self.input_table[id(x)]
        # Return the view matching the branch currently being built.
        if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
            return out_true
        else:
            return out_false
    def parent_block(self):
        current_block = self.helper.main_program.current_block()
        return self.helper.main_program.block(current_block.parent_idx)
    def true_block(self):
        return IfElseBlockGuard(True, self)
    def false_block(self):
        return IfElseBlockGuard(False, self)
    def output(self, *outs):
        """Register the current branch's outputs; only legal inside a branch."""
        if self.status == self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("output can only be invoked in the sub-block")
        # True-branch outputs go to index 1, false-branch to index 0.
        out_table = self.output_table[1 if self.status ==
                                      self.IN_IF_ELSE_TRUE_BLOCKS else 0]
        parent_block = self.parent_block()
        for each_out in outs:
            if not isinstance(each_out, Variable):
                raise TypeError("Each output should be a variable")
            # create outside tensor
            outside_out = parent_block.create_var(
                name=unique_name.generate("_".join(
                    [self.helper.name, 'output'])),
                dtype=each_out.dtype)
            out_table.append(outside_out)
            # assign local var to outside
            assign(input=each_out, output=outside_out)
    def __call__(self):
        if self.status != self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("IfElse::__call__ must be out of sub-block")
        # output_table is (false_outs, true_outs).
        false_len, true_len = map(len, self.output_table)
        if false_len == 0 and true_len == 0:
            raise ValueError("Must invoke true_block/false_block before "
                             "__call__")
        elif false_len != true_len and false_len != 0 and true_len != 0:
            raise ValueError("The output side must be same")
        elif false_len == 0 or true_len == 0:
            # Only one branch produced outputs; return it unmerged.
            return self.output_table[0 if false_len != 0 else 1]
        # else none of false_len/true_len is zero
        # merge together
        rlist = []
        for false_var, true_var in zip(*self.output_table):
            rlist.append(
                merge_lod_tensor(
                    in_true=true_var,
                    in_false=false_var,
                    mask=self.cond,
                    x=self.cond,
                    level=0))
        return rlist
class DynamicRNN(object):
    """Build an RNN over variable-length (LoD) sequences using a While loop.

    Step inputs are ranked by sequence length into a LoDRankTable, converted
    to tensor arrays, and consumed one time step per While iteration; memories
    are shrunk each step so they match the still-active portion of the batch.
    """
    # Builder status flags.
    BEFORE_RNN = 0
    IN_RNN = 1
    AFTER_RNN = 2
    def __init__(self, name=None):
        self.helper = LayerHelper('dynamic_rnn', name=name)
        self.status = DynamicRNN.BEFORE_RNN
        self.lod_rank_table = None  # built lazily by the first step_input()
        self.max_seq_len = None  # loop bound, derived from the rank table
        self.step_idx = None  # created when block() is entered
        self.zero_idx = fill_constant(
            shape=[1], value=0, dtype='int64', force_cpu=True)
        self.mem_dict = dict()  # pre-state name -> backing tensor array
        self.output_array = []
        self.outputs = []
        self.cond = self.helper.create_tmp_variable(dtype='bool')
        self.cond.stop_gradient = False
        self.while_op = While(self.cond)
        self.input_array = []  # (tensor array, dtype) per step input
        self.mem_link = []  # (new_mem, mem_array) pairs written back each step
    def step_input(self, x):
        """Declare LoD tensor ``x`` as sequence input; return per-step slice."""
        self._assert_in_rnn_block_("step_input")
        if not isinstance(x, Variable):
            raise TypeError(
                "step_input() can only take a Variable as its input.")
        parent_block = self._parent_block_()
        if self.lod_rank_table is None:
            # First step input: build the rank table, the loop bound, and the
            # initial loop condition (step_idx < max_seq_len) in the parent.
            self.lod_rank_table = parent_block.create_var(
                name=unique_name.generate('lod_rank_table'),
                type=core.VarDesc.VarType.LOD_RANK_TABLE)
            self.lod_rank_table.stop_gradient = True
            parent_block.append_op(
                type='lod_rank_table',
                inputs={"X": x},
                outputs={"Out": self.lod_rank_table})
            self.max_seq_len = parent_block.create_var(
                name=unique_name.generate('dynamic_rnn_max_seq_len'),
                dtype='int64')
            self.max_seq_len.stop_gradient = False
            parent_block.append_op(
                type='max_sequence_len',
                inputs={'RankTable': self.lod_rank_table},
                outputs={"Out": self.max_seq_len})
            self.cond.stop_gradient = True
            parent_block.append_op(
                type='less_than',
                inputs={'X': self.step_idx,
                        'Y': self.max_seq_len},
                outputs={'Out': self.cond},
                attrs={'force_cpu': True})
        # Convert the whole sequence into a tensor array and read the slice
        # for the current step inside the loop body.
        input_array = parent_block.create_var(
            name=unique_name.generate('dynamic_rnn_input_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
        self.input_array.append((input_array, x.dtype))
        parent_block.append_op(
            type='lod_tensor_to_array',
            inputs={'X': x,
                    'RankTable': self.lod_rank_table},
            outputs={'Out': input_array})
        return array_read(array=input_array, i=self.step_idx)
    def static_input(self, x):
        """Provide a non-sequence input, reordered to match the rank table."""
        self._assert_in_rnn_block_("static_input")
        if not isinstance(x, Variable):
            raise TypeError(
                "static_input() can only take a Variable as its input")
        if self.lod_rank_table is None:
            raise RuntimeError(
                "static_input() must be called after step_input().")
        parent_block = self._parent_block_()
        x_reordered = parent_block.create_var(
            name=unique_name.generate("dynamic_rnn_static_input_reordered"),
            type=core.VarDesc.VarType.LOD_TENSOR,
            dtype=x.dtype)
        parent_block.append_op(
            type='reorder_lod_tensor_by_rank',
            inputs={'X': [x],
                    'RankTable': [self.lod_rank_table]},
            outputs={'Out': [x_reordered]})
        return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
    @contextlib.contextmanager
    def block(self):
        """Context manager for the RNN step body; may only be entered once."""
        if self.status != DynamicRNN.BEFORE_RNN:
            raise ValueError("rnn.block() can only be invoke once")
        self.step_idx = fill_constant(
            shape=[1], dtype='int64', value=0, force_cpu=True)
        self.step_idx.stop_gradient = False
        self.status = DynamicRNN.IN_RNN
        with self.while_op.block():
            yield
            # After the user body: advance the step index, persist updated
            # memories, and refresh the loop condition.
            increment(x=self.step_idx, value=1.0, in_place=True)
            for new_mem, mem_array in self.mem_link:
                array_write(x=new_mem, i=self.step_idx, array=mem_array)
            less_than(
                x=self.step_idx,
                y=self.max_seq_len,
                force_cpu=True,
                cond=self.cond)
        self.status = DynamicRNN.AFTER_RNN
        # Convert accumulated per-step outputs back into LoD tensors.
        for each_array in self.output_array:
            self.outputs.append(
                array_to_lod_tensor(
                    x=each_array, table=self.lod_rank_table))
    def __call__(self, *args, **kwargs):
        """Return the RNN's output(s); only valid after the rnn block."""
        if self.status != DynamicRNN.AFTER_RNN:
            raise ValueError(("Output of the dynamic RNN can only be visited "
                              "outside the rnn block."))
        if len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs
    def memory(self,
               init=None,
               shape=None,
               value=0.0,
               need_reorder=False,
               dtype='float32'):
        """Create a recurrent memory, either from ``init`` or filled with
        ``value`` in a batch-sized tensor of the given shape/dtype."""
        self._assert_in_rnn_block_('memory')
        if init is not None:
            if not isinstance(init, Variable):
                raise TypeError(
                    "The input arg `init` of memory() must be a Variable")
            parent_block = self._parent_block_()
            init_tensor = init
            if need_reorder == True:
                # Reorder the init tensor so its rows align with the
                # length-ranked order used by the step inputs.
                if self.lod_rank_table is None:
                    raise ValueError(
                        'If set need_reorder to True, make sure step_input be '
                        'invoked before '
                        'memory(init=init, need_reordered=True, ...).')
                init_reordered = parent_block.create_var(
                    name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
                    type=core.VarDesc.VarType.LOD_TENSOR,
                    dtype=init.dtype)
                parent_block.append_op(
                    type='reorder_lod_tensor_by_rank',
                    inputs={
                        'X': [init_tensor],
                        'RankTable': [self.lod_rank_table]
                    },
                    outputs={'Out': [init_reordered]})
                init_tensor = init_reordered
            mem_array = parent_block.create_var(
                name=unique_name.generate('dynamic_rnn_mem_array'),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=init.dtype)
            parent_block.append_op(
                type='write_to_array',
                inputs={'X': init_tensor,
                        'I': self.zero_idx},
                outputs={'Out': mem_array})
            retv = array_read(array=mem_array, i=self.step_idx)
            # Shrink to the batch that is still active at this time step.
            retv = shrink_memory(
                x=retv, i=self.step_idx, table=self.lod_rank_table)
            self.mem_dict[retv.name] = mem_array
            return retv
        else:
            if len(self.input_array) == 0:
                raise ValueError(
                    "step_input should be invoked before memory(shape=..., value=...)"
                )
            parent_block = self._parent_block_()
            init = parent_block.create_var(
                name=unique_name.generate('mem_init'), dtype=dtype)
            # Borrow the batch size from the first step input's step-0 slice.
            arr, dtype = self.input_array[0]
            in0 = parent_block.create_var(
                name=unique_name.generate('in0'), dtype=dtype)
            parent_block.append_op(
                type='read_from_array',
                inputs={'X': [arr],
                        'I': [self.zero_idx]},
                outputs={'Out': [in0]})
            parent_block.append_op(
                type='fill_constant_batch_size_like',
                inputs={'Input': [in0]},
                outputs={'Out': [init]},
                attrs={
                    'shape': [-1] + shape,
                    'value': float(value),
                    'dtype': init.dtype
                })
            # Delegate to the init-provided branch with the built tensor.
            return self.memory(init=init)
    def update_memory(self, ex_mem, new_mem):
        """Record that ``new_mem`` replaces memory ``ex_mem`` next step."""
        self._assert_in_rnn_block_('update_memory')
        if not isinstance(ex_mem, Variable):
            raise TypeError("The input arg `ex_mem` of update_memory() must "
                            "be a Variable")
        if not isinstance(new_mem, Variable):
            raise TypeError("The input arg `new_mem` of update_memory() must "
                            "be a Variable")
        mem_array = self.mem_dict.get(ex_mem.name, None)
        if mem_array is None:
            raise ValueError("Please invoke memory before update_memory")
        if self.lod_rank_table is None:
            raise ValueError("Please invoke step_input before update_memory")
        # The actual array_write happens at the end of each loop iteration
        # (see block()).
        self.mem_link.append((new_mem, mem_array))
    def output(self, *outputs):
        """Register per-step variables as RNN outputs."""
        self._assert_in_rnn_block_('output')
        parent_block = self._parent_block_()
        for each in outputs:
            outside_array = parent_block.create_var(
                name=unique_name.generate("_".join(
                    [self.helper.name, "output_array", each.name])),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=each.dtype)
            array_write(x=each, i=self.step_idx, array=outside_array)
            self.output_array.append(outside_array)
    def _parent_block_(self):
        # Return the block enclosing the current (loop body) block.
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block
    def _assert_in_rnn_block_(self, method):
        if self.status != DynamicRNN.IN_RNN:
            raise ValueError("{0} can only be invoked inside rnn block.".format(
                method))
@autodoc()
def reorder_lod_tensor_by_rank(x, rank_table):
    # NOTE(review): the docstring is presumably attached by @autodoc() from
    # the op's registered comment — confirm before adding one here.
    helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
    # Validate argument types before emitting the op.
    helper.is_instance('x', Variable)
    helper.is_instance('rank_table', Variable)
    out = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type='reorder_lod_tensor_by_rank',
        inputs={'X': [x],
                'RankTable': [rank_table]},
        outputs={'Out': [out]})
    return out
def is_empty(x, cond=None, **ignored):
    """**Is Empty**

    Test whether the variable *x* is empty.

    Args:
        x(Variable): Operand of *is_empty*.
        cond(Variable|None): Optional bool output variable; when None a
            temporary one is created.

    Returns:
        Variable: The tensor variable storing the output of *is_empty*.

    Raises:
        TypeError: If input cond is not a variable, or cond's dtype is
                   not bool.

    Examples:
        .. code-block:: python

          less = fluid.layers.is_empty(x=input)
    """
    # NOTE: LayerHelper(**locals()) must see the caller-supplied `cond`,
    # so it is created before cond is (possibly) replaced below.
    helper = LayerHelper("is_empty", **locals())
    if cond is None:
        cond = helper.create_tmp_variable(dtype='bool')
        cond.stop_gradient = True
    elif not isinstance(cond, Variable):
        raise TypeError("cond takes a variable")
    elif cond.dtype != 'bool':
        raise TypeError("The data type of cond must be bool")
    helper.append_op(
        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
    return cond
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class for running instrumentation tests on a single device."""
import collections
import logging
import os
import re
import time
from devil.android import device_errors
from devil.android import flag_changer
from pylib import constants
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.constants import host_paths
from pylib.instrumentation import instrumentation_test_instance
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
from pylib.local.device import local_device_instrumentation_test_run
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
_PERF_TEST_ANNOTATION = 'PerfTest'
class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""
  # Device-side directory (relative to external storage) for EMMA coverage.
  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  # Glob matching device-side perf profile dumps (cleaned before each perf test).
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.
    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool)
    # One port per shard so parallel shards don't race on the same socket.
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
    self._logcat_monitor = None
    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    # Match the package metadata whose test package is the one being run.
    package_info_candidates = [a for a in constants.PACKAGE_INFO.itervalues()
                               if a.test_package == test_pkg.GetPackageName()]
    assert len(package_info_candidates) < 2, (
        'Multiple packages have the same test package')
    self.package_info = (package_info_candidates[0] if package_info_candidates
                         else None)
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    if self.package_info and self.package_info.cmdline_file:
      self.flags = flag_changer.FlagChanger(
          self.device, self.package_info.cmdline_file)
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      # No known cmdline file: flags cannot be injected for this package.
      self.flags = None
  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)
  def _GetInstrumentationArgs(self):
    """Builds the extras dict passed to `am instrument`."""
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file
    return ret
  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)
  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.device.HasRoot():
      logging.warning('Unable to enable java asserts for %s; run `adb root`.',
                      str(self.device))
    else:
      # Toggling asserts requires restarting the Android runtime.
      if self.device.SetJavaAsserts(self.options.set_asserts):
        self.device.RunShellCommand('stop')
        self.device.RunShellCommand('start')
        self.device.WaitUntilFullyBooted()
    # Set the appropriate debug app if one exists
    if self.package_info and self.package_info.package:
      self.device.RunShellCommand(['am', 'set-debug-app', '--persistent',
                                   self.package_info.package],
                                  check_return=True)
    # We give different default value to launch HTTP server based on shard index
    # because it may have race condition when multiple processes are trying to
    # launch lighttpd with same port at same time.
    self.LaunchTestHttpServer(
        os.path.join(host_paths.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.flags:
      flags_to_add = ['--disable-fre', '--enable-test-intents']
      if self.options.strict_mode and self.options.strict_mode != 'off':
        flags_to_add.append('--strict-mode=' + self.options.strict_mode)
      if self.options.device_flags:
        # Merge non-empty lines from the host-side flags file.
        with open(self.options.device_flags) as device_flags_file:
          stripped_flags = (l.strip() for l in device_flags_file)
          flags_to_add.extend([flag for flag in stripped_flags if flag])
      self.flags.AddFlags(flags_to_add)
  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.flags:
      self.flags.Restore()
    # Remove package-specific configuration
    self.device.RunShellCommand(['am', 'clear-debug-app'], check_return=True)
    super(TestRunner, self).TearDown()
  def TestSetup(self, test, flag_modifiers):
    """Sets up the test harness for running a particular test.
    Args:
      test: The name of the test that will be run.
      flag_modifiers: Object with `add`/`remove` flag lists for this run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()
    if self.flags:
      self.flags.PushFlags(add=flag_modifiers.add, remove=flag_modifiers.remove)
    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()
    if self.coverage_dir:
      # Device path is written by the instrumentation; pulled in TestTeardown.
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)
  def _IsFreTest(self, test):
    """Determines whether a test is a first run experience test.
    Args:
      test: The name of the test to be checked.
    Returns:
      Whether the feature being tested is FirstRunExperience.
    """
    annotations = self.test_pkg.GetTestAnnotations(test)
    feature = annotations.get('Feature', None)
    return feature and 'FirstRunExperience' in feature['value']
  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.
    Args:
      test: The name of the test to be checked.
    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)
  def _GetTestCmdlineParameters(self, test):
    """Determines whether the test is parameterized to be run with different
       command-line flags.
    Args:
      test: The name of the test to be checked.
    Returns:
      The list of parameters.
    """
    annotations = self.test_pkg.GetTestAnnotations(test)
    params = instrumentation_test_instance.ParseCommandLineFlagParameters(
        annotations)
    if not params:
      # Unparameterized tests still get one run with no flag modifications.
      params = [collections.namedtuple('Dummy', ['add', 'remove'])([], [])]
    return params
  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.
    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    # Clear stale profile dumps, then watch logcat for perf annotations.
    self.device.RunShellCommand(
        ['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX])
    self._logcat_monitor = self.device.GetLogcatMonitor()
    self._logcat_monitor.Start()
  def TestTeardown(self, test, results):
    """Cleans up the test harness after running a particular test.
    Depending on the options of this TestRunner this might handle performance
    tracking.  This method will only be called if the test passed.
    Args:
      test: The name of the test that was just run.
      results: results for this test.
    """
    self.tool.CleanUpEnvironment()
    if self.flags:
      self.flags.Restore()
    if not results:
      return
    if results.DidRunPass():
      self.TearDownPerfMonitoring(test)
      if self.coverage_dir:
        # Pull coverage data to the host, then delete the device copy.
        self.device.PullFile(
            self.coverage_device_file, self.coverage_host_file)
        self.device.RunShellCommand(
            'rm -f %s' % self.coverage_device_file)
    elif self.package_info and not self.options.skip_clear_data:
      # Failed test: wipe app state so the next test starts clean.
      apk_under_test = self.test_pkg.GetApkUnderTest()
      permissions = apk_under_test.GetPermissions() if apk_under_test else None
      self.device.ClearApplicationState(
          self.package_info.package, permissions=permissions)
  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.
    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]
    # Wait and grab annotation data so we can figure out which traces to parse
    regex = self._logcat_monitor.WaitFor(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))
    # If the test is set to run on a specific device type only (IE: only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything.  The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- for
    # us so we know to ignore the results.
    # The --NORUN-- tag is managed by ChromeTabbedActivityTestBase.java
    if regex.group(1) != '--NORUN--':
      # Obtain the relevant perf data.  The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)
      if not json_string:
        raise Exception('Perf file is empty')
      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)
      # Annotation payload is semicolon-separated sets of
      # "trace,perf_name,units" triples.
      raw_perf_data = regex.group(1).split(';')
      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)
          # Process the performance data
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])
  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self.options.timeout_scale or 1
    timeout_scale *= self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)
  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      try:
        timeout_scale = int(annotations['TimeoutScale']['value'])
      except ValueError:
        logging.warning('Non-integer value of TimeoutScale ignored. (%s)',
                        annotations['TimeoutScale']['value'])
    if self.options.wait_for_debugger:
      # Generous margin so a human attaching a debugger doesn't hit timeouts.
      timeout_scale *= 100
    return timeout_scale
  # pylint: disable=too-many-return-statements
  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60
    # NOTE(review): logging.warn is a deprecated alias of logging.warning.
    logging.warn("Test size not found in annotations for test '%s', using " +
                 "1 minute for timeout.", test)
    return 1 * 60
  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.
    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.
    Returns:
      The raw output of am instrument as a list of lines.
    """
    extras = self._GetInstrumentationArgs()
    extras['class'] = test
    return self.device.StartInstrumentation(
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner),
        raw=True, extras=extras, timeout=timeout, retries=0)
  # pylint: disable=no-self-use
  def _GenerateTestResult(self, test, instr_result_code, instr_result_bundle,
                          statuses, start_ms, duration_ms):
    results = instrumentation_test_instance.GenerateTestResults(
        instr_result_code, instr_result_bundle, statuses, start_ms, duration_ms)
    for r in results:
      if r.GetName() == test:
        return r
    # Fall back to UNKNOWN when the instrumentation output had no entry
    # for this test name.
    logging.error('Could not find result for test: %s', test)
    return test_result.InstrumentationTestResult(
        test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)
  #override
  def RunTest(self, test):
    """Runs |test| once per command-line parameterization and collects results."""
    results = base_test_result.TestRunResults()
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
    cmdline_parameters = self._GetTestCmdlineParameters(test)
    for flag_modifiers in cmdline_parameters:
      start_ms = 0
      duration_ms = 0
      try:
        if self._IsFreTest(test):
          # FRE tests need first-run behavior, so drop the default
          # --disable-fre flag added in SetUp.
          flag_modifiers.remove.append('--disable-fre')
        self.TestSetup(test, flag_modifiers)
        try:
          self.device.GoHome()
        except device_errors.CommandTimeoutError:
          logging.exception('Failed to focus the launcher.')
        time_ms = lambda: int(time.time() * 1000)
        start_ms = time_ms()
        raw_output = self._RunTest(test, timeout)
        duration_ms = time_ms() - start_ms
        # Parse the test output
        result_code, result_bundle, statuses = (
            instrumentation_test_instance.ParseAmInstrumentRawOutput(
                raw_output))
        result = self._GenerateTestResult(
            test, result_code, result_bundle, statuses, start_ms, duration_ms)
        if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
            self.test_pkg.GetPackageName(), self.device):
          result.SetType(base_test_result.ResultType.CRASH)
      except device_errors.CommandTimeoutError as e:
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
            log=str(e) or 'No information')
        if self.package_info:
          # Kill hung processes so the next run starts from a clean state.
          self.device.ForceStop(self.package_info.package)
          self.device.ForceStop(self.package_info.test_package)
      except device_errors.DeviceUnreachableError as e:
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
            log=str(e) or 'No information')
      if len(cmdline_parameters) > 1:
        # Specify commandline flag modifications used in the test run
        result_name = result.GetName()
        if flag_modifiers.add:
          result_name = '%s with {%s}' % (
              result_name, ' '.join(flag_modifiers.add))
        if flag_modifiers.remove:
          result_name = '%s without {%s}' % (
              result_name, ' '.join(flag_modifiers.remove))
        result.SetName(result_name)
      results.AddResult(result)
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)
|
|
'''
File: pinm.py
Description: Class definition
History:
Date Programmer SAR# - Description
---------- ---------- ----------------------------
Author: w. x. chan 29Apr2016 - Created
'''
'''
'''
import numpy as np
import autoD as ad
import sys
from matplotlib import pyplot
from matplotlib.widgets import Button
from matplotlib.collections import LineCollection
import time
from scipy import sparse
from scipy.sparse import linalg
import scipy
global lastOuputTime
lastOuputTime=0.
'''
--------------------Enable save and load session-------------------
Current code unable to pickle nodes after link and maximum recursion depth exceeded while pickling
'''
try:
import cPickle as pickle
except:
import pickle
#
class Session:
    """Registry of named model objects plus a pickle save target.

    One module-level instance (``currentSession``) backs the free functions
    saveSession()/loadSession() defined below.
    """
    def __init__(self,fileName):
        self.nodes=None          # populated only transiently while pickling
        self.saveToFile=fileName # destination path used by saveSession()
        self.objectDomain={}
        # BUG FIX: objectEquation was never initialized, so addEquation()
        # always raised AttributeError.
        self.objectEquation={}
        self.objectBasis={}
        self.objectMaterial={}
        self.objectTrack={}
        self.objectSolver={}
    def addDomain(self,domainName,domainObject):
        self.objectDomain[domainName]=domainObject
        return;
    def addEquation(self,eqnName,eqnObject):
        self.objectEquation[eqnName]=eqnObject
        return;
    def addBasis(self,name,object):
        self.objectBasis[name]=object
        return;
    def addMaterial(self,name,object):
        self.objectMaterial[name]=object
        return;
    def addTrack(self,name,object):
        self.objectTrack[name]=object
        return;
    def addSolver(self,name,object):
        self.objectSolver[name]=object
        return;
    def saveTo(self,fileName):
        """Redirect future saves to *fileName* (does not save anything)."""
        self.saveToFile=fileName
# The module-wide active Session mutated by the free functions below.
global currentSession  # NOTE(review): `global` at module scope is a no-op
currentSession=Session('')
def saveSessionTo(fileName):
    """Point the active session's save target at *fileName* (no save yet)."""
    global currentSession
    currentSession.saveToFile = fileName
def saveSession(fileName='',fast=False):
    """Pickle the current session to its saveToFile path.

    With fast=True the node graph is skipped entirely (nodes are not
    pickled).  Otherwise each Node reference inside every node's link lists
    is temporarily replaced by an integer index so that pickling does not
    recurse through the (cyclic) node graph; the references are restored
    in memory afterwards.
    """
    global currentSession
    if fileName!='':
        saveSessionTo(fileName)
    #sanitize linker to refrain from recursion depth limit
    if fast:
        currentSession.nodes=None
    else:
        # Enumerate every node reachable through the registered domains and
        # stamp each with a temporary index (Node.setIndex).
        nodes=[]
        count=0
        for domain in currentSession.objectDomain.values():
            for node in domain.nodes():
                node.setIndex(count)
                nodes.append(node)
                count+=1
        print("Saving progress : 'converting nodes'")
        numOfNodes=len(nodes)
        for m in range(numOfNodes):
            updateProgress(float(m+1)/numOfNodes)
            # Replace Node references inside link lists by their indices.
            for linkIdentifier in nodes[m].link:
                for n in range(len(nodes[m].link[linkIdentifier])):
                    if type(nodes[m].link[linkIdentifier][n]) is Node:
                        ind=nodes[m].link[linkIdentifier][n].ind
                        nodes[m].link[linkIdentifier][n]=ind
        currentSession.nodes=nodes
    # NOTE(review): `file` shadows the py2 builtin of the same name.
    with open(currentSession.saveToFile, "wb") as file:
        pickle.dump(currentSession, file)
    # Restore the in-memory object graph: indices back to Node references.
    if currentSession.nodes!=None:
        for domain in currentSession.objectDomain.values():
            for node in domain.nodes():
                for linkIdentifier in node.link:
                    for n in range(len(node.link[linkIdentifier])):
                        if type(node.link[linkIdentifier][n]) is int:
                            node.link[linkIdentifier][n]=currentSession.nodes[node.link[linkIdentifier][n]]
        currentSession.nodes=None
def loadSession(file):
    """Unpickle a Session from path *file*, make it current, and return it.

    Mirrors saveSession(): integer entries in node link lists are swapped
    back to Node references via the pickled ``nodes`` index list, which is
    then cleared.
    """
    global currentSession
    with open(file, "rb") as f:
        result = pickle.load(f)
    currentSession=result
    if currentSession.nodes!=None:
        for domain in currentSession.objectDomain.values():
            for node in domain.nodes():
                for linkIdentifier in node.link:
                    for n in range(len(node.link[linkIdentifier])):
                        if type(node.link[linkIdentifier][n]) is int:
                            node.link[linkIdentifier][n]=currentSession.nodes[node.link[linkIdentifier][n]]
        currentSession.nodes=None
    return result
def setSessionAsCurrent(sessionObject):
    """Make *sessionObject* the module-wide active session."""
    global currentSession
    currentSession = sessionObject
def updateProgress(progress):
    """Print a throttled console progress bar.

    progress: fraction in [0, 1); values >= 1 print "Done".  Output is
    rate-limited to once every 30 s (module global ``lastOuputTime``),
    except that the final (>= 1) update always prints.
    """
    global lastOuputTime
    if ((time.time()-lastOuputTime)>30.) or (progress >= 1):
        barLength = 10 # Modify this to change the length of the progress bar
        status = ""
        if isinstance(progress, int):
            progress = float(progress)
        if not isinstance(progress, float):
            progress = 0
            status = "error: progress var must be float\r\n"
        if progress >= 1:
            progress = 1
            status = "Done...\r\n"
        if progress < 0:
            # NOTE(review): negative progress bypasses the bar entirely and
            # drops `status`; presumably a raw counter mode — TODO confirm.
            text = str(-progress)
        else:
            block = int(round(barLength*progress))
            text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
        sys.stdout.write(text)
        sys.stdout.flush()
        lastOuputTime=time.time()
'''
--------------------End of Session class and functions-------------------
'''
class Node:
    """A computation point of the meshfree model.

    Holds position, solved variables and their pending increments, attached
    equations, per-node material properties, and links to neighbouring nodes
    from which shape-function data (moment matrix, transformation, Cholesky
    factor) is built.
    """
    def __init__(self,pos,norm=None): #pos and norm are dicts, e.g. pos={'x':1,'y':2}
        self.type='node'
        self.pos=pos
        self.variable={}            # variableIdentifier -> current value
        self.variableSolveToggle={} # variableIdentifier -> solve this variable?
        self.deltaVariable={}       # variableIdentifier -> pending increment
        self.variableCal={}         # variableIdentifier -> ad.Function interpolator
        self.eqn={}
        self.eqnSolveToggle={}
        self.link={} #include self if basis used requires
        self.linkBasis={}
        self.momentMatrix={}
        self.transformation={}
        self.choleskyDecomposition={}
        # BUG FIX: `norm={}` default was one dict shared by every Node;
        # each node now gets its own dict.
        self.norm={} if norm is None else norm #normal vector, for surface equations
        self.normLink=''
        self.material=[]
        self.variableLink={}
        self.domain=None
    def setIndex(self,ind):#temporary index used while pickling (see saveSession)
        self.ind=ind
    def setDomain(self,domain):
        self.domain=domain
        return;
    def setDeltaVariable(self,variableIdentifier,value):
        self.deltaVariable[variableIdentifier]=value
        return;
    def resetDeltaVariable(self):
        """Zero every pending increment."""
        for variableIdentifier in self.deltaVariable:
            self.deltaVariable[variableIdentifier]=0.
        return;
    def addVariable(self,variableIdentifier,init_value):
        """Register variable(s) with initial value(s); lists are paired index-wise."""
        if type(variableIdentifier) is list:
            for n in range(len(variableIdentifier)):
                if variableIdentifier[n] not in self.variable:
                    self.variableCal[variableIdentifier[n]]=ad.Function(pointInterpolationMethod,self,variableIdentifier[n])
                    self.deltaVariable[variableIdentifier[n]]=0.
                    self.variableSolveToggle[variableIdentifier[n]]=True
                self.variable[variableIdentifier[n]]=init_value[n]
        else:
            if variableIdentifier not in self.variable:
                self.variableCal[variableIdentifier]=ad.Function(pointInterpolationMethod,self,variableIdentifier)
                self.deltaVariable[variableIdentifier]=0.
                self.variableSolveToggle[variableIdentifier]=True
            self.variable[variableIdentifier]=init_value
        return;
    def removeVariable(self,variableIdentifier):
        """Forget variable(s) and all bookkeeping attached to them."""
        if type(variableIdentifier) is list:
            for n in range(len(variableIdentifier)):
                del self.variable[variableIdentifier[n]]
                del self.deltaVariable[variableIdentifier[n]]
                del self.variableLink[variableIdentifier[n]]
                del self.variableCal[variableIdentifier[n]]
                del self.variableSolveToggle[variableIdentifier[n]]
        else:
            del self.variable[variableIdentifier]
            # BUG FIX: the scalar branch used to leave deltaVariable behind,
            # unlike the list branch above.
            del self.deltaVariable[variableIdentifier]
            del self.variableLink[variableIdentifier]
            del self.variableCal[variableIdentifier]
            del self.variableSolveToggle[variableIdentifier]
        return;
    def toggleVariable(self,variableIdentifier,switch):
        """Set the solve toggle of one variable, or of each in a list."""
        if type(variableIdentifier) is list:
            for n in range(len(variableIdentifier)):
                self.variableSolveToggle[variableIdentifier[n]]=bool(switch)
        else:
            self.variableSolveToggle[variableIdentifier]=bool(switch)
    def updateNewVariable(self):
        """Apply pending increments to every variable whose toggle is on."""
        for variableIdentifier in self.variable:
            # BUG FIX: the old test `if self.variableSolveToggle:` checked the
            # dict itself (always truthy once variables exist), so per-variable
            # toggles were silently ignored.
            if self.variableSolveToggle[variableIdentifier]:
                if float('-inf')<self.deltaVariable[variableIdentifier]<float('inf'):
                    self.variable[variableIdentifier]=self.variable[variableIdentifier]+self.deltaVariable[variableIdentifier]
                else:
                    # non-finite delta (nan/inf): skip the update and warn
                    print('Warning! ',self.deltaVariable[variableIdentifier],' encountered in deltaVariable')
        return;
    def setNorm(self,norm):
        self.norm=norm
        return;
    def setNormLink(self,linkIdentifier):
        self.normLink=linkIdentifier
        return;
    def setLinkBasis(self,linkIdentifier,basis):
        self.linkBasis[linkIdentifier]=basis
        return;
    def addMaterial(self,materialIndex,material):
        """Bind a snapshot of *material* at slot *materialIndex*.

        materialIndex differentiates e.g. internal vs external of a surface.
        Callable properties are evaluated for this node; plain values are
        wrapped as ad.Constant, fixing the properties to the node.
        """
        while len(self.material)<=materialIndex:
            self.material.append(None)
        self.material[materialIndex]=Material(material.name)
        for propertyIdentifier in material.properties:
            if callable(material.properties[propertyIdentifier]):
                tempcall=material.properties[propertyIdentifier](self)
                self.material[materialIndex].setProperty(propertyIdentifier,tempcall)
            else:
                self.material[materialIndex].setProperty(propertyIdentifier,ad.Constant(material.properties[propertyIdentifier]))
        return;
    def addEquation(self,eqnClass):
        """Instantiate *eqnClass* for this node and enable it."""
        tempcall=eqnClass(self)
        self.eqn[tempcall.name]=tempcall
        self.eqnSolveToggle[tempcall.name]=True
        return;
    def toggleEquation(self,eqnIdentifier,switch):
        """Set the solve toggle of one equation, or of each in a list."""
        if type(eqnIdentifier) is list:
            for n in range(len(eqnIdentifier)):
                self.eqnSolveToggle[eqnIdentifier[n]]=bool(switch)
        else:
            self.eqnSolveToggle[eqnIdentifier]=bool(switch)
    def setPos(self,value):
        # BUG FIX: previously assigned the undefined name `pos` (NameError).
        self.pos=value #pos is a dict, e.g. {'x':1,'y':2}
        return;
    def addLink(self,linkIdentifier,nodalObject):
        """Add support node(s) under *linkIdentifier*, skipping duplicates."""
        if linkIdentifier not in self.link:
            self.link[linkIdentifier]=[]
            self.momentMatrix[linkIdentifier]=np.zeros(0)
            self.transformation[linkIdentifier]=np.zeros(0)
            self.choleskyDecomposition[linkIdentifier]=np.zeros(0)
        if type(nodalObject) is list:
            for n in range(len(nodalObject)):
                if nodalObject[n] not in self.link[linkIdentifier]:
                    self.link[linkIdentifier].append(nodalObject[n])
        else:
            if nodalObject not in self.link[linkIdentifier]:
                self.link[linkIdentifier].append(nodalObject)
        return;
    def removeLink(self,linkIdentifier,nodalObject=None):
        """Remove node(s) from a link set; with nodalObject=None drop the whole set."""
        if linkIdentifier in self.link:
            if type(nodalObject) is list:
                for n in range(len(nodalObject)):
                    self.link[linkIdentifier].remove(nodalObject[n])
            elif nodalObject==None:
                del self.link[linkIdentifier]
                del self.momentMatrix[linkIdentifier]
                del self.transformation[linkIdentifier]
                del self.choleskyDecomposition[linkIdentifier]
                if linkIdentifier in self.linkBasis:
                    del self.linkBasis[linkIdentifier]
            else:
                self.link[linkIdentifier].remove(nodalObject)
        else:
            print('"',linkIdentifier,'" does not exists.')
        return;
    def setVariableLink(self,variableIdentifier,linkIdentifier):
        """Associate variable(s) with the link set used to interpolate them."""
        if type(variableIdentifier) is list:
            for n in range(len(variableIdentifier)):
                self.variableLink[variableIdentifier[n]]=linkIdentifier[n]
        else:
            self.variableLink[variableIdentifier]=linkIdentifier
        return;
    def updateShapeFunction(self,linkIdentifier):
        """Rebuild moment matrix / transformation / Cholesky factor for a link set.

        Square moment matrix -> plain interpolation; more support nodes than
        basis terms -> weighted least squares with a Gaussian distance weight;
        fewer -> unsolvable (warning only).  Work is skipped when the support
        geometry has not changed since the last call.
        """
        emptydOrder={}
        newMomentMatrix_temp=[]
        self.linkBasis[linkIdentifier].setNode(self)
        for n in self.link[linkIdentifier]:
            newMomentMatrix_temp.append(self.linkBasis[linkIdentifier].cal(n.pos,emptydOrder))
        newMomentMatrix=np.vstack(tuple(newMomentMatrix_temp))
        if not(np.array_equal(self.momentMatrix[linkIdentifier],newMomentMatrix)):
            self.momentMatrix[linkIdentifier]=newMomentMatrix
            shape=newMomentMatrix.shape
            if shape[0]==shape[1]:
                self.transformation[linkIdentifier]=np.transpose(self.momentMatrix[linkIdentifier])
                leastSQ=np.dot(self.transformation[linkIdentifier],self.momentMatrix[linkIdentifier])
                self.choleskyDecomposition[linkIdentifier]=scipy.linalg.cholesky(leastSQ)
                #self.shapeFunctionMatrix[linkIdentifier]=np.linalg.inv(self.momentMatrix[linkIdentifier])
            elif shape[0]>shape[1]:
                # Overdetermined: per-axis distances normalized by the largest
                # distance in that axis feed a Gaussian weighting.
                maxDistance={}
                distance=[]
                for n in range(len(self.link[linkIdentifier])):
                    distance.append({})
                    for coord in self.pos:
                        distance[n][coord]=np.absolute(self.link[linkIdentifier][n].pos[coord]-self.pos[coord])
                        if coord not in maxDistance:
                            maxDistance[coord]=distance[-1][coord]
                        else:
                            maxDistance[coord]=max(maxDistance[coord],distance[n][coord])
                weight=[]
                for n in range(len(self.link[linkIdentifier])):
                    tempWeight=0.
                    for coord in self.pos:
                        tempWeight+=(distance[n][coord]/maxDistance[coord])**2.
                    weight.append(np.exp(-tempWeight/0.2))
                weightMatrix=np.diag(weight)
                self.transformation[linkIdentifier]=np.dot(np.transpose(self.momentMatrix[linkIdentifier]),weightMatrix)
                leastSQ=np.dot(self.transformation[linkIdentifier],self.momentMatrix[linkIdentifier])
                self.choleskyDecomposition[linkIdentifier]=scipy.linalg.cholesky(leastSQ)
                #self.shapeFunctionMatrix[linkIdentifier]=np.dot(invLeastSQ,transformation)
            else:
                print('Not enough support nodes')
        return;
class Basis(ad.Function):
    """An autoD Function wrapping a basis callable, registered with the session."""

    def __init__(self, basis, basisName, *specificArgs):
        self.type = 'basis'
        self.name = basisName
        self.specificArgs = specificArgs
        # first registration under this name wins
        if basisName not in currentSession.objectBasis:
            currentSession.addBasis(basisName, self)
        ad.Function.__init__(self, basis, None, *specificArgs)

    def changeSpecificArgs(self, *specificArgs):
        """Replace the extra arguments handed to the basis callable."""
        self.specificArgs = specificArgs

    def setNode(self, nodalObject):
        """Re-bind the basis so *nodalObject* is its first argument."""
        self.changeArgs(nodalObject, *self.specificArgs)
        return;
class Domain:
    """A (possibly nested) collection of Nodes and sub-Domains.

    The broadcast helpers (setMaterial, addEquation, setVariable, ...)
    apply an operation to every node reachable through the subdomain tree.
    """
    def __init__(self,domainName,norm=None):
        self.type='domain'
        self.name=domainName
        # unnamed domains ('') stay out of the session registry
        if domainName!='':
            if domainName not in currentSession.objectDomain:
                currentSession.addDomain(domainName,self)
        self.subDomain=[] #list of subdomains or nodes
        self.superDomain=None
        # BUG FIX: `norm={}` default was one dict shared by every Domain;
        # each domain now gets its own dict.
        self.normalVector={} if norm is None else norm
        self.maxDistance={}
        self.vertices=None
        self.pos={}
    def setCentroid(self,vertices): #note this is just the average of the vertices not the true centroid
        """Store vertices; derive the average position and per-axis max distance."""
        self.vertices=vertices
        maxDistance={}
        centroid={}
        numOfVertices=len(vertices)
        for coord in vertices[0]:
            total=0.
            for vert in vertices:
                total+=vert[coord]
            centroid[coord]=total/numOfVertices
        self.pos=centroid
        for coord in vertices[0]:
            maxDistance[coord]=0.
            for vert in vertices:
                temp_distance=np.absolute(vert[coord]-centroid[coord])
                if temp_distance>maxDistance[coord]:
                    maxDistance[coord]=temp_distance
        self.maxDistance=maxDistance
        return;
    def addNode(self,nodalObjects):
        """Attach node(s)/subdomain(s), wiring their back-reference to self."""
        if type(nodalObjects) is list:
            for n in range(len(nodalObjects)):
                if nodalObjects[n] not in self.subDomain:
                    self.subDomain.append(nodalObjects[n])
                    if type(nodalObjects[n]) is Node:
                        nodalObjects[n].setDomain(self)
                    else:
                        nodalObjects[n].setSuperDomain(self)
        else:
            if nodalObjects not in self.subDomain:
                self.subDomain.append(nodalObjects)
                if type(nodalObjects) is Node:
                    nodalObjects.setDomain(self)
                else:
                    nodalObjects.setSuperDomain(self)
        return;
    def removeNode(self,nodes):
        if type(nodes) is list:
            for n in range(len(nodes)):
                self.subDomain.remove(nodes[n])
        else:
            self.subDomain.remove(nodes)
        return;
    def setDomainName(self,domainName):
        """Rename and (re)register under the new name; first name wins."""
        self.name=domainName
        if domainName not in currentSession.objectDomain:
            currentSession.addDomain(domainName,self)
        return;
    def setSuperDomain(self,superDomain):
        if self.superDomain !=None:
            # detach from the previous parent first
            print('Warning! Overwriting superDomain.')
            self.superDomain.removeNode(self)
        self.superDomain=superDomain
        return;
    def nodes(self):
        """Flatten the subdomain tree into a list of Nodes (depth-first)."""
        nodes=[]
        for subDomain in self.subDomain:
            if type(subDomain) is Node:
                nodes.append(subDomain)
            else:
                for temp_nodes in subDomain.nodes():
                    nodes.append(temp_nodes)
        return nodes
    def setMaterial(self,materialIndex,materialObject):
        for node in self.nodes():
            node.addMaterial(materialIndex,materialObject)
        return;
    def addEquation(self,eqnClass):
        """Attach one equation class, or a list of them, to every node."""
        if type(eqnClass) is list:
            for n in range(len(eqnClass)):
                for node in self.nodes():
                    node.addEquation(eqnClass[n])
        else:
            for node in self.nodes():
                node.addEquation(eqnClass)
    def toggleEquation(self,eqnIdentifier,switch):
        for node in self.nodes():
            node.toggleEquation(eqnIdentifier,switch)
    def setBasis(self,linkIdentifier,basisObject):
        for node in self.nodes():
            node.setLinkBasis(linkIdentifier,basisObject)
    def setVariable(self,variableIdentifier,init_value):
        for node in self.nodes():
            node.addVariable(variableIdentifier,init_value)
    def setVariableLink(self,variableIdentifier,linkIdentifier):
        for node in self.nodes():
            node.setVariableLink(variableIdentifier,linkIdentifier)
    def toggleVariable(self,variableIdentifier,switch):
        for node in self.nodes():
            node.toggleVariable(variableIdentifier,switch)
class Material:
    """Named bag of material properties, registered with the session."""

    def __init__(self, materialName):
        self.type = 'material'
        self.name = materialName
        # first registration under this name wins
        if materialName not in currentSession.objectMaterial:
            currentSession.addMaterial(materialName, self)
        self.properties = {}

    def setProperty(self, propertyIdentifier, classOrValue):
        """Store a property (constant value or autoD callable) under a name."""
        self.properties[propertyIdentifier] = classOrValue
        return;

    def removeProperty(self, propertyIdentifier):
        """Drop the named property (KeyError if absent)."""
        self.properties.pop(propertyIdentifier)
        return;
class Track:
    """Named collection of scalar trackers evaluated from autoD functions."""

    def __init__(self, trackName):
        self.type = 'track'
        self.tracker = {}
        self.trackerColor = {}
        self.trackerToggle = {}
        self.name = trackName
        # first registration under this name wins
        if trackName not in currentSession.objectTrack:
            currentSession.addTrack(trackName, self)
        self.value = {}

    def addTracker(self, trackerIdentifier, color, func):  # func is an ad.Function
        """Register *func* under *trackerIdentifier* with a plot colour."""
        self.tracker[trackerIdentifier] = func
        self.value[trackerIdentifier] = 0.
        self.trackerColor[trackerIdentifier] = color
        self.trackerToggle[trackerIdentifier] = True
        return;

    def removeTracker(self, trackerIdentifier):
        """Forget a tracker and every piece of bookkeeping attached to it."""
        for registry in (self.tracker, self.value,
                         self.trackerColor, self.trackerToggle):
            del registry[trackerIdentifier]
        return;

    def toggleTracker(self, trackerIdentifier, switch):
        """Enable or disable one tracker, or each tracker in a list."""
        if type(trackerIdentifier) is list:
            names = trackerIdentifier
        else:
            names = [trackerIdentifier]
        for name in names:
            self.trackerToggle[name] = True if switch else False

    def update(self):
        """Re-evaluate every tracker and cache the latest values."""
        for name in self.tracker:
            self.value[name] = self.tracker[name].cal({}, {})
class Solver:
    """Newton-Raphson style solver over the equations of registered domains."""
    def __init__(self,solverName):
        self.type='solver'
        self.name=solverName
        # register with the active session; first name wins
        if solverName not in currentSession.objectSolver:
            currentSession.addSolver(solverName,self)
        self.domain={}   # domainName -> Domain to solve over
        self.track=None  # Track used for convergence monitoring
        self.stop=False  # set by stopCheck() when the stop criterion is met
        # NOTE(review): 'Monintor' misspelling kept — these are public attrs.
        self.errorMonintorTracker=''
        self.errorMonintorErrorValue=0
        self.errorMonintorInvert=False
        self.nodes=[]
        # sparse Jacobian assembled as COO triplets
        self.jMatrixRow=[]
        self.jMatrixCol=[]
        self.jMatrixData=[]
        self.jMatrixShape=None
        self.varIndex=[]
        self.indexVar=[]
        self.equationIndex=[]
        self.equationNodeIndex=[]
        self.fxValue=[]
def reset(self):
self.stop=False
    def setTrack(self,trackObject): #input Track object
        """Attach the Track object used for convergence monitoring."""
        self.track=trackObject
        return;
    def addDomain(self,domainObject): #input Domain object
        """Register a Domain object with this solver, keyed by its name."""
        self.domain[domainObject.name]=domainObject
        return;
    def removeDomain(self,domainName):
        """Deregister the domain with the given name (KeyError if absent)."""
        del self.domain[domainName]
        return;
def addEquationIntoDomains(self,equationClass,domainName):
if type(domainName) is list:
for n in range(len(domainName)):
self.domain[domainName[n]].addEquation(self,eqnClass)
else:
self.domain[domainName].addEquation(self,eqnClass)
return;
    def setErrorMonintor(self,trackerIdentifier,errorValue,invert=False):
        """Configure which tracker stopCheck() watches and its threshold.

        invert=False stops once the tracker drops below errorValue;
        invert=True stops once it rises above errorValue.
        """
        self.errorMonintorTracker=trackerIdentifier
        self.errorMonintorErrorValue=errorValue
        self.errorMonintorInvert=invert
def stopCheck(self):
if self.errorMonintorInvert==False:
if self.track.value[self.errorMonintorTracker]<self.errorMonintorErrorValue:
self.stop=True
elif self.errorMonintorInvert==True:
if self.track.value[self.errorMonintorTracker]>self.errorMonintorErrorValue:
self.stop=True
'''
def iterate_stationary(self):
#Newton-Raphson iteration to find variable values
#Gauss-Seidel Iteration Methods to find delta of variable values in Newton-Raphson iteration
emptydOrder={}
#determine Gauss-Seidel Iteration coefficients ie the Jacobian matrix
###########################its not always converging!!!!
jMatrix={}
fxValue={}
for domainIdentifier in self.domain:
for node in self.domain[domainIdentifier].nodes():
node.resetDeltaVariable()
print("Solving Progress : 'Jacobian coefficients'")
for domainIdentifier in self.domain:
jMatrix[domainIdentifier]=[]
fxValue[domainIdentifier]=[]
print("Solving Progress : 'Domain ",domainIdentifier,"'")
nodes=self.domain[domainIdentifier].nodes()
nodeNum=len(nodes)
for n in range(len(nodes)):
jMatrix[domainIdentifier].append({})
fxValue[domainIdentifier].append({})
for equationName in nodes[n].eqn:
if nodes[n].eqnSolveToggle[equationName]:
tempfxValue=nodes[n].eqn[equationName].cal(nodes[n].pos,emptydOrder)
fxValue[domainName][n][equationName]=tempfxValue
if not(float('-inf')<tempfxValue<float('inf')):
print('Waring! ',tempfxValue,' encountered for fxValue')
jMatrix[domainIdentifier][n][equationName]={}
for variableIdentifier in nodes[n].variable:
for link in nodes[n].link[nodes[n].variableLink[variableIdentifier]]:
if link.variableSolveToggle[variableIdentifier]:
var=link.variableCal[variableIdentifier]
tempjMatrixValue=nodes[n].eqn[equationName].cal(nodes[n].pos,{var:1})
jMatrix[domainIdentifier][n][equationName][var]=tempjMatrixValue
if not(float('-inf')<tempjMatrixValue<float('inf')):
print('Waring! ',tempjMatrixValue,' encountered in calculating Jacobian')
updateProgress(float(n+1)/nodeNum)
#Gauss-Seidel Iteration Method with alteration
converged=False
iterCount=0
print('Solving Progress : Gauss-Seidel Iteration Method')
while not(converged):
converged=True
for domainIdentifier in self.domain:
nodes=self.domain[domainIdentifier].nodes()
nodeNum=len(nodes)
for n in range(len(nodes)):
for equationName in nodes[n].eqn:
if nodes[n].eqnSolveToggle[equationName]:
update={}
for variableIdentifier in nodes.variable:
if node.variableSolveToggle[variableIdentifier]:
var=node.variableCal[variableIdentifier]
if jMatrix[domainIdentifier][n][equationName][var]!=0.:
sumtotal=0.
for variable in jMatrix[domainIdentifier][n][equationName]:
linkNode,linkVariableIdentifier=variable.checkArgs()
if variable!=var and linkNode.variableSolveToggle[linkVariableIdentifier]:
sumtotal+=jMatrix[domainIdentifier][n][equationName][variable]*linkNode.deltaVariable[linkVariableIdentifier]
update[var]=(fxValue[domainName][n][equationName]-sumtotal)/jMatrix[domainIdentifier][n][equationName][var]
for var in update:
oldVar=var.checkArgs()[0].deltaVariable[var.checkArgs()[1]]
var.checkArgs()[0].setDeltaVariable(var.checkArgs()[1],update[var])
if update[var]!=0:
if np.absolute(np.absolute(oldVar/update[var]-1.))>self.errorMonintorErrorValue:
converged=False
elif np.absolute(np.absolute(oldVar))>self.errorMonintorErrorValue:
converged=False
iterCount-=1
updateProgress(iterCount)
print(' -------Converged')
#update variables
for domain in self.domain.values():
for node in domain.nodes():
node.updateNewVariable()
return;
'''
def calFx(self,start=0,stop=0):
#calculate the Jacobian matrix
if stop<=start:
stop=self.jMatrixShape[0]
tempNum=stop-start
print('Solving Progress : Updating Function value')
result=[]
for n in range(self.jMatrixShape[0]):
tempfxValue=self.equationIndex[n].cal(self.nodes[self.equationNodeIndex[n]].pos,{})
self.fxValue[n]=-tempfxValue
result.append(-tempfxValue)
if not(float('-inf')<tempfxValue<float('inf')):
print('Waring! ',tempfxValue,' encountered for fxValue')
updateProgress(float(n+1)/tempNum)
return result
    def calJacobianMatrix(self,initialRun=False,start=0,stop=0):
        """Refresh Jacobian triplet values for slots [start, stop).

        Entries belonging to linear equations are only recomputed when
        initialRun is True (their derivatives are constant).  Returns the
        list of recomputed values.
        """
        if stop<=start:
            stop=len(self.jMatrixRow)
        #calculate the Jacobian matrix
        tempNum=stop-start
        print('Solving Progress : Updating Jacobian Matrix')
        result=[]
        for n in range(start,stop):
            if self.equationIndex[self.jMatrixRow[n]].nonLinear or initialRun:
                # d(equation)/d(variable) via a unit derivative-order request
                tempjMatrixValue=self.equationIndex[self.jMatrixRow[n]].cal(self.nodes[self.equationNodeIndex[self.jMatrixRow[n]]].pos,{self.indexVar[self.jMatrixCol[n]]:1})
                self.jMatrixData[n]=tempjMatrixValue
                result.append(tempjMatrixValue)
                if not(float('-inf')<tempjMatrixValue<float('inf')):
                    print('Waring! ',tempjMatrixValue,' encountered in calculating Jacobian')
            # NOTE(review): with start > 0 this fraction exceeds 1 — confirm
            # whether (n - start + 1) was intended.
            updateProgress(float(n+1)/tempNum)
        return result
    def iterate(self):
        """One Newton step: solve J*delta = -f(x) in least squares, apply deltas.

        Assembles the Jacobian from the COO triplets as CSR and solves with
        lsqr (`sparse`/`linalg`/`np` are presumably scipy.sparse /
        scipy.sparse.linalg / numpy imported at module level — confirm).
        """
        jMatrixSparse=sparse.csr_matrix((self.jMatrixData, (self.jMatrixRow, self.jMatrixCol)), shape=self.jMatrixShape)
        fxValueMat=np.array(self.fxValue)
        # diagnostic: squared norm of the (negated) residual vector
        sumAll=0.
        for allValue in self.fxValue:
            sumAll+=np.absolute(np.absolute(allValue))**2.
        print(sumAll)
        tempResult=linalg.lsqr(jMatrixSparse,fxValueMat)
        newDeltaVar=tempResult[0]
        #update variables
        for n in range(len(self.varIndex)):
            for variableIdentifier in self.varIndex[n]:
                # route each solved delta back to its owning node/variable
                self.nodes[n].setDeltaVariable(variableIdentifier,newDeltaVar[self.varIndex[n][variableIdentifier]])
            self.nodes[n].updateNewVariable()
        return;
    def resetJMatrix(self):
        """Flatten all domains into the index arrays for the sparse Jacobian.

        Assigns every node a global index, resets its delta variables, and
        rebuilds the COO row/col/data triplets plus the equation/variable
        lookup tables used by calFx()/calJacobianMatrix()/iterate().  Also
        installs a default error tracker when none was configured.
        """
        self.nodes=[]
        indCount=0
        # give every node across all domains a global index
        for domainIdentifier in self.domain:
            for node in self.domain[domainIdentifier].nodes():
                node.ind=indCount
                self.nodes.append(node)
                indCount+=1
        self.varIndex=[]
        for node in self.nodes:
            node.resetDeltaVariable()
            self.varIndex.append({})
        self.jMatrixRow=[]
        self.jMatrixCol=[]
        self.jMatrixData=[]
        self.jMatrixShape=None
        self.indexVar=[]
        self.equationIndex=[]
        self.equationNodeIndex=[]
        self.fxValue=[]
        # fall back to the defaultError-based tracker when none was set up
        if self.errorMonintorTracker=='' or self.errorMonintorTracker=='default error track':
            if self.track==None:
                newTrack=Track('error')
                self.setTrack(newTrack)
            defaultErrorTrack=ad.Function(defaultError,self.nodes)
            self.track.addTracker('default error track','k',defaultErrorTrack)
            self.setErrorMonintor('default error track',0.001)
        equationCount=0
        varCount=0
        nodeNum=len(self.nodes)
        for n in range(len(self.nodes)):
            for equationName in self.nodes[n].eqn:
                if self.nodes[n].eqnSolveToggle[equationName]:
                    # one Jacobian row per enabled equation on this node
                    self.equationIndex.append(self.nodes[n].eqn[equationName])
                    self.equationNodeIndex.append(n)
                    self.fxValue.append(0.)
                    for variableIdentifier in self.nodes[n].variable:
                        for link in self.nodes[n].link[self.nodes[n].variableLink[variableIdentifier]]:
                            if link.variableSolveToggle[variableIdentifier]:
                                var=link.variableCal[variableIdentifier]
                                # first sighting of (node, variable): assign a column
                                if variableIdentifier not in self.varIndex[link.ind]:
                                    self.varIndex[link.ind][variableIdentifier]=varCount
                                    self.indexVar.append(link.variableCal[variableIdentifier])
                                    varCount+=1
                                self.jMatrixCol.append(self.varIndex[link.ind][variableIdentifier])
                                self.jMatrixRow.append(equationCount)
                                self.jMatrixData.append(0.)
                    equationCount+=1
        self.jMatrixShape=(equationCount, varCount)
    def solve(self,fullSolve=True):
        """Run the solver loop with a live matplotlib convergence plot.

        fullSolve=True iterates (Jacobian, residuals, least-squares step)
        until stopCheck() or the Stop button sets self.stop, plotting each
        tracker's value per iteration on a log axis.  fullSolve=False
        performs a single iterate()/track update and returns the monitored
        tracker's value.
        NOTE(review): the fullSolve=False path calls iterate() without
        refreshing the Jacobian/residuals first — confirm intended.
        """
        countplot={}
        trackplot={}
        # (re)build the flattened system on first use
        if len(self.nodes)==0:
            initRun=True
            self.resetJMatrix()
        else:
            initRun=False
        for trackIdentifier in self.track.value:
            countplot[trackIdentifier]=[]
            trackplot[trackIdentifier]=[]
        if not(fullSolve):
            self.iterate()
            self.track.update()
            return self.track.value[self.errorMonintorTracker]
        # interactive figure with Stop / Pause / Continue buttons
        figure = pyplot.figure()
        pyplot.subplots_adjust(bottom=0.2)
        pyplot.xlabel('Iteration')
        pyplot.ylabel(self.errorMonintorTracker)
        callback = solverControl(self)
        axpause = pyplot.axes([0.7, 0.05, 0.1, 0.075])
        axcont = pyplot.axes([0.81, 0.05, 0.1, 0.075])
        axstop = pyplot.axes([0.05, 0.05, 0.15, 0.075])
        bstop = Button(axstop, 'Stop')
        bstop.on_clicked(callback.stop)
        bpause = Button(axpause, 'Pause')
        bpause.on_clicked(callback.pause)
        bcont = Button(axcont, 'Continue')
        bcont.on_clicked(callback.cont)
        count=0
        ymin=float('inf')
        ymax=10.**-30.
        pyplot.ion()
        ax = pyplot.axes()
        ax.set_yscale('log')
        while self.stop==False:
            while callback.pausing==False and self.stop==False:
                # full Jacobian only on the very first pass of a fresh system;
                # later passes recompute only non-linear entries
                if count==0 and initRun:
                    self.calJacobianMatrix(True)
                else:
                    self.calJacobianMatrix()
                self.calFx()
                self.iterate()
                self.track.update()
                for trackIdentifier in self.track.value:
                    countplot[trackIdentifier].append(count)
                    trackplot[trackIdentifier].append(self.track.value[trackIdentifier])
                    ymin=min(ymin,self.track.value[trackIdentifier])
                    ymax=max(ymax,self.track.value[trackIdentifier])
                    if count>0:
                        # append only the newest segment instead of re-plotting
                        line_segment = LineCollection([[(countplot[trackIdentifier][-2],trackplot[trackIdentifier][-2]),(countplot[trackIdentifier][-1],trackplot[trackIdentifier][-1])]],
                                                      linewidths=(0.5, 1, 1.5, 2),
                                                      linestyles='solid',
                                                      colors=self.track.trackerColor[trackIdentifier])
                        ax.add_collection(line_segment)
                if count>0:
                    ax.set_xlim(0, count)
                    # clamp to keep the log axis valid
                    if ymin<(10.**-30.):
                        ymin=10.**-30.
                    ax.set_ylim(ymin, ymax)
                    pyplot.draw()
                    pyplot.pause(0.05)
                self.stopCheck()
                count+=1
            # paused: keep the GUI event loop responsive
            pyplot.pause(0.05)
        pyplot.show()
def defaultError(x, dOrder, nodeList):
    """Relative-change convergence metric over a list of nodes.

    For every variable name, find the largest magnitude it takes across
    all nodes, then report the worst ratio |delta| / max-magnitude.  A
    variable whose maximum magnitude is zero forces the error to 1.
    The x/dOrder arguments are unused (tracker-function signature).
    """
    peak = {}
    for node in nodeList:
        for name, value in node.variable.items():
            magnitude = np.absolute(value)
            peak[name] = max(peak.get(name, magnitude), magnitude)
    worst = 0.
    for node in nodeList:
        for name in node.variable:
            if peak[name] != 0:
                worst = max(worst, np.absolute(node.deltaVariable[name]) / peak[name])
            else:
                # zero-magnitude variable: flag as fully unconverged
                worst = 1.
    return worst
class solverControl:
    """Matplotlib-button callbacks controlling a Solver's run state.

    stop/pause/cont are wired to Button.on_clicked; the *Func variants
    hold the actual state changes so they can also be called directly.
    """
    def __init__(self, solverObject):
        self.solver = solverObject
        self.pausing = False

    # --- button event handlers (the event argument is ignored) ---
    def stop(self, event):
        self.stopFunc()

    def pause(self, event):
        self.pauseFunc()

    def cont(self, event):
        self.contFunc()

    # --- direct state changes ---
    def stopFunc(self):
        # tell the solver's main loop to terminate
        self.solver.stop = True

    def pauseFunc(self):
        self.pausing = True

    def contFunc(self):
        self.pausing = False
'''
------------------------Method-----------------------------------------
'''
def pointInterpolationMethod(x,dOrder,nodalObject,variableIdentifier):
    """Interpolate variableIdentifier at position x via the node's linked basis.

    dOrder maps variable-calc objects to outstanding derivative orders.
    If one of the linked variables appears in dOrder, the interpolation
    differentiates w.r.t. it once (substituting a unit nodal vector and
    decrementing its order); otherwise the actual nodal values are used.
    Returns a float (np.sum collapses the result to a scalar).
    """
    #check basis variable
    linkIdentifier=nodalObject.variableLink[variableIdentifier]
    nodalObject.linkBasis[linkIdentifier].changeArgs(nodalObject)
    nodalObject.updateShapeFunction(linkIdentifier)
    for n in range(len(nodalObject.link[linkIdentifier])):
        if nodalObject.link[linkIdentifier][n].variableCal[variableIdentifier] in dOrder:
            # derivative requested w.r.t. this linked variable:
            # replace nodal values by a unit vector and lower its order by 1
            valMatrix_temp=np.zeros(len(nodalObject.link[linkIdentifier]))
            valMatrix_temp[n]=1.
            valMatrix=np.transpose(valMatrix_temp)
            new_dOrder=dOrder.copy()
            new_dOrder[nodalObject.link[linkIdentifier][n].variableCal[variableIdentifier]]=dOrder[nodalObject.link[linkIdentifier][n].variableCal[variableIdentifier]]-1
            break
    else:
        # for-else: no linked variable appears in dOrder -> interpolate the
        # actual nodal values unchanged
        valMatrix_temp=[]
        for m in nodalObject.link[linkIdentifier]:
            valMatrix_temp.append(m.variable[variableIdentifier])
        valMatrix=np.vstack(tuple(valMatrix_temp))
        new_dOrder=dOrder.copy()
    basisVal=nodalObject.linkBasis[linkIdentifier].cal(x,new_dOrder)
    tempValMatrix=np.dot(nodalObject.transformation[linkIdentifier],valMatrix)
    # solve for basis coefficients using the precomputed Cholesky factor
    basisCoef=scipy.linalg.cho_solve((nodalObject.choleskyDecomposition[linkIdentifier],False),tempValMatrix)
    result=np.dot(basisVal,basisCoef)
    return np.sum(result) #ensure return float
|
|
#===============================================================================
# OMDb API Search Script - Module - dispdat
#-------------------------------------------------------------------------------
# Version: 0.1.3
# Updated: 03-11-2013
# Author: Alex C.
# License: MIT
#-------------------------------------------------------------------------------
# Notes
#===============================================================================
"""
Contains functions for displaying movie data, either from a JSON file,
or from the OMDb API.
"""
#===============================================================================
# IMPORTS
#===============================================================================
import os
import re
from const import *
#===============================================================================
# PADDING VARIABLES
#===============================================================================
infopad = 0
imdbpad = 0
rtpad = 0
#===============================================================================
# DISPLAY MOVIE INFORMATION
#===============================================================================
def movdata(mov):
    """Clear the screen and pretty-print one movie's data (Python 2).

    mov is a dict keyed by the MKEY constants; the label constants
    (INFOLBL/IMDBLBL/RTLBL) and layout strings (NL, HR) all come from
    the star-imported const module.  The exact meaning of each MKEY
    index is assumed from the labels printed alongside it — confirm
    against const.
    """
    osclear()
    # wrap long free-text fields to the 79-column screen width
    brokenplot = linebreak(mov[MKEY[13]])
    brokenstars = linebreak(mov[MKEY[7]], False)
    title(mov[MKEY[3]], mov[MKEY[4]])
    print brokenplot
    title("Movie Info")
    print INFOLBL[0]
    print mov[MKEY[5]] + NL
    print INFOLBL[1]
    print mov[MKEY[6]] + NL
    print INFOLBL[2]
    print brokenstars + NL
    print INFOLBL[3]
    print mov[MKEY[8]] + NL
    print INFOLBL[4]
    print mov[MKEY[9]] + NL
    print INFOLBL[5]
    print mov[MKEY[10]] + NL
    print INFOLBL[6]
    print mov[MKEY[11]] + NL
    print INFOLBL[7]
    print mov[MKEY[12]] + NL
    title("IMDb Rating")
    print IMDBLBL[0]
    print mov[MKEY[14]] + "/10" + NL
    print IMDBLBL[1]
    print mov[MKEY[15]] + NL
    title("Rotten Tomatoes Rating")
    print RTLBL[0]
    # "certified" gets special display text
    if mov[MKEY[16]] == "certified":
        print "Certified Fresh" + NL
    else:
        print mov[MKEY[16]].capitalize() + NL
    print RTLBL[1]
    print mov[MKEY[17]] + "%" + NL
    print RTLBL[2]
    print mov[MKEY[18]] + "/10" + NL
    print RTLBL[3]
    print mov[MKEY[19]] + NL
    print RTLBL[4]
    print mov[MKEY[20]] + "%" + NL
    # NOTE(review): RTLBL[2] is reused here (audience score?) — confirm.
    print RTLBL[2]
    print mov[MKEY[21]] + "/5" + NL
    print RTLBL[5]
    print mov[MKEY[22]] + NL
    print HR + NL
#===============================================================================
# GENERAL FORMATTING FUNCTIONS
#===============================================================================
#-------------------------------------------------------------------------------
# OS Dependent Clear Screen
#-------------------------------------------------------------------------------
def osclear ():
    """Clear the terminal using the appropriate command for the host OS.

    OSNAME comes from the star-imported const module.

    BUG FIX: the posix branch used `OSNAME is "posix"`, which compares
    object identity and only worked when the interpreter happened to
    intern the string; equality needs `==`.
    """
    if OSNAME in ["nt", "dos"]:
        os.system('cls')
    elif OSNAME == "posix":
        os.system('clear')
#-------------------------------------------------------------------------------
# Title Printer
#-------------------------------------------------------------------------------
def title (*args):
    """Print a title framed by horizontal rules (Python 2).

    One argument prints the title alone; two print "title (subtitle)";
    any other count prints an empty frame.  HR/NL come from const.
    """
    argl = len(args)
    print HR
    if argl == 1:
        print args[0]
    elif argl == 2:
        print args[0] + " (" + args[1] + ")"
    else:
        pass
    print HR + NL
#-------------------------------------------------------------------------------
# Section Title Printer
#-------------------------------------------------------------------------------
def sect (title):
    """Print a section heading framed by short rules (Python 2).

    NOTE(review): the parameter shadows the module-level title() function,
    so title() cannot be called from inside this body.
    """
    print HR3 + " " + title + " " + HR3 + NL
#-------------------------------------------------------------------------------
# Numbered List Printer
#-------------------------------------------------------------------------------
def numlist (thelist, upper=False):
    """Print thelist as a 1-based numbered list (Python 2).

    upper=True upper-cases each item.  Ends with a blank line.
    """
    index = 1
    if not upper:
        for item in thelist:
            print "[{0}] {1}".format(index, item)
            index += 1
    elif upper:
        for item in thelist:
            print "[{0}] {1}".format(index, item.upper())
            index += 1
    print
#-------------------------------------------------------------------------------
# Runtime Converter
#-------------------------------------------------------------------------------
def convtime (time):
    """Convert an "H h M min" runtime string to total minutes.

    Returns e.g. "112 min" for "1 h 52 min"; any other shape (including
    a plain "90 min") yields None unchanged, matching the original
    four-token contract.
    """
    parts = time.split(" ")
    if len(parts) != 4:
        return None
    hours = int(parts[0])
    minutes = int(parts[2])
    return str(60 * hours + minutes) + " min"
#-------------------------------------------------------------------------------
# Padding Setter
#-------------------------------------------------------------------------------
def padset (INFOLBL, IMDBLBL, RTLBL):
    """Widen the global padding values to longest label + 1 per group.

    Each pad only ever grows (matching the original monotonic updates).

    BUG FIX: the old loops compared len(x) against the already-padded
    width, so a label exactly one character longer than the previous
    longest failed to widen the pad (e.g. ["abc", "abcd"] left the pad
    at 4 instead of 5).  Also dropped the unused `temppad` local.
    """
    global infopad, imdbpad, rtpad
    infopad = max([infopad] + [len(x) + 1 for x in INFOLBL])
    imdbpad = max([imdbpad] + [len(x) + 1 for x in IMDBLBL])
    rtpad = max([rtpad] + [len(x) + 1 for x in RTLBL])
#-------------------------------------------------------------------------------
# Line Breaker
#-------------------------------------------------------------------------------
def linebreak (plot, nl=True):
    """Wrap `plot` at a 79-column width.

    nl=True inserts a newline after each wrapped chunk; nl=False drops
    the whitespace separators instead of replacing them.
    """
    pattern = re.compile(r"(.{,79})($|\s)")
    replacement = "\\1\n" if nl else "\\1"
    return pattern.sub(replacement, plot)
#===============================================================================
# PADDING SETUP CALL
#===============================================================================
padset(INFOLBL, IMDBLBL, RTLBL)
|
|
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package-internal interfaces."""
import abc
import six
from grpc.framework.interfaces.base import base
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
# Implementations live elsewhere in the package (this module is interfaces-only).
class TerminationManager(six.with_metaclass(abc.ABCMeta)):
  """An object responsible for handling the termination of an operation.

  Attributes:
    outcome: None if the operation is active or a base.Outcome value if it has
      terminated.
  """

  @abc.abstractmethod
  def add_callback(self, callback):
    """Registers a callback to be called on operation termination.

    If the operation has already terminated the callback will not be called.

    Args:
      callback: A callable that will be passed a base.Outcome value.

    Returns:
      None if the operation has not yet terminated and the passed callback will
        be called when it does, or a base.Outcome value describing the
        operation termination if the operation has terminated and the callback
        will not be called as a result of this method call.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def emission_complete(self):
    """Indicates that emissions from customer code have completed."""
    raise NotImplementedError()

  @abc.abstractmethod
  def transmission_complete(self):
    """Indicates that transmissions to the remote end are complete.

    Returns:
      True if the operation has terminated or False if the operation remains
        ongoing.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def reception_complete(self, code, details):
    """Indicates that reception from the other side is complete.

    Args:
      code: An application-specific code value.
      details: An application-specific details value.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def ingestion_complete(self):
    """Indicates that customer code ingestion of received values is complete."""
    raise NotImplementedError()

  @abc.abstractmethod
  def expire(self):
    """Indicates that the operation must abort because it has taken too long."""
    raise NotImplementedError()

  @abc.abstractmethod
  def abort(self, outcome):
    """Indicates that the operation must abort for the indicated reason.

    Args:
      outcome: A base.Outcome indicating operation abortion.
    """
    raise NotImplementedError()
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
class TransmissionManager(six.with_metaclass(abc.ABCMeta)):
  """A manager responsible for transmitting to the other end of an operation."""

  @abc.abstractmethod
  def kick_off(
      self, group, method, timeout, protocol_options, initial_metadata,
      payload, completion, allowance):
    """Transmits the values associated with operation invocation."""
    raise NotImplementedError()

  @abc.abstractmethod
  def advance(self, initial_metadata, payload, completion, allowance):
    """Accepts values for transmission to the other end of the operation.

    Args:
      initial_metadata: An initial metadata value to be transmitted to the other
        side of the operation. May only ever be non-None once.
      payload: A payload value.
      completion: A base.Completion value. May only ever be non-None in the last
        transmission to be made to the other side.
      allowance: A positive integer communicating the number of additional
        payloads allowed to be transmitted from the other side to this side of
        the operation, or None if no additional allowance is being granted in
        this call.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def timeout(self, timeout):
    """Accepts for transmission to the other side a new timeout value.

    Args:
      timeout: A positive float used as the new timeout value for the operation
        to be transmitted to the other side.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def allowance(self, allowance):
    """Indicates to this manager that the remote customer is allowing payloads.

    Args:
      allowance: A positive integer indicating the number of additional payloads
        the remote customer is allowing to be transmitted from this side of the
        operation.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def remote_complete(self):
    """Indicates to this manager that data from the remote side is complete."""
    raise NotImplementedError()

  @abc.abstractmethod
  def abort(self, outcome):
    """Indicates that the operation has aborted.

    Args:
      outcome: A base.Outcome for the operation. If None, indicates that the
        operation abortion should not be communicated to the other side of the
        operation.
    """
    raise NotImplementedError()
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
class ExpirationManager(six.with_metaclass(abc.ABCMeta)):
  """A manager responsible for aborting the operation if it runs out of time."""

  @abc.abstractmethod
  def change_timeout(self, timeout):
    """Changes the timeout allotted for the operation.

    Operation duration is always measure from the beginning of the operation;
    calling this method changes the operation's allotted time to timeout total
    seconds, not timeout seconds from the time of this method call.

    Args:
      timeout: A length of time in seconds to allow for the operation.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def deadline(self):
    """Returns the time until which the operation is allowed to run.

    Returns:
      The time (seconds since the epoch) at which the operation will expire.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def terminate(self):
    """Indicates to this manager that the operation has terminated."""
    raise NotImplementedError()
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
class ProtocolManager(six.with_metaclass(abc.ABCMeta)):
  """A manager of protocol-specific values passing through an operation."""

  @abc.abstractmethod
  def set_protocol_receiver(self, protocol_receiver):
    """Registers the customer object that will receive protocol objects.

    Args:
      protocol_receiver: A base.ProtocolReceiver to which protocol objects for
        the operation should be passed.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def accept_protocol_context(self, protocol_context):
    """Accepts the protocol context object for the operation.

    Args:
      protocol_context: An object designated for use as the protocol context
        of the operation, with further semantics implementation-determined.
    """
    raise NotImplementedError()
# Abstract interface; also inherits base.Operator so an EmissionManager can be
# handed to customers directly as the operation's operator.
class EmissionManager(six.with_metaclass(abc.ABCMeta, base.Operator)):
  """A manager of values emitted by customer code."""

  @abc.abstractmethod
  def advance(
      self, initial_metadata=None, payload=None, completion=None,
      allowance=None):
    """Accepts a value emitted by customer code.

    This method should only be called by customer code.

    Args:
      initial_metadata: An initial metadata value emitted by the local customer
        to be sent to the other side of the operation.
      payload: A payload value emitted by the local customer to be sent to the
        other side of the operation.
      completion: A Completion value emitted by the local customer to be sent to
        the other side of the operation.
      allowance: A positive integer indicating an additional number of payloads
        that the local customer is willing to accept from the other side of the
        operation.
    """
    raise NotImplementedError()
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
class IngestionManager(six.with_metaclass(abc.ABCMeta)):
  """A manager responsible for executing customer code.

  This name of this manager comes from its responsibility to pass successive
  values from the other side of the operation into the code of the local
  customer.
  """

  @abc.abstractmethod
  def set_group_and_method(self, group, method):
    """Communicates to this IngestionManager the operation group and method.

    Args:
      group: The group identifier of the operation.
      method: The method identifier of the operation.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def add_local_allowance(self, allowance):
    """Communicates to this IngestionManager that more payloads may be ingested.

    Args:
      allowance: A positive integer indicating an additional number of payloads
        that the local customer is willing to ingest.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def local_emissions_done(self):
    """Indicates to this manager that local emissions are done."""
    raise NotImplementedError()

  @abc.abstractmethod
  def advance(self, initial_metadata, payload, completion, allowance):
    """Advances the operation by passing values to the local customer."""
    raise NotImplementedError()
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
class ReceptionManager(six.with_metaclass(abc.ABCMeta)):
  """A manager responsible for receiving tickets from the other end."""

  @abc.abstractmethod
  def receive_ticket(self, ticket):
    """Handle a ticket from the other side of the operation.

    Args:
      ticket: A links.Ticket for the operation.
    """
    raise NotImplementedError()
# Abstract interface (ABC); six.with_metaclass keeps it Python 2/3 compatible.
class Operation(six.with_metaclass(abc.ABCMeta)):
  """An ongoing operation.

  Attributes:
    context: A base.OperationContext object for the operation.
    operator: A base.Operator object for the operation for use by the customer
      of the operation.
  """

  @abc.abstractmethod
  def handle_ticket(self, ticket):
    """Handle a ticket from the other side of the operation.

    Args:
      ticket: A links.Ticket from the other side of the operation.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def abort(self, outcome_kind):
    """Aborts the operation.

    Args:
      outcome_kind: A base.Outcome.Kind value indicating operation abortion.
    """
    raise NotImplementedError()
|
|
import os.path
import logging as log
from types import StringType, LongType, IntType, ListType, DictType
ints = (LongType, IntType)
strings = (StringType,unicode)
from re import compile
from sha import sha
from hashlib import md5
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def determine_torrent_name(files):
    """Derive a torrent name from the file list, or None.

    When exactly one path is given and it is a directory, the directory's
    basename is the torrent name; otherwise no name can be determined.

    BUG FIX: the separator-stripping fallback unconditionally overwrote
    the basename (e.g. "/home/user/movies" became "homeusermovies"); per
    the original comments it was only meant as a fallback for paths whose
    basename comes out empty (relative / trailing-separator paths).
    """
    # if they gave us a single path and it was a dir
    # than use the dir name as the torrent name
    if len(files) == 1 and os.path.isdir(files[0]):
        name = os.path.basename(files[0])
        # fallback: a path with a trailing separator (or other oddity)
        # leaves an empty basename — flatten the raw path instead
        if not name:
            name = files[0].replace(os.sep, '')
        log.debug('name: %s', name)
        return name
    return None
def validate_info_data(info_data):
    """Validate a torrent metainfo 'info' dictionary; raises exceptions if data is bad.

    Checks the dict/type structure, the 20-byte piece-hash alignment,
    name/path safety (the regex rejects names starting with '/', '\\',
    '.' or '~' and any embedded separators), the single-file vs
    multi-file exclusivity, and duplicate file paths.  Python 2 code:
    relies on the `types`-module type constants imported at module level.
    Returns True when everything checks out; raises ValueError otherwise.
    """
    reg = compile(r'^[^/\\.~][^/\\]*$')
    # we must represent the info as a dict
    if type(info_data) != DictType:
        raise ValueError('invalid info: data must be a dictionary')
    # make sure our pieces are a string % 20 (one SHA1 hash per piece)
    pieces = info_data.get('pieces')
    if type(pieces) != StringType or len(pieces) % 20 != 0:
        raise ValueError('invalid info: bad piece key')
    # check our torrent's name
    name = info_data.get('name')
    log.debug('validating name: %s',name)
    if type(name) != StringType:
        raise ValueError('invalid info: bad name :: %s' % name)
    # check our security regex against the name
    if not reg.match(name):
        raise ValueError('invalid info: bad name for security reasons')
    # we can't have both a files list and a length value
    if 'files' in info_data and 'length' in info_data:
        raise ValueError('invalid info: single/multiple info')
    if 'files' in info_data:
        files = info_data.get('files')
        # our files must be a list
        if type(files) != ListType:
            raise ValueError('invalid info: files must be list')
        # check each of our sub files
        duplicate_check = {}
        for file_data in files:
            # they are represented as dicts
            if type(file_data) is not DictType:
                raise ValueError('invalid info: file data must be dict')
            # they have an int non 0 length
            length = file_data.get('length')
            if type(length) not in ints or length < 0:
                raise ValueError('invalid info: bad file length')
            # our path must be secure and a list of strings
            path = file_data.get('path')
            if type(path) != ListType or path == []:
                raise ValueError('invalid info: bad file path :: %s' % path)
            # check our path dirs, secure strings
            for path_piece in path:
                if type(path_piece) not in strings:
                    raise ValueError('invalid info: bad path dir: %s', path_piece)
                if not reg.match(path_piece):
                    raise ValueError('invalid info: insecure path dir')
            # make sure we haven't seen this guy before
            if tuple(path) in duplicate_check:
                raise ValueError('invalid info: duplicate path')
            else:
                duplicate_check[tuple(path)] = True
    # if we are a single file we will have a length
    # represented as an int
    else:
        length = info_data.get('length')
        if type(length) not in ints or length < 0:
            raise ValueError('invalid info: bad length')
    return True
def convert_unicode(s,encoding):
    """Decode s (or each element when s is a list) to unicode text.

    Python 2 code: relies on the builtin `unicode`.  Re-raises as a
    UnicodeError that names the offending value.
    """
    try:
        if type(s) is ListType:
            s = [unicode(x,encoding) for x in s]
        else:
            s = unicode(s,encoding)
    except UnicodeError:
        raise UnicodeError('bad filename: %s' % s)
    return s
def find_files(path,extension=None,exclude=None):
    """Return absolute paths of files found recursively under `path`.

    Directories whose NAME contains `exclude` are pruned from the walk;
    files are kept only when they end with `extension` (if given) and do
    not contain `exclude`.  Symlinked directories are followed.

    BUG FIXES: the directory-prune test read `exclude in dir_names`
    instead of testing each name; the file filter referenced an
    undefined variable `x` instead of `name` (NameError); and
    `map(dir_names.remove, ...)` is lazy on Python 3 so nothing was
    ever pruned.
    """
    # absolute paths
    found = []
    for dir_path, dir_names, file_names in os.walk(path, followlinks=True):
        # remove paths which include the exclude string;
        # pruning dir_names in place keeps os.walk from traversing them
        if exclude:
            for bad in [d for d in dir_names if exclude in d]:
                dir_names.remove(bad)
        # see if any of the current files meet our
        # extension and exclude criteria
        for name in file_names:
            if (not extension or name.endswith(extension)) \
                    and (not exclude or exclude not in name):
                found.append(os.path.join(dir_path, name))
    return [os.path.abspath(p) for p in found]
def get_file_name(path, rel_file_base=None):
    """Guess a name from a path; expects a relative path or a base.

    Strips rel_file_base when the path starts with it.  A single
    remaining component is returned as a plain string; multiple
    components are returned as the list of components.
    """
    base = rel_file_base or ''
    if path.startswith(base):
        path = path[len(base):]
    pieces = [piece for piece in path.split(os.sep) if piece.strip()]
    # one component -> bare name; several -> component list
    # (callers should supply a base when paths are not relative)
    name = pieces[0] if len(pieces) == 1 else pieces
    log.debug('get_name: %s %s', path, name)
    return name
def get_common_name(paths, rel_file_base=None):
    """Return the basename of the paths' common prefix, or None.

    The prefix is character-wise (os.path.commonprefix), after removing
    rel_file_base and any trailing separator.  An empty prefix yields
    None.  A single path defers to get_file_name.
    """
    base = rel_file_base or ''
    if len(paths) == 1:
        # degenerate case: we shouldn't usually be called with one path,
        # but go ahead and do the deed anyway
        name = get_file_name(paths[0], rel_file_base)
    else:
        # see if the paths share a common prefix
        shared = os.path.commonprefix(paths)
        # see if anything is left after we take out the base
        if shared.startswith(base):
            shared = shared[len(base):]
        if shared.endswith(os.sep):
            shared = shared[:-1]
        if shared:
            # something in common: its basename is the name
            name = os.path.basename(shared)
            log.debug('name: %s', name)
        else:
            # nothing in common: no name to report
            name = None
    log.debug('get_common_name: %s %s' % (paths, name))
    return name
def determine_file_sizes(file_paths):
    """Return a dict mapping each path in *file_paths* to its size in bytes."""
    return dict((path, os.path.getsize(path)) for path in file_paths)
def md5sum(path):
    """Return the raw md5 digest (bytes) of the file at *path*.

    Reads in fixed-size chunks so arbitrarily large files can be hashed
    without loading them into memory.

    Raises:
        Exception: if *path* does not exist.
    """
    log.debug('creating md5: %s', path)
    if not os.path.exists(path):
        raise Exception('File not found')
    file_sum = md5()
    # FIX: the py2-only ``file()`` builtin was replaced with ``open()``,
    # which behaves identically here and also works on Python 3; the
    # ``with`` block guarantees the handle is closed on error.
    with open(path, 'rb') as fh:
        # 128-byte chunks preserve the original read granularity
        for chunk in iter(lambda: fh.read(128), b''):
            file_sum.update(chunk)
    return file_sum.digest()
def determine_piece_size(total_size):
    """Choose a piece size (bytes, a power of two) for a payload of *total_size*.

    Scales the piece size with the total payload so the piece count stays
    reasonable: <= 4MB payloads get 32KiB pieces, > 8GB payloads get 2MiB
    pieces.
    """
    MiB = 1024 * 1024
    GiB = 1024 * MiB
    # FIX: the py2-only ``L`` long-literal suffixes were dropped; plain
    # ints auto-promote on py2 and the suffix is a SyntaxError on py3.
    if total_size > 8 * GiB:       # > 8gb, 2mb pieces
        exponent = 21
    elif total_size > 2 * GiB:     # > 2gb, 1mb pieces
        exponent = 20
    elif total_size > 512 * MiB:   # > 512mb, 512k pieces
        exponent = 19
    elif total_size > 64 * MiB:    # > 64mb, 256k pieces
        exponent = 18
    elif total_size > 16 * MiB:    # > 16mb, 128k pieces
        exponent = 17
    elif total_size > 4 * MiB:     # > 4mb, 64k pieces
        exponent = 16
    else:                          # <= 4mb, 32k pieces
        exponent = 15
    return 2 ** exponent
|
|
# Copyright 2014-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import codecs
import types
from .mesonlib import MesonException
from . import mlog
# This is the regex for the supported escape sequences of a regular string
# literal, like 'abc\x00'
ESCAPE_SEQUENCE_SINGLE_RE = re.compile(r'''
( \\U........ # 8-digit hex escapes
| \\u.... # 4-digit hex escapes
| \\x.. # 2-digit hex escapes
| \\[0-7]{1,3} # Octal escapes
| \\N\{[^}]+\} # Unicode characters by name
| \\[\\'abfnrtv] # Single-character escapes
)''', re.UNICODE | re.VERBOSE)
class MesonUnicodeDecodeError(MesonException):
    """Raised when an escape sequence in a string literal cannot be decoded."""

    def __init__(self, match):
        super().__init__(str(match))
        # keep the offending escape text so callers can report it
        self.match = match
def decode_match(match):
    """Decode one matched escape sequence (e.g. ``\\n``) to its character.

    Used as the replacement callable for ESCAPE_SEQUENCE_SINGLE_RE.sub().

    Raises:
        MesonUnicodeDecodeError: when the sequence is malformed.
    """
    try:
        return codecs.decode(match.group(0), 'unicode_escape')
    except UnicodeDecodeError as err:
        # FIX: the ``err`` binding was previously unused; chain it
        # explicitly so the original decode failure stays attached.
        raise MesonUnicodeDecodeError(match.group(0)) from err
class ParseException(MesonException):
    """Parse error at a single source position.

    The message shows the error text, the offending line, and a caret
    under the error column.
    """

    def __init__(self, text, line, lineno, colno):
        caret = '%s^' % (' ' * colno)
        super().__init__("%s\n%s\n%s" % (text, line, caret))
        self.lineno = lineno
        self.colno = colno
class BlockParseException(MesonException):
    """Parse error for a block construct, pointing at both the current
    position and the position where the block started."""
    def __init__(self, text, line, lineno, colno, start_line, start_lineno, start_colno):
        # This can be formatted in two ways - one if the block start and end are on the same line, and a different way if they are on different lines.
        if lineno == start_lineno:
            # If block start and end are on the same line, it is formatted as:
            # Error message
            # Followed by the line with the error
            # Followed by a caret to show the block start
            # Followed by underscores
            # Followed by a caret to show the block end.
            super().__init__("%s\n%s\n%s" % (text, line, '%s^%s^' % (' ' * start_colno, '_' * (colno - start_colno - 1))))
        else:
            # If block start and end are on different lines, it is formatted as:
            # Error message
            # Followed by the line with the error
            # Followed by a caret to show the error column.
            # Followed by a message saying where the block started.
            # Followed by the line of the block start.
            # Followed by a caret for the block start.
            super().__init__("%s\n%s\n%s\nFor a block that started at %d,%d\n%s\n%s" % (text, line, '%s^' % (' ' * colno), start_lineno, start_colno, start_line, "%s^" % (' ' * start_colno)))
        self.lineno = lineno
        self.colno = colno
class Token:
    """One lexed token: ``tid`` identifies the kind, ``value`` the payload.

    Location info (``subdir``, ``line_start``, ``lineno``, ``colno``,
    ``bytespan``) is carried along for error reporting.
    """

    def __init__(self, tid, subdir, line_start, lineno, colno, bytespan, value):
        self.tid = tid
        self.value = value
        self.subdir = subdir
        self.line_start = line_start
        self.lineno = lineno
        self.colno = colno
        self.bytespan = bytespan

    def __eq__(self, other):
        # Tokens compare equal to a plain tid string, which lets the
        # parser write checks like ``self.current == 'eol'``.
        if isinstance(other, str):
            return self.tid == other
        return self.tid == other.tid
class Lexer:
    """Tokenizer for Meson definition files.

    ``lex()`` yields Token objects one at a time. Identifiers matching a
    keyword are re-tagged with the keyword as their tid, and newlines
    inside (), [] or {} are swallowed so expressions can span lines.
    """
    def __init__(self, code):
        self.code = code
        # identifiers that are reserved words rather than plain ids
        self.keywords = {'true', 'false', 'if', 'else', 'elif',
                         'endif', 'and', 'or', 'not', 'foreach', 'endforeach',
                         'in', 'continue', 'break'}
        # identifiers that only warn today but will be reserved later
        self.future_keywords = {'return'}
        self.token_specification = [
            # Need to be sorted longest to shortest.
            ('ignore', re.compile(r'[ \t]')),
            ('id', re.compile('[_a-zA-Z][_0-9a-zA-Z]*')),
            ('number', re.compile(r'0[bB][01]+|0[oO][0-7]+|0[xX][0-9a-fA-F]+|0|[1-9]\d*')),
            ('eol_cont', re.compile(r'\\\n')),
            ('eol', re.compile(r'\n')),
            ('multiline_string', re.compile(r"'''(.|\n)*?'''", re.M)),
            ('comment', re.compile(r'#.*')),
            ('lparen', re.compile(r'\(')),
            ('rparen', re.compile(r'\)')),
            ('lbracket', re.compile(r'\[')),
            ('rbracket', re.compile(r'\]')),
            ('lcurl', re.compile(r'\{')),
            ('rcurl', re.compile(r'\}')),
            ('dblquote', re.compile(r'"')),
            ('string', re.compile(r"'([^'\\]|(\\.))*'")),
            ('comma', re.compile(r',')),
            ('plusassign', re.compile(r'\+=')),
            ('dot', re.compile(r'\.')),
            ('plus', re.compile(r'\+')),
            ('dash', re.compile(r'-')),
            ('star', re.compile(r'\*')),
            ('percent', re.compile(r'%')),
            ('fslash', re.compile(r'/')),
            ('colon', re.compile(r':')),
            ('equal', re.compile(r'==')),
            ('nequal', re.compile(r'!=')),
            ('assign', re.compile(r'=')),
            ('le', re.compile(r'<=')),
            ('lt', re.compile(r'<')),
            ('ge', re.compile(r'>=')),
            ('gt', re.compile(r'>')),
            ('questionmark', re.compile(r'\?')),
        ]
    def getline(self, line_start):
        # Slice of the source from line_start up to (not including) the
        # next newline; used for error messages.
        return self.code[line_start:self.code.find('\n', line_start)]
    def lex(self, subdir):
        """Yield Token objects for all of self.code.

        Raises ParseException on any text no token pattern matches.
        """
        line_start = 0
        lineno = 1
        loc = 0
        # open-delimiter depths; a newline inside any of them is not
        # emitted as an 'eol' token
        par_count = 0
        bracket_count = 0
        curl_count = 0
        col = 0
        while loc < len(self.code):
            matched = False
            value = None
            # first pattern that matches at loc wins (ordering matters)
            for (tid, reg) in self.token_specification:
                mo = reg.match(self.code, loc)
                if mo:
                    curline = lineno
                    curline_start = line_start
                    col = mo.start() - line_start
                    matched = True
                    span_start = loc
                    loc = mo.end()
                    span_end = loc
                    bytespan = (span_start, span_end)
                    match_text = mo.group()
                    if tid == 'ignore' or tid == 'comment':
                        break
                    elif tid == 'lparen':
                        par_count += 1
                    elif tid == 'rparen':
                        par_count -= 1
                    elif tid == 'lbracket':
                        bracket_count += 1
                    elif tid == 'rbracket':
                        bracket_count -= 1
                    elif tid == 'lcurl':
                        curl_count += 1
                    elif tid == 'rcurl':
                        curl_count -= 1
                    elif tid == 'dblquote':
                        raise ParseException('Double quotes are not supported. Use single quotes.', self.getline(line_start), lineno, col)
                    elif tid == 'string':
                        # Handle here and not on the regexp to give a better error message.
                        if match_text.find("\n") != -1:
                            mlog.warning("""Newline character in a string detected, use ''' (three single quotes) for multiline strings instead.
    This will become a hard error in a future Meson release.""", self.getline(line_start), lineno, col)
                        value = match_text[1:-1]
                        try:
                            value = ESCAPE_SEQUENCE_SINGLE_RE.sub(decode_match, value)
                        except MesonUnicodeDecodeError as err:
                            raise MesonException("Failed to parse escape sequence: '{}' in string:\n {}".format(err.match, match_text))
                    elif tid == 'multiline_string':
                        # re-tag as a plain string; adjust line bookkeeping
                        # for the newlines the literal spans
                        tid = 'string'
                        value = match_text[3:-3]
                        lines = match_text.split('\n')
                        if len(lines) > 1:
                            lineno += len(lines) - 1
                            line_start = mo.end() - len(lines[-1])
                    elif tid == 'number':
                        # base=0 honors the 0b/0o/0x prefixes
                        value = int(match_text, base=0)
                    elif tid == 'eol' or tid == 'eol_cont':
                        lineno += 1
                        line_start = loc
                        # swallow newlines inside brackets/parens/braces
                        if par_count > 0 or bracket_count > 0 or curl_count > 0:
                            break
                    elif tid == 'id':
                        if match_text in self.keywords:
                            tid = match_text
                        else:
                            if match_text in self.future_keywords:
                                mlog.warning("Identifier '{}' will become a reserved keyword in a future release. Please rename it.".format(match_text),
                                             location=types.SimpleNamespace(subdir=subdir, lineno=lineno))
                            value = match_text
                    yield Token(tid, subdir, curline_start, curline, col, bytespan, value)
                    break
            if not matched:
                raise ParseException('lexer', self.getline(line_start), lineno, col)
class ElementaryNode:
    """Base AST node built directly from a single token.

    Copies the token's location and value so the AST does not keep
    Token objects alive.
    """

    def __init__(self, token):
        self.subdir = token.subdir
        self.lineno = token.lineno
        self.colno = token.colno
        self.bytespan = token.bytespan
        self.value = token.value
class BooleanNode(ElementaryNode):
    """Boolean literal node; ``value`` is the Python bool."""

    def __init__(self, token, value):
        super().__init__(token)
        self.value = value
        # the parser must hand us a real bool, never a merely truthy value
        assert isinstance(self.value, bool)
class IdNode(ElementaryNode):
    """Identifier node; ``value`` holds the identifier text."""

    def __init__(self, token):
        super().__init__(token)
        assert isinstance(self.value, str)

    def __str__(self):
        return "Id node: '%s' (%d, %d)." % (self.value, self.lineno, self.colno)
class NumberNode(ElementaryNode):
    """Integer literal node; ``value`` is the parsed int."""

    def __init__(self, token):
        super().__init__(token)
        assert isinstance(self.value, int)
class StringNode(ElementaryNode):
    """String literal node; ``value`` is the decoded string text."""

    def __init__(self, token):
        super().__init__(token)
        assert isinstance(self.value, str)

    def __str__(self):
        return "String node: '%s' (%d, %d)." % (self.value, self.lineno, self.colno)
class ContinueNode(ElementaryNode):
    """AST node for the ``continue`` keyword."""
    pass
class BreakNode(ElementaryNode):
    """AST node for the ``break`` keyword."""
    pass
class ArrayNode:
    """Array literal ``[...]``; ``args`` is the element ArgumentNode."""

    def __init__(self, args):
        # inherit source location from the argument list
        self.subdir, self.lineno, self.colno = args.subdir, args.lineno, args.colno
        self.args = args
class DictNode:
    """Dict literal ``{...}``; ``args`` carries the key/value pairs."""

    def __init__(self, args):
        # inherit source location from the argument list
        self.subdir, self.lineno, self.colno = args.subdir, args.lineno, args.colno
        self.args = args
class EmptyNode:
    """Placeholder node for an absent expression or block."""

    def __init__(self, lineno, colno):
        self.subdir = ''
        self.lineno, self.colno = lineno, colno
        self.value = None
class OrNode:
    """Short-circuit ``or`` expression; location taken from the left operand."""

    def __init__(self, left, right):
        self.subdir, self.lineno, self.colno = left.subdir, left.lineno, left.colno
        self.left = left
        self.right = right
class AndNode:
    """Short-circuit ``and`` expression; location taken from the left operand."""

    def __init__(self, left, right):
        self.subdir, self.lineno, self.colno = left.subdir, left.lineno, left.colno
        self.left = left
        self.right = right
class ComparisonNode:
    """Comparison expression; ``ctype`` is the operator text ('==', 'in', ...)."""

    def __init__(self, ctype, left, right):
        self.subdir, self.lineno, self.colno = left.subdir, left.lineno, left.colno
        self.left = left
        self.right = right
        self.ctype = ctype
class ArithmeticNode:
    """Arithmetic expression; ``operation`` names it ('add', 'sub', ...)."""

    def __init__(self, operation, left, right):
        self.subdir, self.lineno, self.colno = left.subdir, left.lineno, left.colno
        self.left = left
        self.right = right
        self.operation = operation
class NotNode:
    """Unary ``not``; location borrowed from *location_node*."""

    def __init__(self, location_node, value):
        self.subdir, self.lineno, self.colno = (
            location_node.subdir, location_node.lineno, location_node.colno)
        self.value = value
class CodeBlockNode:
    """Sequence of statements; ``lines`` is filled in by the parser."""

    def __init__(self, location_node):
        self.subdir, self.lineno, self.colno = (
            location_node.subdir, location_node.lineno, location_node.colno)
        self.lines = []
class IndexNode:
    """Subscript ``iobject[index]``; location borrowed from the object."""

    def __init__(self, iobject, index):
        self.iobject = iobject
        self.index = index
        self.subdir, self.lineno, self.colno = (
            iobject.subdir, iobject.lineno, iobject.colno)
class MethodNode:
    """Method call ``source_object.name(args)``."""

    def __init__(self, subdir, lineno, colno, source_object, name, args):
        self.subdir, self.lineno, self.colno = subdir, lineno, colno
        self.source_object = source_object
        self.name = name
        # method names come from IdNode values, so always plain strings
        assert isinstance(self.name, str)
        self.args = args
class FunctionNode:
    """Function call ``func_name(args)``."""

    def __init__(self, subdir, lineno, colno, func_name, args):
        self.subdir, self.lineno, self.colno = subdir, lineno, colno
        self.func_name = func_name
        # function names come from IdNode values, so always plain strings
        assert isinstance(func_name, str)
        self.args = args
class AssignmentNode:
    """Plain assignment ``var_name = value``."""

    def __init__(self, lineno, colno, var_name, value):
        self.lineno, self.colno = lineno, colno
        self.var_name = var_name
        # targets come from IdNode values, so always plain strings
        assert isinstance(var_name, str)
        self.value = value
class PlusAssignmentNode:
    """Augmented assignment ``var_name += value``."""

    def __init__(self, lineno, colno, var_name, value):
        self.lineno, self.colno = lineno, colno
        self.var_name = var_name
        # targets come from IdNode values, so always plain strings
        assert isinstance(var_name, str)
        self.value = value
class ForeachClauseNode:
    """``foreach varnames : items ... endforeach`` clause."""

    def __init__(self, lineno, colno, varnames, items, block):
        self.lineno, self.colno = lineno, colno
        self.varnames = varnames
        self.items = items
        self.block = block
class IfClauseNode:
    """Container for a whole if/elif/else chain."""

    def __init__(self, lineno, colno):
        self.lineno, self.colno = lineno, colno
        # one IfNode per if/elif branch, in source order
        self.ifs = []
        # replaced with the parsed block when an ``else`` is present
        self.elseblock = EmptyNode(lineno, colno)
class UMinusNode:
    """Unary minus; location borrowed from *current_location*."""

    def __init__(self, current_location, value):
        self.subdir, self.lineno, self.colno = (
            current_location.subdir, current_location.lineno, current_location.colno)
        self.value = value
class IfNode:
    """A single ``if``/``elif`` branch: condition plus its block."""

    def __init__(self, lineno, colno, condition, block):
        self.lineno, self.colno = lineno, colno
        self.condition = condition
        self.block = block
class TernaryNode:
    """Ternary expression ``condition ? trueblock : falseblock``."""

    def __init__(self, lineno, colno, condition, trueblock, falseblock):
        self.lineno, self.colno = lineno, colno
        self.condition = condition
        self.trueblock = trueblock
        self.falseblock = falseblock
class ArgumentNode:
    """Argument list for calls, arrays and dicts.

    Positional arguments live in ``arguments``, keyword arguments in
    ``kwargs``; ``commas`` keeps the comma tokens. ``order_error`` is
    set when a positional argument follows a keyword argument.
    """

    def __init__(self, token):
        self.subdir = token.subdir
        self.lineno = token.lineno
        self.colno = token.colno
        self.arguments = []
        self.commas = []
        self.kwargs = {}
        self.order_error = False

    def prepend(self, statement):
        # a positional argument appearing after any kwarg is an error
        if self.num_kwargs() > 0:
            self.order_error = True
        if not isinstance(statement, EmptyNode):
            self.arguments = [statement] + self.arguments

    def append(self, statement):
        if self.num_kwargs() > 0:
            self.order_error = True
        if not isinstance(statement, EmptyNode):
            self.arguments += [statement]

    def set_kwarg(self, name, value):
        if name in self.kwargs:
            mlog.warning('Keyword argument "{}" defined multiple times.'.format(name), location=self)
            mlog.warning('This will be an error in future Meson releases.')
        self.kwargs[name] = value

    def num_args(self):
        return len(self.arguments)

    def num_kwargs(self):
        return len(self.kwargs)

    def incorrect_order(self):
        return self.order_error

    def __len__(self):
        return self.num_args()  # Fixme
# Maps a comparison token id (consumed in Parser.e4) to the operator
# text stored on ComparisonNode.ctype; 'in'/'notin' are membership tests.
comparison_map = {'equal': '==',
                  'nequal': '!=',
                  'lt': '<',
                  'le': '<=',
                  'gt': '>',
                  'ge': '>=',
                  'in': 'in',
                  'notin': 'not in',
                  }
# Recursive descent parser for Meson's definition language.
# Very basic apart from the fact that we have many precedence
# levels so there are not enough words to describe them all.
# Enter numbering:
#
# 1 assignment
# 2 or
# 3 and
# 4 comparison
# 5 arithmetic
# 6 negation
# 7 funcall, method call
# 8 parentheses
# 9 plain token
class Parser:
    """Recursive-descent parser producing an AST from Meson source.

    ``parse()`` returns the top-level CodeBlockNode. The e1..e9 methods
    implement the precedence levels listed in the comment above this
    class; each consumes tokens via accept()/expect().
    """
    def __init__(self, code, subdir):
        self.lexer = Lexer(code)
        self.stream = self.lexer.lex(subdir)
        # dummy token so getsym() has something to read positions from
        self.current = Token('eof', '', 0, 0, 0, (0, 0), None)
        self.getsym()
        self.in_ternary = False
    def getsym(self):
        """Advance self.current to the next token ('eof' at end of input)."""
        try:
            self.current = next(self.stream)
        except StopIteration:
            self.current = Token('eof', '', self.current.line_start, self.current.lineno, self.current.colno + self.current.bytespan[1] - self.current.bytespan[0], (0, 0), None)
    def getline(self):
        # current source line, for error messages
        return self.lexer.getline(self.current.line_start)
    def accept(self, s):
        """Consume the current token and return True iff its tid is *s*."""
        if self.current.tid == s:
            self.getsym()
            return True
        return False
    def expect(self, s):
        """Like accept() but raises ParseException when the tid differs."""
        if self.accept(s):
            return True
        raise ParseException('Expecting %s got %s.' % (s, self.current.tid), self.getline(), self.current.lineno, self.current.colno)
    def block_expect(self, s, block_start):
        """Like expect() but reports the block's start position on failure."""
        if self.accept(s):
            return True
        raise BlockParseException('Expecting %s got %s.' % (s, self.current.tid), self.getline(), self.current.lineno, self.current.colno, self.lexer.getline(block_start.line_start), block_start.lineno, block_start.colno)
    def parse(self):
        """Parse the whole input and return the root CodeBlockNode."""
        block = self.codeblock()
        self.expect('eof')
        return block
    def statement(self):
        return self.e1()
    def e1(self):
        # level 1: assignment, plus-assignment and the ternary operator
        left = self.e2()
        if self.accept('plusassign'):
            value = self.e1()
            if not isinstance(left, IdNode):
                raise ParseException('Plusassignment target must be an id.', self.getline(), left.lineno, left.colno)
            return PlusAssignmentNode(left.lineno, left.colno, left.value, value)
        elif self.accept('assign'):
            value = self.e1()
            if not isinstance(left, IdNode):
                raise ParseException('Assignment target must be an id.',
                                     self.getline(), left.lineno, left.colno)
            return AssignmentNode(left.lineno, left.colno, left.value, value)
        elif self.accept('questionmark'):
            if self.in_ternary:
                raise ParseException('Nested ternary operators are not allowed.',
                                     self.getline(), left.lineno, left.colno)
            self.in_ternary = True
            trueblock = self.e1()
            self.expect('colon')
            falseblock = self.e1()
            self.in_ternary = False
            return TernaryNode(left.lineno, left.colno, left, trueblock, falseblock)
        return left
    def e2(self):
        # level 2: 'or'
        left = self.e3()
        while self.accept('or'):
            if isinstance(left, EmptyNode):
                raise ParseException('Invalid or clause.',
                                     self.getline(), left.lineno, left.colno)
            left = OrNode(left, self.e3())
        return left
    def e3(self):
        # level 3: 'and'
        left = self.e4()
        while self.accept('and'):
            if isinstance(left, EmptyNode):
                raise ParseException('Invalid and clause.',
                                     self.getline(), left.lineno, left.colno)
            left = AndNode(left, self.e4())
        return left
    def e4(self):
        # level 4: comparisons and membership tests
        left = self.e5()
        for nodename, operator_type in comparison_map.items():
            if self.accept(nodename):
                return ComparisonNode(operator_type, left, self.e5())
        # NOTE(review): if 'not' is accepted but the next token is not
        # 'in', the 'not' token has already been consumed here — looks
        # like a latent bug; confirm against upstream before changing.
        if self.accept('not') and self.accept('in'):
            return ComparisonNode('notin', left, self.e5())
        return left
    def e5(self):
        # level 5: arithmetic, split per operator below
        return self.e5add()
    def e5add(self):
        left = self.e5sub()
        if self.accept('plus'):
            return ArithmeticNode('add', left, self.e5add())
        return left
    def e5sub(self):
        left = self.e5mod()
        if self.accept('dash'):
            return ArithmeticNode('sub', left, self.e5sub())
        return left
    def e5mod(self):
        left = self.e5mul()
        if self.accept('percent'):
            return ArithmeticNode('mod', left, self.e5mod())
        return left
    def e5mul(self):
        left = self.e5div()
        if self.accept('star'):
            return ArithmeticNode('mul', left, self.e5mul())
        return left
    def e5div(self):
        left = self.e6()
        if self.accept('fslash'):
            return ArithmeticNode('div', left, self.e5div())
        return left
    def e6(self):
        # level 6: unary 'not' and unary minus
        if self.accept('not'):
            return NotNode(self.current, self.e7())
        if self.accept('dash'):
            return UMinusNode(self.current, self.e7())
        return self.e7()
    def e7(self):
        # level 7: function calls, then chained method calls / indexing
        left = self.e8()
        block_start = self.current
        if self.accept('lparen'):
            args = self.args()
            self.block_expect('rparen', block_start)
            if not isinstance(left, IdNode):
                raise ParseException('Function call must be applied to plain id',
                                     self.getline(), left.lineno, left.colno)
            left = FunctionNode(left.subdir, left.lineno, left.colno, left.value, args)
        go_again = True
        while go_again:
            go_again = False
            if self.accept('dot'):
                go_again = True
                left = self.method_call(left)
            if self.accept('lbracket'):
                go_again = True
                left = self.index_call(left)
        return left
    def e8(self):
        # level 8: parenthesized expression, array or dict literal
        block_start = self.current
        if self.accept('lparen'):
            e = self.statement()
            self.block_expect('rparen', block_start)
            return e
        elif self.accept('lbracket'):
            args = self.args()
            self.block_expect('rbracket', block_start)
            return ArrayNode(args)
        elif self.accept('lcurl'):
            key_values = self.key_values()
            self.block_expect('rcurl', block_start)
            return DictNode(key_values)
        else:
            return self.e9()
    def e9(self):
        # level 9: plain literals and identifiers; EmptyNode otherwise
        t = self.current
        if self.accept('true'):
            return BooleanNode(t, True)
        if self.accept('false'):
            return BooleanNode(t, False)
        if self.accept('id'):
            return IdNode(t)
        if self.accept('number'):
            return NumberNode(t)
        if self.accept('string'):
            return StringNode(t)
        return EmptyNode(self.current.lineno, self.current.colno)
    def key_values(self):
        """Parse the 'key' : value pairs inside a dict literal."""
        s = self.statement()
        a = ArgumentNode(s)
        while not isinstance(s, EmptyNode):
            potential = self.current
            if self.accept('colon'):
                if not isinstance(s, StringNode):
                    raise ParseException('Key must be a string.',
                                         self.getline(), s.lineno, s.colno)
                if s.value in a.kwargs:
                    # + 1 to colno to point to the actual string, not the opening quote
                    raise ParseException('Duplicate dictionary key: {}'.format(s.value),
                                         self.getline(), s.lineno, s.colno + 1)
                a.set_kwarg(s.value, self.statement())
                potential = self.current
                if not self.accept('comma'):
                    return a
                a.commas.append(potential)
            else:
                raise ParseException('Only key:value pairs are valid in dict construction.',
                                     self.getline(), s.lineno, s.colno)
            s = self.statement()
        return a
    def args(self):
        """Parse a call/array argument list: positional and kwarg entries."""
        s = self.statement()
        a = ArgumentNode(s)
        while not isinstance(s, EmptyNode):
            potential = self.current
            if self.accept('comma'):
                a.commas.append(potential)
                a.append(s)
            elif self.accept('colon'):
                if not isinstance(s, IdNode):
                    raise ParseException('Dictionary key must be a plain identifier.',
                                         self.getline(), s.lineno, s.colno)
                a.set_kwarg(s.value, self.statement())
                potential = self.current
                if not self.accept('comma'):
                    return a
                a.commas.append(potential)
            else:
                a.append(s)
                return a
            s = self.statement()
        return a
    def method_call(self, source_object):
        """Parse ``.name(args)`` applied to *source_object*; chains on '.'"""
        methodname = self.e9()
        if not(isinstance(methodname, IdNode)):
            raise ParseException('Method name must be plain id',
                                 self.getline(), self.current.lineno, self.current.colno)
        self.expect('lparen')
        args = self.args()
        self.expect('rparen')
        method = MethodNode(methodname.subdir, methodname.lineno, methodname.colno, source_object, methodname.value, args)
        if self.accept('dot'):
            return self.method_call(method)
        return method
    def index_call(self, source_object):
        """Parse ``[index]`` applied to *source_object* (lbracket consumed)."""
        index_statement = self.statement()
        self.expect('rbracket')
        return IndexNode(source_object, index_statement)
    def foreachblock(self):
        """Parse a foreach clause body ('foreach' token already consumed)."""
        t = self.current
        self.expect('id')
        varname = t
        varnames = [t]
        if self.accept('comma'):
            t = self.current
            self.expect('id')
            varnames.append(t)
        self.expect('colon')
        items = self.statement()
        block = self.codeblock()
        return ForeachClauseNode(varname.lineno, varname.colno, varnames, items, block)
    def ifblock(self):
        """Parse an if/elif/else chain ('if' token already consumed)."""
        condition = self.statement()
        clause = IfClauseNode(condition.lineno, condition.colno)
        self.expect('eol')
        block = self.codeblock()
        clause.ifs.append(IfNode(clause.lineno, clause.colno, condition, block))
        self.elseifblock(clause)
        clause.elseblock = self.elseblock()
        return clause
    def elseifblock(self, clause):
        while self.accept('elif'):
            s = self.statement()
            self.expect('eol')
            b = self.codeblock()
            clause.ifs.append(IfNode(s.lineno, s.colno, s, b))
    def elseblock(self):
        # returns None when there is no else block
        if self.accept('else'):
            self.expect('eol')
            return self.codeblock()
    def line(self):
        """Parse one statement line: if/foreach block, continue/break, or expr."""
        block_start = self.current
        if self.current == 'eol':
            return EmptyNode(self.current.lineno, self.current.colno)
        if self.accept('if'):
            block = self.ifblock()
            self.block_expect('endif', block_start)
            return block
        if self.accept('foreach'):
            block = self.foreachblock()
            self.block_expect('endforeach', block_start)
            return block
        if self.accept('continue'):
            return ContinueNode(self.current)
        if self.accept('break'):
            return BreakNode(self.current)
        return self.statement()
    def codeblock(self):
        """Parse consecutive lines until a line is not followed by 'eol'."""
        block = CodeBlockNode(self.current)
        cond = True
        while cond:
            curline = self.line()
            if not isinstance(curline, EmptyNode):
                block.lines.append(curline)
            cond = self.accept('eol')
        return block
|
|
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
import os
import pytest
import tarfile
import zipfile
from atomic_reactor.constants import PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY
from atomic_reactor.plugins.post_export_operator_manifests import ExportOperatorManifestsPlugin
from atomic_reactor.plugin import PluginFailedException
from docker.errors import NotFound
from flexmock import flexmock
from functools import partial
from platform import machine
from tests.mock_env import MockEnv
from requests import Response
pytestmark = pytest.mark.usefixtures('user_params')
CONTAINER_ID = 'mocked'
def mock_dockerfile(
        tmpdir,
        has_appregistry_label=False, appregistry_label=False,
        has_bundle_label=True, bundle_label=True
):
    """Write a minimal Dockerfile into *tmpdir* with optional operator labels.

    The has_* flags control whether each LABEL line is emitted at all;
    the *_label flags give the (lowercased) boolean value. Missing labels
    leave an empty line, matching the original layout.
    """
    lines = ['From fedora:30']
    if has_appregistry_label:
        lines.append('LABEL com.redhat.delivery.appregistry={}'.format(
            str(appregistry_label).lower()))
    else:
        lines.append('')
    if has_bundle_label:
        lines.append('LABEL com.redhat.delivery.operator.bundle={}'.format(
            str(bundle_label).lower()))
    else:
        lines.append('')
    lines.append('CMD /bin/cowsay moo')
    tmpdir.join('Dockerfile').write('\n'.join(lines))
def generate_archive(tmpdir, empty=False):
    """Yield the bytes of a tar archive containing a 'manifests' tree.

    The tree holds a top-level 'manifests' directory with a nested
    'another_dir'; unless *empty* is true, each level gets one empty
    .yml file. The archive is streamed in 8-byte chunks and deleted
    from disk once the generator is exhausted.
    """
    archive_path = os.path.join(str(tmpdir), 'temp.tar')
    manifests_dir = os.path.join(str(tmpdir), 'manifests')
    os.mkdir(manifests_dir)
    nested_dir = os.path.join(manifests_dir, 'another_dir')
    os.mkdir(nested_dir)
    if not empty:
        # one empty yaml file at each directory level
        for target in (os.path.join(manifests_dir, 'stub.yml'),
                       os.path.join(nested_dir, 'yayml.yml')):
            open(target, 'w').close()
    archive_tar = tarfile.open(archive_path, 'w')
    archive_tar.add(manifests_dir, arcname='manifests')
    archive_tar.close()
    f = open(archive_path, 'rb')
    for block in iter(partial(f.read, 8), b''):
        yield block
    f.close()
    os.unlink(archive_path)
def mock_env(tmpdir, docker_tasker,
             has_appregistry_label=False, appregistry_label=False,
             has_bundle_label=True, bundle_label=True,
             has_archive=True,
             scratch=False, orchestrator=False, selected_platform=True, empty_archive=False,
             remove_fails=False):
    """Build a plugin runner with a mocked docker tasker for the tests below.

    Writes the Dockerfile, wires flexmock expectations for container
    create/remove and archive extraction, and returns the runner.
    has_archive is three-valued: True -> archive returned, False ->
    get_archive raises NotFound (404), None -> generic Exception (500).
    """
    mock_dockerfile(
        tmpdir,
        has_appregistry_label=has_appregistry_label, appregistry_label=appregistry_label,
        has_bundle_label=has_bundle_label, bundle_label=bundle_label
    )
    env = (MockEnv()
           .for_plugin('postbuild', ExportOperatorManifestsPlugin.key)
           .set_scratch(scratch))
    if orchestrator:
        env.make_orchestrator()
    env.workflow.builder.set_df_path(str(tmpdir))
    mock_stream = generate_archive(tmpdir, empty_archive)
    if selected_platform:
        # extract platform must match the current machine for the plugin to run
        env.set_plugin_args({'operator_manifests_extract_platform': machine(),
                             'platform': machine()})
    (flexmock(docker_tasker.tasker.d.wrapped)
     .should_receive('create_container')
     .with_args(env.workflow.image, command=["/bin/bash"])
     .and_return({'Id': CONTAINER_ID}))
    if remove_fails:
        (flexmock(docker_tasker.tasker.d.wrapped)
         .should_receive('remove_container')
         .with_args(CONTAINER_ID)
         .and_raise(Exception('error')))
    else:
        (flexmock(docker_tasker.tasker.d.wrapped)
         .should_receive('remove_container')
         .with_args(CONTAINER_ID))
    if has_archive:
        (flexmock(docker_tasker.tasker.d.wrapped)
         .should_receive('get_archive')
         .with_args(CONTAINER_ID, '/manifests')
         .and_return(mock_stream, {}))
    elif has_archive is not None:
        # archive path missing in the container -> docker 404
        response = Response()
        response.status_code = 404
        (flexmock(docker_tasker.tasker.d.wrapped)
         .should_receive('get_archive')
         .with_args(CONTAINER_ID, '/manifests')
         .and_raise(NotFound('Not found', response=response)))
    else:
        # unexpected failure while extracting
        response = Response()
        response.status_code = 500
        (flexmock(docker_tasker.tasker.d.wrapped)
         .should_receive('get_archive')
         .with_args(CONTAINER_ID, '/manifests')
         .and_raise(Exception('error')))
    return env.create_runner(docker_tasker)
class TestExportOperatorManifests(object):
    """Tests for ExportOperatorManifestsPlugin using the mock_env harness."""
    @pytest.mark.parametrize('scratch', [True, False])
    @pytest.mark.parametrize('has_appregistry_label', [True, False])
    @pytest.mark.parametrize('appregistry_label', [True, False])
    @pytest.mark.parametrize('has_bundle_label', [True, False])
    @pytest.mark.parametrize('bundle_label', [True, False])
    @pytest.mark.parametrize('orchestrator', [True, False])
    @pytest.mark.parametrize('selected_platform', [True, False])
    def test_skip(self, docker_tasker, tmpdir, caplog, scratch,
                  has_appregistry_label, appregistry_label,
                  has_bundle_label, bundle_label,
                  orchestrator, selected_platform):
        """Plugin skips (result None) unless an operator label is set true,
        the build is not scratch/orchestrator, and the platform matches."""
        runner = mock_env(
            tmpdir, docker_tasker,
            has_appregistry_label=has_appregistry_label,
            has_bundle_label=has_bundle_label, bundle_label=bundle_label,
            appregistry_label=appregistry_label,
            scratch=scratch,
            orchestrator=orchestrator, selected_platform=selected_platform
        )
        result = runner.run()
        if any([
                not (
                    (has_appregistry_label and appregistry_label) or
                    (has_bundle_label and bundle_label)
                ),
                scratch,
                orchestrator,
                not selected_platform
        ]):
            assert 'Skipping' in caplog.text
            assert result[PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY] is None
        else:
            assert 'Skipping' not in caplog.text
    def test_export_archive(self, docker_tasker, tmpdir):
        """Happy path: the manifests tar is repacked into a zip with the
        expected members."""
        runner = mock_env(tmpdir, docker_tasker)
        result = runner.run()
        archive = result[PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY]
        assert archive
        assert archive.split('/')[-1] == 'operator_manifests.zip'
        assert zipfile.is_zipfile(archive)
        expected = ['stub.yml', 'another_dir/yayml.yml']
        with zipfile.ZipFile(archive, 'r') as z:
            assert len(z.namelist()) == len(expected)
            assert sorted(z.namelist()) == sorted(expected)
    @pytest.mark.parametrize('remove_fails', [True, False])
    @pytest.mark.parametrize('has_archive', [True, False, None])
    def test_no_archive(self, docker_tasker, tmpdir, caplog, remove_fails, has_archive):
        """Missing/broken archive fails the plugin; container-removal
        failures are logged but not fatal on their own."""
        runner = mock_env(tmpdir, docker_tasker, has_archive=has_archive,
                          remove_fails=remove_fails)
        if has_archive:
            runner.run()
            if remove_fails:
                assert 'Failed to remove container' in caplog.text
        else:
            with pytest.raises(PluginFailedException) as exc:
                runner.run()
                if not has_archive:
                    assert 'Could not extract operator manifest files' in caplog.text
                    assert 'Could not extract operator manifest files' in str(exc.value)
                    if remove_fails:
                        assert 'Failed to remove container' in caplog.text
    # NOTE(review): method name has a typo ("emty"); renaming would change
    # the pytest test id, so it is left as-is here.
    @pytest.mark.parametrize('empty_archive', [True, False])
    def test_emty_manifests_dir(self, docker_tasker, tmpdir, caplog, empty_archive):
        """An archive whose manifests dirs contain no files is an error."""
        runner = mock_env(tmpdir, docker_tasker, empty_archive=empty_archive)
        if empty_archive:
            with pytest.raises(PluginFailedException) as exc:
                runner.run()
            assert 'Empty operator manifests directory' in caplog.text
            assert 'Empty operator manifests directory' in str(exc.value)
        else:
            runner.run()
|
|
import numpy as n
from .afg import AFG_channel, Arb
def sync(length, amp=16382):
    """Return a uint16 square wave of *length* samples: 0, amp, 0, amp, ..."""
    wave = n.zeros((length,), dtype=n.uint16)
    # odd samples high, even samples low
    wave[1::2] = 1
    return wave * amp
def zigzagify(A):
    """Reverse every other row of the 2-d array *A*, in place.

    Turns a row-major raster into a boustrophedon ("zigzag") scan.
    """
    A[1::2] = A[1::2][:, ::-1]
def pairify(a):
    """Return an array twice as long, with each value of *a* duplicated."""
    stacked = n.asarray((a, a))
    # transpose pairs each element with its copy; ravel flattens
    return stacked.T.ravel()
def make_waveforms(X, Y, zigzag=False):
    """Build flattened raster-scan X/Y waveforms plus a sync square wave.

    X values are duplicated (pairify) and tiled against Y via meshgrid;
    with zigzag=True alternate X rows run backward.
    """
    fast = pairify(n.array(X, dtype=n.uint16))
    slow = n.array(Y, dtype=n.uint16)
    grid_x, grid_y = n.meshgrid(fast, slow)
    # total points must fit in the AFG's 2**17-sample waveform memory
    assert grid_x.size == grid_y.size <= 2**17
    if zigzag:
        zigzagify(grid_x)
    return grid_x.ravel(), grid_y.ravel(), sync(grid_x.size)
# load it
def make_and_load_waveforms(X, Y, t, det_afg, rf_afg):
    """Build scan waveforms and load/configure them on the two AFGs.

    X/Y are the scan coordinate arrays, t the per-acquisition dwell time
    (seconds). Returns the number of acquisitions (pixels) per frame.
    """
    det_arb = Arb(det_afg)
    rf_arb = Arb(rf_afg)
    # detection AFG drives the sync (ch1) and slow axis (ch2);
    # the RF AFG drives the fast axis
    s_ch = AFG_channel(det_afg, 1)
    y_ch = AFG_channel(det_afg, 2)
    x_ch = AFG_channel(rf_afg)
    X,Y,s = make_waveforms(X, Y)
    npts = len(s) # number of waveform points
    naqs = npts // 2 # number of acquisitions (pixels)
    # load into the two AFGs
    det_arb.npts = npts
    det_arb.set_data(s)
    det_arb.copy_to('user3') # ch1 : user3
    det_arb.set_data(Y) # ch2 : emem
    rf_arb.npts = npts
    rf_arb.set_data(X)
    # configure AFGs
    s_ch.set_mode('user3')
    y_ch.set_mode('emem')
    x_ch.set_mode('emem')
    # one full waveform cycle spans the whole frame
    total_time = naqs * t
    for ch in (x_ch, y_ch, s_ch):
        ch.freq = 1. / total_time
    return naqs
class AFGasDAC(object):
    """Expose an AFG arbitrary-waveform point as a scalar DAC-style value."""

    def __init__(self, afg):
        self.arb = Arb(afg)

    def get_value(self):
        # point 1 of the arb memory holds the static output level
        return self.arb.get_point(1)

    def set_value(self, value):
        return self.arb.set_point(1, value)
from PyDAQmx import *
from .expt import make_pulse
def make_buffered_counter(samps, countchan, sampleclk, trig=None):
    """Create a DAQmx task counting edges on *countchan*, buffered by
    *sampleclk*, optionally gated low by a pause trigger on *trig*.

    Returns the (not yet started) Task configured for *samps* finite
    samples.
    """
    ctr = Task()
    ctr.CreateCICountEdgesChan(countchan, "",
                               DAQmx_Val_Rising,
                               0, # initial count
                               DAQmx_Val_CountUp)
    # configure sample clock (http://www.ni.com/white-paper/5404/en/)
    ctr.CfgSampClkTiming(sampleclk, # source of the sample clock
                         10000, # rate of the sample clock.
                                # not sure about units, lifted from white paper
                         DAQmx_Val_Rising, # activeEdge
                         DAQmx_Val_FiniteSamps, # sampleMode
                         samps) # sampsPerChanToAcquire
    if trig is not None:
        # configure pause trigger: counting pauses while *trig* is low
        ctr.SetPauseTrigType(DAQmx_Val_DigLvl)
        ctr.SetDigLvlPauseTrigSrc(trig)
        ctr.SetDigLvlPauseTrigWhen(DAQmx_Val_Low)
    return ctr
def many_samples(samps, timeout, pulsechan, countchan, sampleclk, trig=None):
    """Trigger a scan and read *samps* buffered counter samples.

    Starts the buffered counter, fires a trigger pulse on *pulsechan*,
    waits up to *timeout* seconds for the acquisition, and returns the
    raw cumulative counts as a uint32 array of length *samps*.
    """
    ctr = make_buffered_counter(samps, countchan=countchan,
                                sampleclk=sampleclk,
                                trig=trig)
    # make a pulse to trigger the burst on AFGs
    co = make_pulse(.01, pulsechan=pulsechan)
    ctr.StartTask() # start counting
    co.StartTask() # send trigger
    # while that's going, allocate some memory
    spcr = c_long()
    # BUG FIX: the numpy module is imported as ``n``; the old code
    # referenced the undefined name ``numpy`` here (NameError at runtime).
    data = n.zeros((samps,), dtype=n.uint32)
    ctr.WaitUntilTaskDone(timeout)
    # now read out the data
    ctr.ReadCounterU32(samps, # number of samples to read
                       10.0, # timeout
                       data, # readArray
                       samps, # arraySizeInSamps
                       spcr, # sampsPerChanRead
                       None) # reserved
    ctr.StopTask()
    co.StopTask()
    # every requested sample must have been read
    assert spcr.value == samps
    return data
def decode_image(result, shape, zigzag=False):
    """Convert cumulative counter readings into a per-pixel count image.

    Differencing with a prepended zero recovers per-sample counts; with
    zigzag=True every other row is flipped back to raster order.
    """
    counts = n.diff(n.insert(result, 0, 0))
    image = counts.reshape(*shape)
    if zigzag:
        image[1::2] = image[1::2][:, ::-1]
    return image
# generators for the GUI
def generate_frames(X, Y, t=.01, repeat=True,
                    pulsechan="Dev1/ctr2",
                    countchan="Dev1/ctr0",
                    sampleclk="PFI34",
                    det_afg=None, rf_afg=None):
    """Yield decoded frames (counts per second) from the buffered counter.

    Loads the scan waveforms into the AFGs once, then repeatedly acquires
    a full frame and yields it decoded to shape Y.shape + X.shape.  Stops
    after one frame when `repeat` is false, or when the DAQ raises.
    """
    naqs = make_and_load_waveforms(X, Y, t, det_afg, rf_afg)
    outshape = Y.shape + X.shape
    timeout = 5. + naqs * t
    while True:
        try:
            result = many_samples(naqs, timeout,
                                  pulsechan=pulsechan,
                                  countchan=countchan,
                                  sampleclk=sampleclk)
        except DAQError as e:
            print(e)
            # BUG FIX (PEP 479): raising StopIteration inside a generator
            # body becomes a RuntimeError on Python 3.7+; a plain return
            # ends the iteration cleanly.
            return
        # normalize by dwell time to get a count rate per pixel
        yield decode_image(result, shape=outshape)/t
        if not repeat:
            break
def update_result(gen, resultarray):
    """Copy each frame yielded by `gen` into `resultarray` in place.

    Plays the same role as yielding_fromiter, but expects `gen` to yield
    the whole result each time.  Yields None after every copy so a caller
    can step the acquisition one frame at a time.
    """
    for frame in gen:
        # slice-assign so the caller's array object keeps its identity
        resultarray[:] = frame
        yield
def make_generator_factory(xgalvo, ygalvo,
                           pulsechan, countchan, sampleclk,
                           det_afg, rf_afg):
    """Bind channel names and instruments into a data-generator factory.

    The galvo arguments are accepted for interface compatibility only;
    this fast-scan path does not drive them.
    """
    def make_data_generator(X, Y, t, vector, repeat=True):
        """Return a stepper that fills `vector` with successive frames."""
        frames = generate_frames(
            X, Y, t, repeat=repeat,
            pulsechan=pulsechan,
            countchan=countchan,
            sampleclk=sampleclk,
            det_afg=det_afg,
            rf_afg=rf_afg,
        )
        return update_result(frames, vector)

    return make_data_generator
def main():
    """Launch the fast-scan acquisition GUI wired to the DAQ and two AFGs."""
    import wx
    # NOTE(review): FakeGalvoPixel is imported but not used below - confirm.
    from acquisition import FakeGalvoPixel, AcquisitionWindow
    from .config import load_config
    cfg = load_config()
    # DAQ channel names come from the [fast] section of the config file.
    pulsechan = cfg.get('fast', 'pulsechan')
    countchan = cfg.get('fast', 'countchan')
    sampleclk = cfg.get('fast', 'sampleclk')
    import visa
    rm = visa.ResourceManager()
    # timeout=10e3 - presumably milliseconds (10 s); depends on pyvisa version.
    det_afg = rm.get_instrument(cfg.get('fast', 'det_afg'), timeout=10e3)
    rf_afg = rm.get_instrument(cfg.get('fast', 'rf_afg'), timeout=10e3)
    # The AFGs double as slow DACs for the two galvo axes.
    ygalvo = AFGasDAC(det_afg)
    xgalvo = AFGasDAC(rf_afg)
    make_data_generator = make_generator_factory(xgalvo, ygalvo,
                                                 pulsechan, countchan,
                                                 sampleclk,
                                                 det_afg, rf_afg)
    # gui app
    app = wx.App(False)
    frame = AcquisitionWindow(None, xgalvo, ygalvo,
                              make_data_generator, nvals=16383,
                              repeat=True)
    frame.Show(True)
    app.MainLoop()
# Script entry point: launch the GUI acquisition loop.
if __name__ == "__main__":
    main()
|
|
"""
Support for Nest devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/nest/
"""
import logging
import socket
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.const import (
CONF_STRUCTURE, CONF_FILENAME, CONF_BINARY_SENSORS, CONF_SENSORS,
CONF_MONITORED_CONDITIONS)
from homeassistant.loader import get_component
REQUIREMENTS = ['python-nest==3.1.0']

# Holds the in-progress configurator request under key 'nest' so a failed
# PIN entry can be retried and a finished flow can be marked done.
_CONFIGURING = {}

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'nest'
DATA_NEST = 'nest'  # key under hass.data where the NestDevice wrapper lives
NEST_CONFIG_FILE = 'nest.conf'  # default access-token cache file name
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
ATTR_HOME_MODE = 'home_mode'
ATTR_STRUCTURE = 'structure'

# Per-platform sensor configuration: an optional list of monitored conditions.
SENSOR_SCHEMA = vol.Schema({
    vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list)
})

# Payload schema for the set_mode service: required mode, optional structures.
AWAY_SCHEMA = vol.Schema({
    vol.Required(ATTR_HOME_MODE): cv.string,
    vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, cv.string)
})

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_CLIENT_ID): cv.string,
        vol.Required(CONF_CLIENT_SECRET): cv.string,
        vol.Optional(CONF_STRUCTURE): vol.All(cv.ensure_list, cv.string),
        vol.Optional(CONF_SENSORS): SENSOR_SCHEMA,
        vol.Optional(CONF_BINARY_SENSORS): SENSOR_SCHEMA
    })
}, extra=vol.ALLOW_EXTRA)
def request_configuration(nest, hass, config):
    """Ask the user for a Nest PIN through the configurator component."""
    configurator = get_component('configurator')

    # A pending request means the previous PIN attempt failed; surface the
    # error on the existing card instead of opening a second one.
    if 'nest' in _CONFIGURING:
        _LOGGER.debug("configurator failed")
        configurator.notify_errors(
            _CONFIGURING['nest'], "Failed to configure, please try again.")
        return

    def nest_configuration_callback(data):
        """Run when the configuration callback is called."""
        _LOGGER.debug("configurator callback")
        setup_nest(hass, nest, config, pin=data.get('pin'))

    _CONFIGURING['nest'] = configurator.request_config(
        hass, "Nest", nest_configuration_callback,
        description=('To configure Nest, click Request Authorization below, '
                     'log into your Nest account, '
                     'and then enter the resulting PIN'),
        link_name='Request Authorization',
        link_url=nest.authorize_url,
        submit_caption="Confirm",
        fields=[{'id': 'pin', 'name': 'Enter the PIN', 'type': ''}]
    )
def setup_nest(hass, nest, config, pin=None):
    """Set up the Nest devices."""
    if pin is not None:
        _LOGGER.debug("pin acquired, requesting access token")
        nest.request_token(pin)

    # Without a token we cannot proceed; start (or retry) the PIN flow.
    if nest.access_token is None:
        _LOGGER.debug("no access_token, requesting configuration")
        request_configuration(nest, hass, config)
        return

    # A configurator card may still be open from the PIN flow; close it.
    if 'nest' in _CONFIGURING:
        _LOGGER.debug("configuration done")
        get_component('configurator').request_done(_CONFIGURING.pop('nest'))

    _LOGGER.debug("proceeding with setup")
    conf = config[DOMAIN]
    hass.data[DATA_NEST] = NestDevice(hass, conf, nest)

    _LOGGER.debug("proceeding with discovery")
    discovery.load_platform(hass, 'climate', DOMAIN, {}, config)
    discovery.load_platform(hass, 'camera', DOMAIN, {}, config)
    discovery.load_platform(hass, 'sensor', DOMAIN,
                            conf.get(CONF_SENSORS, {}), config)
    discovery.load_platform(hass, 'binary_sensor', DOMAIN,
                            conf.get(CONF_BINARY_SENSORS, {}), config)

    _LOGGER.debug("setup done")
    return True
def setup(hass, config):
    """Set up the Nest thermostat component."""
    import nest

    # A configurator flow is already in progress; nothing more to do here.
    if 'nest' in _CONFIGURING:
        return

    conf = config[DOMAIN]
    client_id = conf[CONF_CLIENT_ID]
    client_secret = conf[CONF_CLIENT_SECRET]
    filename = config.get(CONF_FILENAME, NEST_CONFIG_FILE)

    access_token_cache_file = hass.config.path(filename)

    # BUG FIX: the instance was previously bound to the name `nest`,
    # shadowing the imported module; keep the API object under its own name.
    nest_api = nest.Nest(
        access_token_cache_file=access_token_cache_file,
        client_id=client_id, client_secret=client_secret)
    setup_nest(hass, nest_api, config)

    def set_mode(service):
        """Set the home/away mode for a Nest structure."""
        if ATTR_STRUCTURE in service.data:
            structures = service.data[ATTR_STRUCTURE]
        else:
            structures = hass.data[DATA_NEST].local_structure

        # NOTE(review): the else branch logs an error for *every* structure
        # whose name is not targeted - preserved from the original; confirm
        # whether this per-structure error is intended.
        for structure in nest_api.structures:
            if structure.name in structures:
                _LOGGER.info("Setting mode for %s", structure.name)
                structure.away = service.data[ATTR_HOME_MODE]
            else:
                _LOGGER.error("Invalid structure %s",
                              service.data[ATTR_STRUCTURE])

    hass.services.register(
        DOMAIN, 'set_mode', set_mode, schema=AWAY_SCHEMA)

    return True
class NestDevice(object):
    """Structure Nest functions for hass."""

    def __init__(self, hass, conf, nest):
        """Init Nest Devices."""
        self.hass = hass
        self.nest = nest
        if CONF_STRUCTURE not in conf:
            # No explicit filter: include every structure on the account.
            self.local_structure = [s.name for s in nest.structures]
        else:
            self.local_structure = conf[CONF_STRUCTURE]
        _LOGGER.debug("Structures to include: %s", self.local_structure)

    def thermostats(self):
        """Generate a list of thermostats and their location."""
        try:
            for structure in self.nest.structures:
                if structure.name not in self.local_structure:
                    _LOGGER.debug("Ignoring structure %s, not in %s",
                                  structure.name, self.local_structure)
                    continue
                for device in structure.thermostats:
                    yield (structure, device)
        except socket.error:
            _LOGGER.error(
                "Connection error logging into the nest web service.")

    def smoke_co_alarms(self):
        """Generate a list of smoke co alarams."""
        try:
            for structure in self.nest.structures:
                if structure.name not in self.local_structure:
                    _LOGGER.info("Ignoring structure %s, not in %s",
                                 structure.name, self.local_structure)
                    continue
                for device in structure.smoke_co_alarms:
                    yield (structure, device)
        except socket.error:
            _LOGGER.error(
                "Connection error logging into the nest web service.")

    def cameras(self):
        """Generate a list of cameras."""
        try:
            for structure in self.nest.structures:
                if structure.name not in self.local_structure:
                    _LOGGER.info("Ignoring structure %s, not in %s",
                                 structure.name, self.local_structure)
                    continue
                for device in structure.cameras:
                    yield (structure, device)
        except socket.error:
            _LOGGER.error(
                "Connection error logging into the nest web service.")
|
|
import numpy as nm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.terms.terms import Term, terms
class DivGradTerm(Term):
    r"""
    Diffusion term.

    :Definition:

    .. math::
        \int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
        \int_{\Omega} \nu\ \nabla \ul{u} : \nabla \ul{w} \\
        \int_{\Omega} \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
        \int_{\Omega} \nabla \ul{u} : \nabla \ul{w}

    :Arguments 1:
        - material : :math:`\nu` (viscosity, optional)
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`

    :Arguments 2:
        - material : :math:`\nu` (viscosity, optional)
        - parameter_1 : :math:`\ul{u}`
        - parameter_2 : :math:`\ul{w}`
    """
    name = 'dw_div_grad'
    arg_types = (('opt_material', 'virtual', 'state'),
                 ('opt_material', 'parameter_1', 'parameter_2'))
    arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
                   'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'},
                  {'opt_material' : None}]
    modes = ('weak', 'eval')

    # Default (weak-mode) C routine; set_arg_types() may replace it below.
    function = staticmethod(terms.term_ns_asm_div_grad)

    def d_div_grad(self, out, grad1, grad2, mat, vg, fmode):
        """Evaluate mat * (grad1 : grad2), integrated or per qp (fmode 2)."""
        # Flatten each gradient matrix into a vector so the double
        # contraction becomes a plain dot product per quadrature point.
        sh = grad1.shape
        g1 = grad1.reshape((sh[0], sh[1], sh[2] * sh[3]))
        g2 = grad2.reshape((sh[0], sh[1], sh[2] * sh[3]))
        aux = mat * dot_sequences(g1[..., None], g2, 'ATB')[..., None]
        if fmode == 2:
            # 'qp' mode: copy quadrature-point values straight out.
            out[:] = aux
            status = 0
        else:
            status = vg.integrate(out, aux, fmode)
        return status

    def get_fargs(self, mat, virtual, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(state)

        if mat is None:
            # Optional viscosity omitted: use a unit coefficient.
            n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
            mat = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)

        if mode == 'weak':
            if diff_var is None:
                # Residual mode: pass the flattened state gradient.
                grad = self.get(state, 'grad').transpose((0, 1, 3, 2))
                sh = grad.shape
                grad = grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
                fmode = 0

            else:
                # Matrix mode: the C routine only needs a dummy data array.
                grad = nm.array([0], ndmin=4, dtype=nm.float64)
                fmode = 1

            return grad, mat, vg, fmode

        elif mode == 'eval':
            grad1 = self.get(virtual, 'grad')
            grad2 = self.get(state, 'grad')
            # NOTE(review): mode == 'eval' here, so this lookup always gives
            # 0; the 'el_avg'/'qp' entries look unreachable - confirm.
            fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)

            return grad1, grad2, mat, vg, fmode

        else:
            raise ValueError('unsupported evaluation mode in %s! (%s)'
                             % (self.name, mode))

    def get_eval_shape(self, mat, virtual, state,
                       mode=None, term_mode=None, diff_var=None, **kwargs):
        """One scalar per element in 'eval' mode."""
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)

        return (n_el, 1, 1, 1), state.dtype

    def set_arg_types(self):
        # Choose the C assembly routine for 'weak' mode, the Python
        # evaluator for everything else.
        if self.mode == 'weak':
            self.function = terms.term_ns_asm_div_grad

        else:
            self.function = self.d_div_grad
class ConvectTerm(Term):
    r"""
    Nonlinear convective term.

    :Definition:

    .. math::
        \int_{\Omega} ((\ul{u} \cdot \nabla) \ul{u}) \cdot \ul{v}

    :Arguments:
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_convect'
    arg_types = ('virtual', 'state')
    arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D'}

    function = staticmethod(terms.term_ns_asm_convect)

    def get_fargs(self, virtual, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(state)

        # Gradient in the (cell, qp, dim, dim) layout the C routine expects,
        # plus the interpolated velocity values.
        grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
        val_qp = self.get(state, 'val')

        # True (matrix assembly) when differentiating, False (residual) else.
        fmode = diff_var is not None

        return grad, val_qp, vg, fmode
class LinearConvectTerm(Term):
    r"""
    Linearized convective term.

    :Definition:

    .. math::
        \int_{\Omega} ((\ul{b} \cdot \nabla) \ul{u}) \cdot \ul{v}

    .. math::
        ((\ul{b} \cdot \nabla) \ul{u})|_{qp}

    :Arguments:
        - virtual : :math:`\ul{v}`
        - parameter : :math:`\ul{b}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_lin_convect'
    arg_types = ('virtual', 'parameter', 'state')
    arg_shapes = {'virtual' : ('D', 'state'), 'parameter' : 'D', 'state' : 'D'}

    function = staticmethod(terms.dw_lin_convect)

    def get_fargs(self, virtual, parameter, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(state)
        val_qp = self.get(parameter, 'val')

        # Guard first: only 'weak' and 'qp' evaluation are supported.
        if mode not in ('weak', 'qp'):
            raise ValueError('unsupported evaluation mode in %s! (%s)'
                             % (self.name, mode))

        if mode == 'qp':
            # Per-quadrature-point evaluation.
            grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
            fmode = 2
        elif diff_var is None:
            # Weak mode, residual.
            grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
            fmode = 0
        else:
            # Weak mode, tangent matrix: dummy data array.
            grad = nm.array([0], ndmin=4, dtype=nm.float64)
            fmode = 1

        return grad, val_qp, vg, fmode
class LinearConvect2Term(Term):
    r"""
    Linearized convective term with the convection velocity given as a material
    parameter.

    :Definition:

    .. math::
        \int_{\Omega} ((\ul{b} \cdot \nabla) \ul{u}) \cdot \ul{v}

    .. math::
        ((\ul{b} \cdot \nabla) \ul{u})|_{qp}

    :Arguments:
        - material : :math:`\ul{b}`
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_lin_convect2'
    arg_types = ('material', 'virtual', 'state')
    arg_shapes = {'material' : 'D, 1',
                  'virtual' : ('D', 'state'), 'state' : 'D'}

    function = staticmethod(terms.dw_lin_convect)

    def get_fargs(self, material, virtual, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(state)

        # Guard first: only 'weak' and 'qp' evaluation are supported.
        if mode not in ('weak', 'qp'):
            raise ValueError('unsupported evaluation mode in %s! (%s)'
                             % (self.name, mode))

        if mode == 'qp':
            # Per-quadrature-point evaluation.
            grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
            fmode = 2
        elif diff_var is None:
            # Weak mode, residual.
            grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
            fmode = 0
        else:
            # Weak mode, tangent matrix: dummy data array.
            grad = nm.array([0], ndmin=4, dtype=nm.float64)
            fmode = 1

        return grad, material, vg, fmode
class StokesTerm(Term):
    r"""
    Stokes problem coupling term. Corresponds to weak forms of gradient and
    divergence terms. Can be evaluated.

    :Definition:

    .. math::
        \int_{\Omega} p\ \nabla \cdot \ul{v} \mbox{ , }
        \int_{\Omega} q\ \nabla \cdot \ul{u}
        \mbox{ or }
        \int_{\Omega} c\ p\ \nabla \cdot \ul{v} \mbox{ , }
        \int_{\Omega} c\ q\ \nabla \cdot \ul{u}

    :Arguments 1:
        - material : :math:`c` (optional)
        - virtual : :math:`\ul{v}`
        - state : :math:`p`

    :Arguments 2:
        - material : :math:`c` (optional)
        - state : :math:`\ul{u}`
        - virtual : :math:`q`

    :Arguments 3:
        - material : :math:`c` (optional)
        - parameter_v : :math:`\ul{u}`
        - parameter_s : :math:`p`
    """
    name = 'dw_stokes'
    arg_types = (('opt_material', 'virtual', 'state'),
                 ('opt_material', 'state', 'virtual'),
                 ('opt_material', 'parameter_v', 'parameter_s'))
    arg_shapes = [{'opt_material' : '1, 1',
                   'virtual/grad' : ('D', None), 'state/grad' : 1,
                   'virtual/div' : (1, None), 'state/div' : 'D',
                   'parameter_v' : 'D', 'parameter_s' : 1},
                  {'opt_material' : None}]
    modes = ('grad', 'div', 'eval')

    @staticmethod
    def d_eval(out, coef, vec_qp, div, vvg):
        """'eval' mode: integrate coef * p * div(u) over each element."""
        out_qp = coef * vec_qp * div

        status = vvg.integrate(out, out_qp)

        return status

    def get_fargs(self, coef, vvar, svar,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        # 'grad' mode interpolates the scalar variable; 'div' mode uses the
        # divergence of the vector variable instead.
        if self.mode == 'grad':
            qp_var, qp_name = svar, 'val'

        else:
            qp_var, qp_name = vvar, 'div'

        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
        if coef is None:
            # Optional coefficient omitted: use 1.
            coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)

        if mode == 'weak':
            vvg, _ = self.get_mapping(vvar)
            svg, _ = self.get_mapping(svar)

            if diff_var is None:
                # Residual mode: pass the interpolated values.
                val_qp = self.get(qp_var, qp_name)
                fmode = 0

            else:
                # Matrix mode: dummy data array.
                val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
                fmode = 1

            return coef, val_qp, svg, vvg, fmode

        elif mode == 'eval':
            vvg, _ = self.get_mapping(vvar)

            div = self.get(vvar, 'div')
            vec_qp = self.get(svar, 'val')

            return coef, vec_qp, div, vvg

        else:
            raise ValueError('unsupported evaluation mode in %s! (%s)'
                             % (self.name, mode))

    def get_eval_shape(self, coef, vvar, svar,
                       mode=None, term_mode=None, diff_var=None, **kwargs):
        """One scalar per element in 'eval' mode."""
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)

        return (n_el, 1, 1, 1), vvar.dtype

    def set_arg_types(self):
        # Pick the C kernel ('grad'/'div') or the Python evaluator ('eval').
        self.function = {
            'grad' : terms.dw_grad,
            'div' : terms.dw_div,
            'eval' : self.d_eval,
        }[self.mode]
class GradTerm(Term):
    r"""
    Evaluate gradient of a scalar or vector field.

    Supports 'eval', 'el_avg' and 'qp' evaluation modes.

    :Definition:

    .. math::
        \int_{\cal{D}} \nabla p \mbox{ or } \int_{\cal{D}} \nabla \ul{w}

    .. math::
        \mbox{vector for } K \from \Ical_h: \int_{T_K} \nabla p /
        \int_{T_K} 1 \mbox{ or } \int_{T_K} \nabla \ul{w} /
        \int_{T_K} 1

    .. math::
        (\nabla p)|_{qp} \mbox{ or } \nabla \ul{w}|_{qp}

    :Arguments:
        - parameter : :math:`p` or :math:`\ul{w}`
    """
    name = 'ev_grad'
    arg_types = ('parameter',)
    arg_shapes = {'parameter' : 'N'}
    integration = 'by_region'
    surface_integration = 'surface_extra'

    @staticmethod
    def function(out, grad, vg, fmode):
        """Integrate the gradient, or copy it out per qp when fmode == 2."""
        if fmode != 2:
            return vg.integrate(out, grad, fmode)
        out[:] = grad
        return 0

    def get_fargs(self, parameter,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(parameter)
        grad = self.get(parameter, 'grad', integration=self.integration)
        # Map the evaluation mode onto the C-level flag ('el_avg' default).
        mode_to_fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}
        return grad, vg, mode_to_fmode.get(mode, 1)

    def get_eval_shape(self, parameter,
                       mode=None, term_mode=None, diff_var=None, **kwargs):
        """Result shape: per-qp values in 'qp' mode, one row otherwise."""
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
        qp_count = n_qp if mode == 'qp' else 1
        return (n_el, qp_count, dim, n_c), parameter.dtype
class DivTerm(Term):
    r"""
    Evaluate divergence of a vector field.

    Supports 'eval', 'el_avg' and 'qp' evaluation modes.

    :Definition:

    .. math::
        \int_{\cal{D}} \nabla \cdot \ul{u}

    .. math::
        \mbox{vector for } K \from \Ical_h:
        \int_{T_K} \nabla \cdot \ul{u} / \int_{T_K} 1

    .. math::
        (\nabla \cdot \ul{u})|_{qp}

    :Arguments:
        - parameter : :math:`\ul{u}`
    """
    name = 'ev_div'
    arg_types = ('parameter',)
    arg_shapes = {'parameter' : 'D'}
    integration = 'by_region'
    surface_integration = 'surface_extra'

    @staticmethod
    def function(out, div, vg, fmode):
        """Integrate the divergence, or copy it out per qp when fmode == 2."""
        if fmode != 2:
            return vg.integrate(out, div, fmode)
        out[:] = div
        return 0

    def get_fargs(self, parameter,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(parameter)
        div = self.get(parameter, 'div', integration=self.integration)
        # Map the evaluation mode onto the C-level flag ('el_avg' default).
        mode_to_fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}
        return div, vg, mode_to_fmode.get(mode, 1)

    def get_eval_shape(self, parameter,
                       mode=None, term_mode=None, diff_var=None, **kwargs):
        """Result shape: per-qp scalars in 'qp' mode, one row otherwise."""
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
        qp_count = n_qp if mode == 'qp' else 1
        return (n_el, qp_count, 1, 1), parameter.dtype
class DivOperatorTerm(Term):
    r"""
    Weighted divergence term of a test function.

    :Definition:

    .. math::
        \int_{\Omega} \nabla \cdot \ul{v} \mbox { or } \int_{\Omega} c \nabla
        \cdot \ul{v}

    :Arguments:
        - material : :math:`c` (optional)
        - virtual : :math:`\ul{v}`
    """
    name = 'dw_div'
    arg_types = ('opt_material', 'virtual')
    arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', None)},
                  {'opt_material' : None}]

    @staticmethod
    def function(out, mat, vg):
        """Integrate the (optionally weighted) divergence of the test base."""
        n_el, n_qp, dim, n_ep = vg.bfg.shape
        # View the base-function gradients as one column per cell/qp.
        div_bf = nm.ascontiguousarray(
            vg.bfg.reshape((n_el, n_qp, dim * n_ep, 1)))
        integrand = div_bf if mat is None else mat * div_bf
        return vg.integrate(out, integrand)

    def get_fargs(self, mat, virtual,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(virtual)
        return mat, vg
class StokesWaveTerm(Term):
    r"""
    Stokes dispersion term with the wave vector :math:`\ul{\kappa}`.

    :Definition:

    .. math::
        \int_{\Omega} (\ul{\kappa} \cdot \ul{v}) (\ul{\kappa} \cdot \ul{u})

    :Arguments:
        - material : :math:`\ul{\kappa}`
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_stokes_wave'
    arg_types = ('material', 'virtual', 'state')
    arg_shapes = {'material' : '.: D',
                  'virtual' : ('D', 'state'), 'state' : 'D'}
    geometries = ['2_3', '2_4', '3_4', '3_8']

    @staticmethod
    def function(out, out_qp, geo, fmode):
        """Integrate the prepared quadrature-point products."""
        status = geo.integrate(out, out_qp)
        return status

    def get_fargs(self, kappa, virtual, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        from sfepy.discrete.variables import create_adof_conn, expand_basis

        geo, _ = self.get_mapping(state)
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(virtual)

        # kappa . basis row per quadrature point, strided over all cells.
        ebf = expand_basis(geo.bf, dim)
        aux = nm.einsum('i,...ij->...j', kappa, ebf)[0, :, None, :]
        kebf = insert_strided_axis(aux, 0, n_el)

        if diff_var is None:
            # Residual mode: apply (kappa.B)^T (kappa.B) to the state DOFs.
            econn = state.field.get_econn('volume', self.region)
            adc = create_adof_conn(nm.arange(state.n_dof, dtype=nm.int32),
                                   econn, n_c, 0)
            vals = state()[adc]
            aux = dot_sequences(kebf, vals[:, None, :, None])
            out_qp = dot_sequences(kebf, aux, 'ATB')

            fmode = 0

        else:
            # Matrix mode: element matrices (kappa.B)^T (kappa.B).
            out_qp = dot_sequences(kebf, kebf, 'ATB')

            fmode = 1

        return out_qp, geo, fmode
class StokesWaveDivTerm(Term):
    r"""
    Stokes dispersion term with the wave vector :math:`\ul{\kappa}` and the
    divergence operator.

    :Definition:

    .. math::
        \int_{\Omega} (\ul{\kappa} \cdot \ul{v}) (\nabla \cdot \ul{u}) \;,
        \int_{\Omega} (\ul{\kappa} \cdot \ul{u}) (\nabla \cdot \ul{v})

    :Arguments 1:
        - material : :math:`\ul{\kappa}`
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`

    :Arguments 2:
        - material : :math:`\ul{\kappa}`
        - state : :math:`\ul{u}`
        - virtual : :math:`\ul{v}`
    """
    name = 'dw_stokes_wave_div'
    arg_types = (('material', 'virtual', 'state'),
                 ('material', 'state', 'virtual'))
    arg_shapes = {'material' : '.: D',
                  'virtual' : ('D', 'state'), 'state' : 'D'}
    geometries = ['2_3', '2_4', '3_4', '3_8']
    modes = ('kd', 'dk')

    @staticmethod
    def function(out, out_qp, geo, fmode):
        """Integrate the prepared quadrature-point products."""
        status = geo.integrate(out, out_qp)
        return status

    def get_fargs(self, kappa, kvar, dvar,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        from sfepy.discrete.variables import create_adof_conn, expand_basis

        geo, _ = self.get_mapping(dvar)
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(kvar)

        # kappa . basis row per quadrature point, strided over all cells.
        ebf = expand_basis(geo.bf, dim)
        aux = nm.einsum('i,...ij->...j', kappa, ebf)[0, :, None, :]
        kebf = insert_strided_axis(aux, 0, n_el)

        # Divergence operator row built from the basis-function gradients.
        div_bf = geo.bfg
        div_bf = div_bf.reshape((n_el, n_qp, 1, dim * n_en))
        div_bf = nm.ascontiguousarray(div_bf)

        if diff_var is None:
            # Residual mode: apply the operator pair to the DOFs of the
            # variable the differential operator acts on ('kd' vs 'dk').
            avar = dvar if self.mode == 'kd' else kvar
            econn = avar.field.get_econn('volume', self.region)
            adc = create_adof_conn(nm.arange(avar.n_dof, dtype=nm.int32),
                                   econn, n_c, 0)
            vals = avar()[adc]
            if self.mode == 'kd':
                aux = dot_sequences(div_bf, vals[:, None, :, None])
                out_qp = dot_sequences(kebf, aux, 'ATB')

            else:
                aux = dot_sequences(kebf, vals[:, None, :, None])
                out_qp = dot_sequences(div_bf, aux, 'ATB')

            fmode = 0

        else:
            # Matrix mode: element matrices in the chosen operator order.
            if self.mode == 'kd':
                out_qp = dot_sequences(kebf, div_bf, 'ATB')

            else:
                out_qp = dot_sequences(div_bf, kebf, 'ATB')

            fmode = 1

        return out_qp, geo, fmode
class GradDivStabilizationTerm(Term):
    r"""
    Grad-div stabilization term ( :math:`\gamma` is a global stabilization
    parameter).

    :Definition:

    .. math::
        \gamma \int_{\Omega} (\nabla\cdot\ul{u}) \cdot (\nabla\cdot\ul{v})

    :Arguments:
        - material : :math:`\gamma`
        - virtual : :math:`\ul{v}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_st_grad_div'
    arg_types = ('material', 'virtual', 'state')
    arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
                  'state' : 'D'}

    function = staticmethod(terms.dw_st_grad_div)

    def get_fargs(self, gamma, virtual, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(state)

        if diff_var is not None:
            # Matrix mode: the C routine only needs a dummy data array.
            return nm.array([0], ndmin=4, dtype=nm.float64), gamma, vg, 1

        # Residual mode: pass the divergence of the state.
        return self.get(state, 'div'), gamma, vg, 0
from sfepy.terms.terms_diffusion import LaplaceTerm
class PSPGPStabilizationTerm(LaplaceTerm):
    r"""
    PSPG stabilization term, pressure part ( :math:`\tau` is a local
    stabilization parameter), alias to Laplace term dw_laplace.

    :Definition:

    .. math::
        \sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla p \cdot \nabla q

    :Arguments:
        - material : :math:`\tau_K`
        - virtual : :math:`q`
        - state : :math:`p`
    """
    # Pure alias: everything except the name is inherited from LaplaceTerm.
    name = 'dw_st_pspg_p'
class PSPGCStabilizationTerm(Term):
    r"""
    PSPG stabilization term, convective part ( :math:`\tau` is a local
    stabilization parameter).

    :Definition:

    .. math::
        \sum_{K \in \Ical_h}\int_{T_K} \tau_K\ ((\ul{b} \cdot \nabla) \ul{u})
        \cdot \nabla q

    :Arguments:
        - material : :math:`\tau_K`
        - virtual : :math:`q`
        - parameter : :math:`\ul{b}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_st_pspg_c'
    arg_types = ('material', 'virtual', 'parameter', 'state')
    arg_shapes = {'material' : '1, 1', 'virtual' : (1, None),
                  'parameter' : 'D', 'state' : 'D'}

    function = staticmethod(terms.dw_st_pspg_c)

    def get_fargs(self, tau, virtual, parameter, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        # Scalar mapping for the test function q, vector mapping for u.
        svg, _ = self.get_mapping(virtual)
        vvg, _ = self.get_mapping(state)

        val_qp = self.get(parameter, 'val')
        conn = state.field.get_connectivity(self.region, self.integration)

        # 0 -> residual, 1 -> tangent matrix.
        fmode = 0 if diff_var is None else 1

        return val_qp, state(), tau, svg, vvg, conn, fmode
class SUPGPStabilizationTerm(Term):
    r"""
    SUPG stabilization term, pressure part ( :math:`\delta` is a local
    stabilization parameter).

    :Definition:

    .. math::
        \sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p\cdot ((\ul{b} \cdot
        \nabla) \ul{v})

    :Arguments:
        - material : :math:`\delta_K`
        - virtual : :math:`\ul{v}`
        - parameter : :math:`\ul{b}`
        - state : :math:`p`
    """
    name = 'dw_st_supg_p'
    arg_types = ('material', 'virtual', 'parameter', 'state')
    arg_shapes = {'material' : '1, 1', 'virtual' : ('D', None),
                  'parameter' : 'D', 'state' : 1}

    function = staticmethod(terms.dw_st_supg_p)

    def get_fargs(self, delta, virtual, parameter, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        # Vector mapping for v, scalar mapping for the pressure p.
        vvg, _ = self.get_mapping(virtual)
        svg, _ = self.get_mapping(state)

        val_qp = self.get(parameter, 'val')

        if diff_var is not None:
            # Matrix mode: dummy gradient array, flag 1.
            grad = nm.array([0], ndmin=4, dtype=nm.float64)
            fmode = 1
        else:
            grad = self.get(state, 'grad')
            fmode = 0

        return val_qp, grad, delta, vvg, svg, fmode
class SUPGCStabilizationTerm(Term):
    r"""
    SUPG stabilization term, convective part ( :math:`\delta` is a local
    stabilization parameter).

    :Definition:

    .. math::
        \sum_{K \in \Ical_h}\int_{T_K} \delta_K\ ((\ul{b} \cdot \nabla)
        \ul{u})\cdot ((\ul{b} \cdot \nabla) \ul{v})

    :Arguments:
        - material : :math:`\delta_K`
        - virtual : :math:`\ul{v}`
        - parameter : :math:`\ul{b}`
        - state : :math:`\ul{u}`
    """
    name = 'dw_st_supg_c'
    arg_types = ('material', 'virtual', 'parameter', 'state')
    arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
                  'parameter' : 'D', 'state' : 'D'}

    function = staticmethod(terms.dw_st_supg_c)

    def get_fargs(self, delta, virtual, parameter, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        vg, _ = self.get_mapping(virtual)
        val_qp = self.get(parameter, 'val')
        conn = virtual.field.get_connectivity(self.region, self.integration)

        # 0 -> residual, 1 -> tangent matrix.
        fmode = 0 if diff_var is None else 1

        return val_qp, state(), delta, vg, conn, fmode
|
|
from django.db import models
from django.contrib.auth.models import User
"""
class DataCatalog(models.Model):
objects = DataCatalogManager() # handles natural keys
name = models.CharField(max_length=200, unique=True)
manager = models.ForeignKey("Scientist",related_name="managed_datacatalogs",null=True,blank=True)
managing_organization = models.ForeignKey("Organization",related_name="managed_datacatalogs",null=True,blank=True)
def __unicode__(self):
return self.name
"""
class LicenseManager(models.Manager):
    """Manager enabling lookup/serialization of License rows by natural key."""
    def get_by_natural_key(self,name):
        # License names are unique, so the name alone identifies a row.
        return self.get(name=name)
class License(models.Model):
    """A data license that catalogs and datasets can reference."""
    objects = LicenseManager() # handles natural keys
    name = models.CharField(max_length=200,unique=True)
    url = models.URLField(blank=True)  # optional link to the license text
    def __unicode__(self):
        # Python 2 style human-readable representation.
        return self.name
class FormatManager(models.Manager):
    """Manager enabling lookup/serialization of Format rows by natural key."""
    def get_by_natural_key(self,name):
        # Format names are unique, so the name alone identifies a row.
        return self.get(name=name)
class Format(models.Model):
    """A data format that datasets can reference."""
    objects = FormatManager() # handles natural keys
    name = models.CharField(max_length=200, unique=True)
    url = models.URLField(blank=True)  # optional link describing the format
    def __unicode__(self):
        # Python 2 style human-readable representation.
        return self.name
class KeywordManager(models.Manager):
    """Manager enabling lookup/serialization of Keyword rows by natural key."""
    def get_by_natural_key(self,keyword):
        # Keywords are unique, so the keyword text identifies a row.
        return self.get(keyword=keyword)
class Keyword(models.Model):
    """A free-form tag that catalogs can be labeled with."""
    # Consistency fix: KeywordManager is defined above but was never
    # attached; wire it up like License/Format do so natural-key
    # (de)serialization works for this model too.
    objects = KeywordManager() # handles natural keys
    keyword = models.CharField(max_length=100, unique=True)

    def __unicode__(self):
        return self.keyword
class CategoryManager(models.Manager):
    """Manager enabling lookup/serialization of Category rows by natural key."""
    def get_by_natural_key(self,category):
        # Category names are unique, so the name alone identifies a row.
        return self.get(category=category)
class Category(models.Model):
    """A topical category that catalogs can be filed under."""
    # Consistency fix: CategoryManager is defined above but was never
    # attached; wire it up like License/Format do so natural-key
    # (de)serialization works for this model too.
    objects = CategoryManager() # handles natural keys
    category = models.CharField(max_length=100, unique=True)

    def __unicode__(self):
        return self.category
# The data models
class DataCatalogManager(models.Manager):
    """Manager enabling lookup/serialization of DataCatalog rows by natural key."""
    def get_by_natural_key(self,title):
        # Catalog titles are unique, so the title alone identifies a row.
        return self.get(title=title)
class DataCatalog(models.Model):
    """A collection of datasets (DCAT-style catalog) with a managing party."""
    objects = DataCatalogManager() # handles natural keys
    title = models.CharField(max_length=200, unique=True)
    description = models.TextField(blank=True)
    issued = models.DateTimeField('date published')
    modified = models.DateTimeField('date last edited')
    language = models.CharField(max_length=3, default="en")  # language code
    license = models.ForeignKey(License, related_name="licensed_datacatalogs",
                                null=True, blank=True)
    homepage = models.URLField(max_length=200, blank=True)
    # BUG FIX: this field previously passed
    # name="Geographic region of the dataset (if applicable)", which
    # overrides the model field's *name* with an invalid identifier;
    # verbose_name is the human-readable label that was intended.
    spatial = models.CharField(
        max_length=500, default="",
        verbose_name="Geographic region of the dataset (if applicable)",
        null=True, blank=True)
    manager = models.ForeignKey("Scientist",
                                related_name="managed_datacatalogs",
                                null=True, blank=True)
    managing_organization = models.ForeignKey(
        "Organization", related_name="managed_datacatalogs",
        null=True, blank=True)
    categories = models.ManyToManyField(Category, related_name='catalogs',
                                        null=True, blank=True)
    keywords = models.ManyToManyField(Keyword, related_name='catalogs',
                                      null=True, blank=True)

    def __unicode__(self):
        return self.title
"""
class Dataset(models.Model):
objects = DatasetManager() # handles natural keys
data_catalog = models.ForeignKey(DataCatalog,null=True,blank=True)
# sets the registered user who added the dataset
added_by = models.ForeignKey(User,null=True,blank=True)
name = models.CharField(max_length=200)
date_published = models.DateTimeField('date published')
date_last_edited = models.DateTimeField('date last edited')
url = models.URLField(max_length=200)
documentation_url = models.URLField(max_length=200,blank=True)
download_url = models.URLField(max_length=200,blank=True)
description = models.TextField(blank=True)
# relationships
data_format = models.ForeignKey(Format,related_name="formatted_datasets",null=True,blank=True)
license = models.ForeignKey(License,related_name="licensed_datasets",null=True,blank=True)
derivatives = models.ManyToManyField('self', through='DataRelation', symmetrical=False,
related_name='sources',null=True,blank=True)
#sources = models.ManyToManyField('self', through='DataRelation', symmetrical=False,
# related_name='derivatives',null=True,blank=True)
contributors = models.ManyToManyField('Scientist', through='ContributorRelation', symmetrical=False,
related_name='contributed_datasets',null=True,blank=True)
manager = models.ForeignKey("Scientist",related_name="managed_datasets",null=True,blank=True)
# scientists who are identified as managers, and who are registered users of Clean.Data.
managing_organization = models.ForeignKey("Organization",related_name="managed_datasets",null=True,blank=True)
is_public = models.BooleanField(verbose_name="Is this visible to others?", default = True)
def __unicode__(self):
return self.name
#class Meta:
# unique_together = (('name','url'),)
"""
class DatasetManager(models.Manager):
    """Manager enabling lookup/serialization of Dataset rows by natural key."""
    def get_by_natural_key(self,title):
        # Dataset titles serve as the natural key.
        return self.get(title=title)
class Dataset(models.Model):
    """A DCAT-style dataset record, optionally belonging to a DataCatalog.

    Natural key is the title (see DatasetManager). Field names follow the
    DCAT vocabulary (issued, modified, landingPage, accrualPeriodicity, ...).
    """
    objects = DatasetManager()  # handles natural keys
    data_catalog = models.ForeignKey(DataCatalog, null=True, blank=True)
    # The registered user who added the dataset.
    added_by = models.ForeignKey(User, null=True, blank=True)
    title = models.CharField(max_length=300)
    description = models.TextField(blank=True)
    issued = models.DateTimeField('date published')
    modified = models.DateTimeField('date last edited')
    # ISO 639-1 two-letter language codes.
    LANGUAGES = (
        ('en','English'),
        ('ab','Abkhaz'),
        ('aa','Afar'),
        ('af','Afrikaans'),
        ('ak','Akan'),
        ('sq','Albanian'),
        ('am','Amharic'),
        ('ar','Arabic'),
        ('an','Aragonese'),
        ('hy','Armenian'),
        ('as','Assamese'),
        ('av','Avaric'),
        ('ae','Avestan'),
        ('ay','Aymara'),
        ('az','Azerbaijani'),
        ('bm','Bambara'),
        ('ba','Bashkir'),
        ('eu','Basque'),
        ('be','Belarusian'),
        ('bn','Bengali'),
        ('bh','Bihari'),
        ('bi','Bislama'),
        ('bs','Bosnian'),
        ('br','Breton'),
        ('bg','Bulgarian'),
        ('my','Burmese'),
        ('ca','Catalan; Valencian'),
        ('ch','Chamorro'),
        ('ce','Chechen'),
        ('ny','Chichewa; Chewa; Nyanja'),
        ('zh','Chinese'),
        ('cv','Chuvash'),
        ('kw','Cornish'),
        ('co','Corsican'),
        ('cr','Cree'),
        ('hr','Croatian'),
        ('cs','Czech'),
        ('da','Danish'),
        ('dv','Divehi; Dhivehi; Maldivian;'),
        ('nl','Dutch'),
        ('eo','Esperanto'),
        ('et','Estonian'),
        ('ee','Ewe'),
        ('fo','Faroese'),
        ('fj','Fijian'),
        ('fi','Finnish'),
        ('fr','French'),
        ('ff','Fula; Fulah; Pulaar; Pular'),
        ('gl','Galician'),
        ('ka','Georgian'),
        ('de','German'),
        ('el','Greek, Modern'),
        ('gn','Guarani'),
        ('gu','Gujarati'),
        ('ht','Haitian; Haitian Creole'),
        ('ha','Hausa'),
        ('he','Hebrew (modern)'),
        ('hz','Herero'),
        ('hi','Hindi'),
        ('ho','Hiri Motu'),
        ('hu','Hungarian'),
        ('ia','Interlingua'),
        ('id','Indonesian'),
        ('ie','Interlingue'),
        ('ga','Irish'),
        ('ig','Igbo'),
        ('ik','Inupiaq'),
        ('io','Ido'),
        ('is','Icelandic'),
        ('it','Italian'),
        ('iu','Inuktitut'),
        ('ja','Japanese'),
        ('jv','Javanese'),
        ('kl','Kalaallisut, Greenlandic'),
        ('kn','Kannada'),
        ('kr','Kanuri'),
        ('ks','Kashmiri'),
        ('kk','Kazakh'),
        ('km','Khmer'),
        ('ki','Kikuyu, Gikuyu'),
        ('rw','Kinyarwanda'),
        ('ky','Kirghiz, Kyrgyz'),
        ('kv','Komi'),
        ('kg','Kongo'),
        ('ko','Korean'),
        ('ku','Kurdish'),
        ('kj','Kwanyama, Kuanyama'),
        ('la','Latin'),
        ('lb','Luxembourgish, Letzeburgesch'),
        ('lg','Luganda'),
        ('li','Limburgish, Limburgan, Limburger'),
        ('ln','Lingala'),
        ('lo','Lao'),
        ('lt','Lithuanian'),
        ('lu','Luba-Katanga'),
        ('lv','Latvian'),
        ('gv','Manx'),
        ('mk','Macedonian'),
        ('mg','Malagasy'),
        ('ms','Malay'),
        ('ml','Malayalam'),
        ('mt','Maltese'),
        ('mi','Maori'),
        ('mr','Marathi'),
        ('mh','Marshallese'),
        ('mn','Mongolian'),
        ('na','Nauru'),
        ('nv','Navajo, Navaho'),
        ('nb','Norwegian Bokmal'),
        ('nd','North Ndebele'),
        ('ne','Nepali'),
        ('ng','Ndonga'),
        ('nn','Norwegian Nynorsk'),
        ('no','Norwegian'),
        ('ii','Nuosu'),
        ('nr','South Ndebele'),
        ('oc','Occitan'),
        ('oj','Ojibwe, Ojibwa'),
        ('cu','Old Church Slavonic, Church Slavic, Church Slavonic, Old Bulgarian, Old Slavonic'),
        ('om','Oromo'),
        ('or','Oriya'),
        ('os','Ossetian, Ossetic'),
        ('pa','Panjabi, Punjabi'),
        ('pi','Pali'),
        ('fa','Persian'),
        ('pl','Polish'),
        ('ps','Pashto, Pushto'),
        ('pt','Portuguese'),
        ('qu','Quechua'),
        ('rm','Romansh'),
        ('rn','Kirundi'),
        ('ro','Romanian, Moldavian, Moldovan'),
        ('ru','Russian'),
        ('sa','Sanskrit'),
        ('sc','Sardinian'),
        ('sd','Sindhi'),
        ('se','Northern Sami'),
        ('sm','Samoan'),
        ('sg','Sango'),
        ('sr','Serbian'),
        ('gd','Scottish Gaelic; Gaelic'),
        ('sn','Shona'),
        ('si','Sinhala, Sinhalese'),
        ('sk','Slovak'),
        ('sl','Slovene'),
        ('so','Somali'),
        ('st','Southern Sotho'),
        ('es','Spanish; Castilian'),
        ('su','Sundanese'),
        ('sw','Swahili'),
        ('ss','Swati'),
        ('sv','Swedish'),
        ('ta','Tamil'),
        ('te','Telugu'),
        ('tg','Tajik'),
        ('th','Thai'),
        ('ti','Tigrinya'),
        ('bo','Tibetan Standard, Tibetan, Central'),
        ('tk','Turkmen'),
        ('tl','Tagalog'),
        ('tn','Tswana'),
        ('to','Tonga (Tonga Islands)'),
        ('tr','Turkish'),
        ('ts','Tsonga'),
        ('tt','Tatar'),
        ('tw','Twi'),
        ('ty','Tahitian'),
        ('ug','Uighur, Uyghur'),
        ('uk','Ukrainian'),
        ('ur','Urdu'),
        ('uz','Uzbek'),
        ('ve','Venda'),
        ('vi','Vietnamese'),
        ('vo','Volapuk'),
        ('wa','Walloon'),
        ('cy','Welsh'),
        ('wo','Wolof'),
        ('fy','Western Frisian'),
        ('xh','Xhosa'),
        ('yi','Yiddish'),
        ('yo','Yoruba'),
        ('za','Zhuang, Chuang')
    )
    language = models.CharField(max_length=2, verbose_name="Primary language", choices=LANGUAGES, default="en")
    landingPage = models.URLField(max_length=200)
    contactPoint = models.ForeignKey("Scientist", related_name="managed_datasets", null=True, blank=True)
    temporalStart = models.DateTimeField(verbose_name='Start date of dataset (if applicable)', null=True, blank=True, default=None)
    temporalEnd = models.DateTimeField(verbose_name='End date of dataset (if applicable)', null=True, blank=True, default=None)
    spatial = models.CharField(max_length=500, default="", verbose_name="Geographic region of the dataset (if applicable)", null=True, blank=True)
    # How often the dataset accrues new data (DCAT accrualPeriodicity).
    ACCRUAL_CHOICES = (
        ('NO', 'doesn\'t accrue'),
        ('AN', 'as needed'),
        ('IN', 'intermittent'),
        ('RT', 'real time'),
        ('MI', 'every minute'),
        ('HO', 'hourly'),
        ('DA', 'daily'),
        ('WE', 'weekly'),
        ('BW', 'biweekly'),
        ('MO', 'monthly'),
        ('BM', 'bimonthly'),
        ('QU', 'quarterly'),
        ('YE', 'yearly'),
        ('BY', 'biyearly'),
        ('DE', 'decadal'),
        ('OT', 'other'),
    )
    # Fixed: the original passed name="Accrual period"; Field's `name`
    # argument is the attribute/column name (a space there is invalid) --
    # the human-readable label belongs in verbose_name.
    accrualPeriodicity = models.CharField(max_length=2,
                                          choices=ACCRUAL_CHOICES,
                                          default='NO',
                                          verbose_name="Accrual period")
    # Datasets derived from this one; null=True is meaningless on
    # ManyToManyField and has been dropped.
    derivatives = models.ManyToManyField('self', through='DataRelation', symmetrical=False,
                                         related_name='sources', blank=True)
    contributors = models.ManyToManyField('Scientist', through='ContributorRelation', symmetrical=False,
                                          related_name='contributed_datasets', blank=True)
    # Organization that manages this dataset (if any).
    managing_organization = models.ForeignKey("Organization", related_name="managed_datasets", null=True, blank=True)
    categories = models.ManyToManyField(Category, related_name='datasets', blank=True)
    keywords = models.ManyToManyField(Keyword, related_name='datasets', blank=True)
    is_public = models.BooleanField(verbose_name="Is this visible to others?", default=True)

    def __unicode__(self):
        return self.title
# sorts out natural identification of datasets
class DistributionManager(models.Manager):
    """Manager providing natural-key lookup for Distribution rows."""

    def get_by_natural_key(self, dataset, data_format):
        """A distribution is identified by its dataset plus its format."""
        lookup = {"dataset": dataset, "data_format": data_format}
        return self.get(**lookup)
class Distribution(models.Model):
    """One concrete form of a Dataset (a file/endpoint in a given format)."""
    objects = DistributionManager()  # handles natural keys
    title = models.CharField(max_length=200, blank=True)
    description = models.TextField(blank=True)
    issued = models.DateTimeField('date published')
    modified = models.DateTimeField('date last edited')
    # NOTE(review): related_name says "datasets" but these are distributions;
    # left unchanged because reverse accessors are part of the public API.
    license = models.ForeignKey(License, related_name="licensed_datasets", null=True, blank=True)
    # CharFields rather than URLFields: a local file path may be stored here.
    accessURL = models.CharField(max_length=300, blank=True, verbose_name="access location")
    downloadURL = models.CharField(max_length=300, blank=True, verbose_name="file location")
    dataset = models.ForeignKey(Dataset, related_name="distributions")
    data_format = models.ForeignKey(Format, related_name="formatted_datasets", null=True, blank=True)

    def __unicode__(self):
        # Fixed: data_format is nullable, so dereferencing .name blindly
        # raised AttributeError for format-less distributions.
        format_name = self.data_format.name if self.data_format else "unknown format"
        return "({0}) {1}".format(format_name, self.dataset.title)
# The permissions model
class with_access(models.Model):
    """The permissions model: a per-user access grant on one Dataset."""
    ReadOnly = 'RO'
    ReadAndEdit = 'RW'
    ReadEditDelete = 'RX'
    # Reuse the class constants so codes and choices cannot drift apart.
    ACCESS_LEVEL_CHOICES = (
        (ReadOnly, 'Read only'),
        (ReadAndEdit, 'Read and edit'),
        (ReadEditDelete, 'Read, edit and delete'),
    )
    dataset = models.ForeignKey(Dataset, related_name='accessors')
    # Fixed: the grantee was declared as a ForeignKey to Dataset; the field
    # name and related_name make clear it is the accessing User.
    user = models.ForeignKey(User, related_name='accessing')
    access_level = models.CharField(max_length=2,
                                    choices=ACCESS_LEVEL_CHOICES,
                                    default=ReadOnly)

    def can_edit(self):
        """Return True when the grant allows modifying the dataset."""
        return self.access_level in (self.ReadAndEdit, self.ReadEditDelete)

    def can_delete(self):
        """Return True only for the full read/edit/delete grant."""
        # Fixed: `in (self.ReadEditDelete)` parenthesized a plain string and
        # performed substring matching; deletion needs an exact code match.
        return self.access_level == self.ReadEditDelete
class DataRelation(models.Model):
    """Through-model linking a source dataset to one derived from it."""
    source = models.ForeignKey(Dataset, related_name='relation_to_derivative')
    derivative = models.ForeignKey(Dataset, related_name='relation_to_source')
    how_data_was_processed = models.TextField(max_length=20000, blank=True)
    processing_url = models.URLField(max_length=200, blank=True, null=True)

    def __unicode__(self):
        return "{0} -> {1}".format(self.source.title, self.derivative.title)
# The membership/organisation models
# This is a class to store a reference to a datascientist that's not registered as a user of the site
# This will be most of the scientists.
# Note that the natural key is first, last, and profile url. This should keep things more or less unique.
class ScientistManager(models.Manager):
    """Manager resolving Scientist natural keys (names + profile URL)."""

    def get_by_natural_key(self, firstname, lastname, profile_url):
        return self.get(
            firstname=firstname, lastname=lastname, profile_url=profile_url)
class Scientist(models.Model):
    """A data scientist; most are not registered users of the site."""
    objects = ScientistManager()  # natural-key aware manager
    firstname = models.CharField(max_length=30)
    lastname = models.CharField(max_length=30, blank=True)
    user = models.OneToOneField(User, blank=True, null=True, related_name="scientist_profile")
    github_url = models.URLField(max_length=200, blank=True, default="")
    linkedin_url = models.URLField(max_length=200, blank=True, default="")
    profile_url = models.URLField(max_length=200, blank=True, default="")
    collaborators = models.ManyToManyField('self')

    class Meta:
        # First name, last name and profile URL together act as the
        # natural key, which should keep records more or less unique.
        unique_together = (("firstname", "lastname", "profile_url"),)

    def __unicode__(self):
        return "{0} {1}".format(self.firstname, self.lastname)
class OrganizationManager(models.Manager):
    """Manager resolving Organization natural keys (unique name)."""

    def get_by_natural_key(self, name):
        return self.get(name=name)
class Organization(models.Model):
    """An organization that scientists belong to."""
    objects = OrganizationManager()  # natural-key aware manager
    name = models.CharField(max_length=200, unique=True)
    url = models.URLField(max_length=200, default="")
    members = models.ManyToManyField(Scientist, through='MembershipRelation')

    def __unicode__(self):
        return self.name
class MembershipRelation(models.Model):
    """Through-model tying a Scientist to an Organization."""
    organization = models.ForeignKey(Organization, related_name='relation_to_member')
    member = models.ForeignKey(Scientist, related_name='relation_to_organization')

    def __unicode__(self):
        return "{0} {1} -> {2}".format(
            self.member.firstname, self.member.lastname, self.organization.name)
class ContributorRelation(models.Model):
    """Through-model recording a Scientist's contribution to a Dataset."""
    contributor = models.ForeignKey(Scientist, related_name='relation_to_data')
    dataset = models.ForeignKey(Dataset, related_name='relation_to_contributor')
    work_done = models.TextField(max_length=20000, blank=True)

    def __unicode__(self):
        # Fixed: the original printed the contributor's name twice and never
        # mentioned the dataset; mirror MembershipRelation's "X -> Y" form.
        return "{0} {1} -> {2}".format(
            self.contributor.firstname, self.contributor.lastname, self.dataset.title)
|
|
import os
from dateutil.parser import parse
from boto.s3.connection import S3Connection, Key
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.cors import CORSConfiguration
from boto.exception import S3ResponseError
from hurry.filesize import size, alternative
#Note: (from boto docs) this function is in beta
def enable_versioning(user_settings):
    """Switch on S3 object versioning for the user's bucket.

    Note: boto documents bucket versioning configuration as a beta API.
    """
    S3Wrapper.from_addon(user_settings).bucket.configure_versioning(True)
def has_access(access_key, secret_key):
    """Return True when the given AWS credentials can list buckets."""
    try:
        S3Connection(access_key, secret_key).get_all_buckets()
    except Exception:
        return False
    return True
def get_bucket_list(user_settings):
    """Return every bucket visible to the stored AWS credentials."""
    connection = S3Connection(user_settings.access_key, user_settings.secret_key)
    return connection.get_all_buckets()
def create_bucket(user_settings, bucket_name):
    """Create and return a new bucket using the stored AWS credentials."""
    connection = S3Connection(user_settings.access_key, user_settings.secret_key)
    return connection.create_bucket(bucket_name)
def does_bucket_exist(accessKey, secretKey, bucketName):
    """Return True only if the named bucket actually exists for these creds."""
    try:
        connection = S3Connection(accessKey, secretKey)
        # Fixed: get_bucket(..., validate=False) skips the server round trip,
        # so the original returned True for any name regardless of existence.
        # lookup() really asks S3 and returns None for a missing bucket.
        return connection.lookup(bucketName) is not None
    except Exception:
        return False
class S3Wrapper(object):
    """S3 Bucket management: a thin wrapper over one boto connection/bucket."""

    @classmethod
    def from_addon(cls, s3):
        """Build a wrapper from addon settings; return None if unconfigured."""
        if s3 is None or s3.user_settings is None:
            return None
        if not s3.is_registration:
            return cls(
                S3Connection(s3.user_settings.access_key,
                             s3.user_settings.secret_key),
                s3.bucket,
            )
        return RegistrationWrapper(s3)

    @classmethod
    def bucket_exist(cls, s3, bucketName):
        """Probe a bucket name through the addon's connection."""
        # Fixed: the original called cls.fromAddon(), a method that does not
        # exist (it is from_addon), so this always raised AttributeError.
        m = cls.from_addon(s3)
        # NOTE(review): with validate=False, lookup() does not hit the server,
        # and the `not` inverts the result -- this looks inverted for a method
        # named bucket_exist; behavior preserved, confirm intended semantics.
        return not m.connection.lookup(bucketName.lower(), validate=False)

    def __init__(self, connection, bucket_name):
        """Bind to `bucket_name` using an already-built boto connection."""
        self.connection = connection
        # Mixed-case bucket names require the ordinary calling format.
        if bucket_name != bucket_name.lower():
            self.connection.calling_format = OrdinaryCallingFormat()
        self.bucket = self.connection.get_bucket(bucket_name, validate=False)

    def create_key(self, key):
        """Create an (empty, uncommitted) key object in the bucket."""
        self.bucket.new_key(key)

    def get_file_list(self, prefix=None):
        """List keys in the bucket, optionally restricted to a prefix."""
        if not prefix:
            return self.bucket.list()
        return self.bucket.list(prefix=prefix)

    def create_folder(self, name, pathToFolder=""):
        """Create a pseudo-folder (zero-byte key ending in '/')."""
        # Fixed: str has no append(); the original raised AttributeError for
        # any name not already ending in '/'.
        if not name.endswith('/'):
            name += '/'
        k = self.bucket.new_key(pathToFolder + name)
        return k.set_contents_from_string("")

    def delete_file(self, keyName):
        return self.bucket.delete_key(keyName)

    def download_file_URL(self, keyName, vid=None):
        """Return a short-lived (5 s) signed URL forcing attachment download."""
        headers = {'response-content-disposition': 'attachment'}
        return self.bucket.get_key(keyName, version_id=vid).generate_url(
            5, response_headers=headers)

    def get_wrapped_keys(self, prefix=None):
        """Wrap every key (optionally under `prefix`) in an S3Key."""
        # Fixed: the prefix argument was accepted but never forwarded.
        return [S3Key(x) for x in self.get_file_list(prefix=prefix)]

    def get_wrapped_key(self, key_name, vid=None):
        """Get S3 key.

        :param str key_name: Name of S3 key
        :param str vid: Optional file version
        :return: Wrapped S3 key if found, else None
        """
        try:
            key = self.bucket.get_key(key_name, version_id=vid)
            if key is not None:
                return S3Key(key)
            return None
        except S3ResponseError:
            return None

    def get_wrapped_keys_in_dir(self, directory=None):
        """Wrap the file keys directly inside `directory` (non-recursive)."""
        return [S3Key(x) for x in self.bucket.list(delimiter='/', prefix=directory)
                if isinstance(x, Key) and x.key != directory]

    def get_wrapped_directories_in_dir(self, directory=None):
        """Wrap the pseudo-folder keys under `directory`."""
        return [S3Key(x) for x in self.bucket.list(prefix=directory)
                if isinstance(x, Key) and x.key.endswith('/') and x.key != directory]

    @property
    def bucket_name(self):
        return self.bucket.name

    def get_version_data(self):
        """Map each versioned key name to the list of its version ids."""
        versions = {}
        versions_list = self.bucket.list_versions()
        for p in versions_list:
            if isinstance(p, Key) and str(p.version_id) != 'null' and str(p.key) not in versions:
                versions[str(p.key)] = [str(k.version_id)
                                        for k in versions_list if p.key == k.key]
        return versions

    # TODO update this to cache results later
    def get_file_versions(self, file_name):
        """Return every stored version of `file_name` as boto Key objects."""
        return [x for x in self.bucket.list_versions(prefix=file_name) if isinstance(x, Key)]

    def get_cors_rules(self):
        """Return the bucket's CORS rules, or an empty config if unset."""
        try:
            return self.bucket.get_cors()
        # Fixed: bare except also swallowed KeyboardInterrupt/SystemExit;
        # a missing CORS configuration surfaces as an S3ResponseError.
        except S3ResponseError:
            return CORSConfiguration()

    def set_cors_rules(self, rules):
        return self.bucket.set_cors(rules)

    def does_key_exist(self, key_name):
        return self.bucket.get_key(key_name) is not None
# TODO Add null checks etc
class RegistrationWrapper(S3Wrapper):
    """S3Wrapper variant that pins listings to a registration snapshot.

    `registration_data['keys']` records `{'path': ..., 'version_id': ...}`
    entries captured at registration time; the listing methods filter the
    live bucket down to those registered versions.
    """
    def __init__(self, node_settings):
        if node_settings.user_settings:
            connection = S3Connection(
                node_settings.user_settings.access_key,
                node_settings.user_settings.secret_key,
            )
        else:
            # No stored credentials: fall back to boto's ambient discovery.
            connection = S3Connection()
        super(RegistrationWrapper, self).__init__(connection, node_settings.bucket)
        self.registration_data = node_settings.registration_data
    def get_wrapped_keys_in_dir(self, directory=None):
        # Same as the base class, but only keys whose version matches the
        # registration snapshot are returned.
        return [
            S3Key(x)
            for x in self.bucket.list_versions(delimiter='/', prefix=directory)
            if isinstance(x, Key) and x.key != directory
            and self.is_right_version(x)
        ]
    def get_wrapped_directories_in_dir(self, directory=None):
        return [S3Key(x) for x in self.bucket.list_versions(prefix=directory) if self._directory_check(x, directory)]
    def _directory_check(self, to_check, against):
        # A registered pseudo-folder: a Key ending in '/', not the listing
        # root itself, and present in the registration snapshot.
        return isinstance(to_check, Key) and to_check.key.endswith('/') and to_check.key != against and self.is_right_version(to_check)
    def is_right_version(self, key):
        # Truthy (non-empty list) iff this key/version pair was registered.
        return [x for x in self.registration_data['keys'] if x['version_id'] == key.version_id and x['path'] == key.key]
    def get_file_versions(self, key_name):
        # Versions of key_name sliced from the registered version onward.
        # NOTE(review): assumes list_versions() orders newest-first so the
        # slice drops versions created after registration -- confirm.
        to_cut = [x for x in self.bucket.list_versions(
            prefix=key_name) if isinstance(x, Key)]
        return to_cut[self._get_index_of(self._get_proper_version(key_name), to_cut):]
    def _get_proper_version(self, key_name):
        # The exact Key version recorded for key_name at registration time.
        # Raises IndexError if key_name was not part of the registration.
        vid = [x['version_id']
               for x in self.registration_data['keys'] if x['path'] == key_name][0]
        return self.bucket.get_key(key_name, version_id=vid)
    def _get_index_of(self, version, to_cut):
        # Position of `version` within to_cut, matched by version_id.
        return to_cut.index([x for x in to_cut if x.version_id == version.version_id][0])
# TODO Extend me and you bucket.setkeyclass
class S3Key(object):
    """Read-only convenience view over a boto Key (file or pseudo-folder).

    Folder-ness is inferred purely from a trailing '/' in the key name.
    """

    def __init__(self, key):
        self.s3Key = key
        # Files start with a placeholder version list which updateVersions()
        # can extend; folders carry no version information.
        if self.type == 'file':
            self.versions = ['current']
        else:
            self.versions = None

    @property
    def name(self):
        """Last path component: the file name, or the folder's own name."""
        d = self.s3Key.key.split('/')
        if len(d) > 1 and self.type == 'file':
            return d[-1]
        if self.type == 'folder':
            return d[-2]
        return d[0]

    @property
    def type(self):
        """'folder' for keys ending in '/', otherwise 'file'."""
        return 'folder' if self.s3Key.key.endswith('/') else 'file'

    @property
    def parentFolder(self):
        """Name of the immediately containing folder, or None at the root."""
        d = self.s3Key.key.split('/')
        if len(d) > 1 and self.type == 'file':
            return d[len(d) - 2]
        if len(d) > 2 and self.type == 'folder':
            return d[len(d) - 3]
        return None

    @property
    def pathTo(self):
        """Key prefix up to and including the last '/'."""
        # NOTE(review): for a key containing no '/', rfind returns -1 and
        # this yields key[:-1] + '/' -- confirm root-level keys never use it.
        return self.s3Key.key[:self.s3Key.key.rfind('/')] + '/'

    @property
    def size(self):
        """Human-readable size for files; None for folders."""
        if self.type == 'folder':
            return None
        return size(float(self.s3Key.size), system=alternative)

    @property
    def lastMod(self):
        """Last-modified datetime for files; None for folders."""
        if self.type == 'folder':
            return None
        return parse(self.s3Key.last_modified)

    @property
    def version(self):
        return self.versions

    @property
    def extension(self):
        """File extension without the dot, or None when there is none."""
        if self.type == 'folder':
            return None
        # Fixed: os.path.splitext() returns '' (never None) when there is no
        # extension, so the old `is None` branch was unreachable and the
        # method leaked '' instead of the intended None.
        ext = os.path.splitext(self.s3Key.key)[1]
        return ext[1:] if ext else None

    @property
    def md5(self):
        return self.s3Key.md5

    @property
    def version_id(self):
        """The S3 version id, or 'Pre-versioning' for unversioned objects."""
        return self.s3Key.version_id if self.s3Key.version_id != 'null' else 'Pre-versioning'

    def updateVersions(self, manager):
        """Append this file's stored versions fetched through `manager`."""
        if self.type != 'folder':
            self.versions.extend(manager.get_file_versions(self.s3Key.key))

    @property
    def etag(self):
        """The ETag with boto's surrounding double quotes stripped."""
        return self.s3Key.etag.replace('"', '')
|
|
#!/usr/bin/env python
# coding=utf-8
"""
An OPF `<dc:...>` metadatum.
This class can be used for both EPUB 2 and EPUB 3
DC metadata.
"""
from yael.jsonable import JSONAble
from yael.namespace import Namespace
from yael.opfmetadatum import OPFMetadatum
import yael.util
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2015, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.9"
__email__ = "alberto@albertopettarin.it"
__status__ = "Development"
class OPFDC(OPFMetadatum):
    """
    Build an OPF `<dc:...>` metadatum or
    parse it from `obj` or `string`.
    """

    A_DIR = "dir"
    A_EVENT = "event"
    A_FILE_AS = "file-as"
    A_ID = "id"
    A_LANG = "lang"
    A_ROLE = "role"
    A_SCHEME = "scheme"
    # Clark-notation names for the namespaced attribute variants.
    A_NS_EVENT = "{{{0}}}{1}".format(Namespace.OPF, A_EVENT)
    A_NS_FILE_AS = "{{{0}}}{1}".format(Namespace.OPF, A_FILE_AS)
    A_NS_LANG = "{{{0}}}{1}".format(Namespace.XML, A_LANG)
    A_NS_ROLE = "{{{0}}}{1}".format(Namespace.OPF, A_ROLE)
    A_NS_SCHEME = "{{{0}}}{1}".format(Namespace.OPF, A_SCHEME)

    def __init__(self, internal_path=None, obj=None, string=None):
        # Start every DC field out empty; parse_object() fills them in.
        for attribute in (
                "v_dir", "v_id", "v_opf_event", "v_opf_file_as",
                "v_opf_role", "v_opf_scheme", "v_tag", "v_text",
                "v_xml_lang"):
            setattr(self, attribute, None)
        OPFMetadatum.__init__(
            self,
            internal_path=internal_path,
            obj=obj,
            string=string)

    def json_object(self, recursive=True):
        """Return a JSON-ready dict of this metadatum's fields."""
        obj = {
            "dir": self.v_dir,
            "id": self.v_id,
            "opf_event": self.v_opf_event,
            "opf_file_as": self.v_opf_file_as,
            "opf_role": self.v_opf_role,
            "opf_scheme": self.v_opf_scheme,
            "tag": self.v_tag,
            "text": self.v_text,
            "xml_lang": self.v_xml_lang,
        }
        if recursive:
            obj["refinements"] = JSONAble.safe(self.refinements)
        else:
            obj["refinements"] = len(self.refinements)
        return obj

    def parse_object(self, obj):
        """Populate this metadatum from the parsed XML element `obj`."""
        attribute_map = (
            ("v_dir", OPFDC.A_DIR),
            ("v_id", OPFDC.A_ID),
            ("v_opf_event", OPFDC.A_NS_EVENT),
            ("v_opf_file_as", OPFDC.A_NS_FILE_AS),
            ("v_opf_role", OPFDC.A_NS_ROLE),
            ("v_opf_scheme", OPFDC.A_NS_SCHEME),
            ("v_xml_lang", OPFDC.A_NS_LANG),
        )
        for attribute, xml_name in attribute_map:
            setattr(self, attribute, obj.get(xml_name))
        self.v_tag = obj.tag
        self.v_text = yael.util.safe_strip(obj.text)

    @property
    def v_dir(self):
        """Value of the `dir` attribute (EPUB 3 only).

        :rtype: str
        """
        return self.__v_dir

    @v_dir.setter
    def v_dir(self, v_dir):
        self.__v_dir = v_dir

    @property
    def v_id(self):
        """Value of the `id` attribute.

        :rtype: str
        """
        return self.__v_id

    @v_id.setter
    def v_id(self, v_id):
        self.__v_id = v_id

    @property
    def v_opf_event(self):
        """Value of the `opf:event` attribute (EPUB 2 only).

        :rtype: str
        """
        return self.__v_opf_event

    @v_opf_event.setter
    def v_opf_event(self, v_opf_event):
        self.__v_opf_event = v_opf_event

    @property
    def v_opf_file_as(self):
        """Value of the `opf:file-as` attribute (EPUB 2 only).

        :rtype: str
        """
        return self.__v_opf_file_as

    @v_opf_file_as.setter
    def v_opf_file_as(self, v_opf_file_as):
        self.__v_opf_file_as = v_opf_file_as

    @property
    def v_opf_role(self):
        """Value of the `opf:role` attribute (EPUB 2 only).

        :rtype: str
        """
        return self.__v_opf_role

    @v_opf_role.setter
    def v_opf_role(self, v_opf_role):
        self.__v_opf_role = v_opf_role

    @property
    def v_opf_scheme(self):
        """Value of the `opf:scheme` attribute (EPUB 2 only).

        :rtype: str
        """
        return self.__v_opf_scheme

    @v_opf_scheme.setter
    def v_opf_scheme(self, v_opf_scheme):
        self.__v_opf_scheme = v_opf_scheme

    @property
    def v_tag(self):
        """Tag of this metadatum.

        :rtype: str
        """
        return self.__v_tag

    @v_tag.setter
    def v_tag(self, v_tag):
        self.__v_tag = v_tag

    @property
    def v_text(self):
        """Text of this metadatum.

        :rtype: str
        """
        return self.__v_text

    @v_text.setter
    def v_text(self, v_text):
        self.__v_text = v_text

    @property
    def v_xml_lang(self):
        """Value of the `xml:lang` attribute (EPUB 3 only).

        :rtype: str
        """
        return self.__v_xml_lang

    @v_xml_lang.setter
    def v_xml_lang(self, v_xml_lang):
        self.__v_xml_lang = v_xml_lang
|
|
"""Support for Radio Thermostat wifi-enabled home thermostats."""
import logging
from socket import timeout
import radiotherm
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_OFF,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
PRECISION_HALVES,
STATE_ON,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_ACTION = "fan_action"
CONF_HOLD_TEMP = "hold_temp"
PRESET_HOLIDAY = "holiday"
PRESET_ALTERNATE = "alternate"
STATE_CIRCULATE = "circulate"
PRESET_MODES = [PRESET_HOME, PRESET_ALTERNATE, PRESET_AWAY, PRESET_HOLIDAY]
OPERATION_LIST = [HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
CT30_FAN_OPERATION_LIST = [STATE_ON, HVAC_MODE_AUTO]
CT80_FAN_OPERATION_LIST = [STATE_ON, STATE_CIRCULATE, HVAC_MODE_AUTO]
# Mappings from radiotherm json data codes to and from Home Assistant state
# flags. CODE is the thermostat integer code and these map to and
# from Home Assistant state flags.
# Programmed temperature mode of the thermostat.
CODE_TO_TEMP_MODE = {
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_AUTO,
}
TEMP_MODE_TO_CODE = {v: k for k, v in CODE_TO_TEMP_MODE.items()}
# Programmed fan mode (circulate is supported by CT80 models)
CODE_TO_FAN_MODE = {0: HVAC_MODE_AUTO, 1: STATE_CIRCULATE, 2: STATE_ON}
FAN_MODE_TO_CODE = {v: k for k, v in CODE_TO_FAN_MODE.items()}
# Active thermostat state (is it heating or cooling?). In the future
# this should probably made into heat and cool binary sensors.
CODE_TO_TEMP_STATE = {0: CURRENT_HVAC_IDLE, 1: CURRENT_HVAC_HEAT, 2: CURRENT_HVAC_COOL}
# Active fan state. This is if the fan is actually on or not. In the
# future this should probably made into a binary sensor for the fan.
CODE_TO_FAN_STATE = {0: FAN_OFF, 1: FAN_ON}
PRESET_MODE_TO_CODE = {"home": 0, "alternate": 1, "away": 2, "holiday": 3}
CODE_TO_PRESET_MODE = {0: "home", 1: "alternate", 2: "away", 3: "holiday"}
def round_temp(temperature):
    """Round a temperature to the resolution of the thermostat.

    RadioThermostats can handle 0.5 degree temps, so the input is snapped
    to the nearest half degree before being sent to the device.
    """
    half_degrees = round(temperature * 2.0)
    return half_degrees / 2.0
# YAML configuration: CONF_HOST may list several thermostat addresses;
# when omitted, setup_platform falls back to network discovery.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean,
    }
)
# Target temperature, fan mode and preset mode are supported on all models.
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Radio Thermostat platform from YAML config or discovery."""
    if CONF_HOST in config:
        hosts = config[CONF_HOST]
    else:
        # discover_address() returns None when no thermostat answers.
        hosts = [radiotherm.discover.discover_address()]
    # Fixed: the original tested `hosts is None`, which is never true for a
    # list, so a failed discovery slipped [None] through to get_thermostat.
    if not hosts or hosts[0] is None:
        _LOGGER.error("No Radiotherm Thermostats detected")
        return False
    hold_temp = config.get(CONF_HOLD_TEMP)
    tstats = []
    for host in hosts:
        try:
            tstat = radiotherm.get_thermostat(host)
            tstats.append(RadioThermostat(tstat, hold_temp))
        except OSError:
            _LOGGER.exception("Unable to connect to Radio Thermostat: %s", host)
    add_entities(tstats, True)
class RadioThermostat(ClimateEntity):
"""Representation of a Radio Thermostat."""
def __init__(self, device, hold_temp):
"""Initialize the thermostat."""
self.device = device
self._target_temperature = None
self._current_temperature = None
self._current_humidity = None
self._current_operation = HVAC_MODE_OFF
self._name = None
self._fmode = None
self._fstate = None
self._tmode = None
self._tstate = None
self._hold_temp = hold_temp
self._hold_set = False
self._prev_temp = None
self._preset_mode = None
self._program_mode = None
self._is_away = False
# Fan circulate mode is only supported by the CT80 models.
self._is_model_ct80 = isinstance(self.device, radiotherm.thermostat.CT80)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
async def async_added_to_hass(self):
"""Register callbacks."""
# Set the time on the device. This shouldn't be in the
# constructor because it's a network call. We can't put it in
# update() because calling it will clear any temporary mode or
# temperature in the thermostat. So add it as a future job
# for the event loop to run.
self.hass.async_add_job(self.set_time)
@property
def name(self):
"""Return the name of the Radio Thermostat."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_HALVES
@property
def extra_state_attributes(self):
"""Return the device specific state attributes."""
return {ATTR_FAN_ACTION: self._fstate}
@property
def fan_modes(self):
"""List of available fan modes."""
if self._is_model_ct80:
return CT80_FAN_OPERATION_LIST
return CT30_FAN_OPERATION_LIST
@property
def fan_mode(self):
"""Return whether the fan is on."""
return self._fmode
def set_fan_mode(self, fan_mode):
"""Turn fan on/off."""
code = FAN_MODE_TO_CODE.get(fan_mode)
if code is not None:
self.device.fmode = code
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def current_humidity(self):
"""Return the current temperature."""
return self._current_humidity
@property
def hvac_mode(self):
"""Return the current operation. head, cool idle."""
return self._current_operation
@property
def hvac_modes(self):
"""Return the operation modes list."""
return OPERATION_LIST
@property
def hvac_action(self):
"""Return the current running hvac operation if supported."""
if self.hvac_mode == HVAC_MODE_OFF:
return None
return self._tstate
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
if self._program_mode == 0:
return PRESET_HOME
if self._program_mode == 1:
return PRESET_ALTERNATE
if self._program_mode == 2:
return PRESET_AWAY
if self._program_mode == 3:
return PRESET_HOLIDAY
@property
def preset_modes(self):
"""Return a list of available preset modes."""
return PRESET_MODES
def update(self):
"""Update and validate the data from the thermostat."""
# Radio thermostats are very slow, and sometimes don't respond
# very quickly. So we need to keep the number of calls to them
# to a bare minimum or we'll hit the Home Assistant 10 sec warning. We
# have to make one call to /tstat to get temps but we'll try and
# keep the other calls to a minimum. Even with this, these
# thermostats tend to time out sometimes when they're actively
# heating or cooling.
try:
# First time - get the name from the thermostat. This is
# normally set in the radio thermostat web app.
if self._name is None:
self._name = self.device.name["raw"]
# Request the current state from the thermostat.
data = self.device.tstat["raw"]
if self._is_model_ct80:
humiditydata = self.device.humidity["raw"]
except radiotherm.validate.RadiothermTstatError:
_LOGGER.warning(
"%s (%s) was busy (invalid value returned)",
self._name,
self.device.host,
)
except timeout:
_LOGGER.warning(
"Timeout waiting for response from %s (%s)",
self._name,
self.device.host,
)
else:
if self._is_model_ct80:
self._current_humidity = humiditydata
self._program_mode = data["program_mode"]
self._preset_mode = CODE_TO_PRESET_MODE[data["program_mode"]]
# Map thermostat values into various STATE_ flags.
self._current_temperature = data["temp"]
self._fmode = CODE_TO_FAN_MODE[data["fmode"]]
self._fstate = CODE_TO_FAN_STATE[data["fstate"]]
self._tmode = CODE_TO_TEMP_MODE[data["tmode"]]
self._tstate = CODE_TO_TEMP_STATE[data["tstate"]]
self._current_operation = self._tmode
if self._tmode == HVAC_MODE_COOL:
self._target_temperature = data["t_cool"]
elif self._tmode == HVAC_MODE_HEAT:
self._target_temperature = data["t_heat"]
elif self._tmode == HVAC_MODE_AUTO:
# This doesn't really work - tstate is only set if the HVAC is
# active. If it's idle, we don't know what to do with the target
# temperature.
if self._tstate == CURRENT_HVAC_COOL:
self._target_temperature = data["t_cool"]
elif self._tstate == CURRENT_HVAC_HEAT:
self._target_temperature = data["t_heat"]
else:
self._current_operation = HVAC_MODE_OFF
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
temperature = round_temp(temperature)
if self._current_operation == HVAC_MODE_COOL:
self.device.t_cool = temperature
elif self._current_operation == HVAC_MODE_HEAT:
self.device.t_heat = temperature
elif self._current_operation == HVAC_MODE_AUTO:
if self._tstate == CURRENT_HVAC_COOL:
self.device.t_cool = temperature
elif self._tstate == CURRENT_HVAC_HEAT:
self.device.t_heat = temperature
# Only change the hold if requested or if hold mode was turned
# on and we haven't set it yet.
if kwargs.get("hold_changed", False) or not self._hold_set:
if self._hold_temp:
self.device.hold = 1
self._hold_set = True
else:
self.device.hold = 0
def set_time(self):
"""Set device time."""
# Calling this clears any local temperature override and
# reverts to the scheduled temperature.
now = dt_util.now()
self.device.time = {
"day": now.weekday(),
"hour": now.hour,
"minute": now.minute,
}
def set_hvac_mode(self, hvac_mode):
"""Set operation mode (auto, cool, heat, off)."""
if hvac_mode in (HVAC_MODE_OFF, HVAC_MODE_AUTO):
self.device.tmode = TEMP_MODE_TO_CODE[hvac_mode]
# Setting t_cool or t_heat automatically changes tmode.
elif hvac_mode == HVAC_MODE_COOL:
self.device.t_cool = self._target_temperature
elif hvac_mode == HVAC_MODE_HEAT:
self.device.t_heat = self._target_temperature
def set_preset_mode(self, preset_mode):
    """Set Preset mode (Home, Alternate, Away, Holiday)."""
    if preset_mode not in PRESET_MODES:
        # Unknown preset: log and leave the device untouched.
        _LOGGER.error(
            "Preset_mode %s not in PRESET_MODES",
            preset_mode,
        )
        return
    self.device.program_mode = PRESET_MODE_TO_CODE[preset_mode]
|
|
# u-msgpack-python v2.4.1 - v at sergeev.io
# https://github.com/vsergeev/u-msgpack-python
#
# u-msgpack-python is a lightweight MessagePack serializer and deserializer
# module, compatible with both Python 2 and 3, as well as CPython and PyPy
# implementations of Python. u-msgpack-python is fully compliant with the
# latest MessagePack specification
# (https://github.com/msgpack/msgpack/blob/master/spec.md). In particular,
# it supports the new binary, UTF-8 string, and application ext types.
#
# MIT License
#
# Copyright (c) 2013-2016 vsergeev / Ivan (Vanya) A. Sergeev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
u-msgpack-python v2.4.1 - v at sergeev.io
https://github.com/vsergeev/u-msgpack-python
u-msgpack-python is a lightweight MessagePack serializer and deserializer
module, compatible with both Python 2 and 3, as well as CPython and PyPy
implementations of Python. u-msgpack-python is fully compliant with the
latest MessagePack specification
(https://github.com/msgpack/msgpack/blob/master/spec.md). In particular,
it supports the new binary, UTF-8 string, and application ext types.
License: MIT
"""
import struct
import collections
import sys
import io
# Package version, exposed both as a PEP 396 string and as a tuple.
__version__ = "2.4.1"
"Module version string"
version = (2, 4, 1)
"Module version tuple"
##############################################################################
# Ext Class
##############################################################################
# Extension type for application-defined types and data
class Ext:
    """
    Wrapper for a MessagePack extension object: an application-defined
    type code (0-127) paired with an opaque byte-array payload.
    """

    def __init__(self, type, data):
        """
        Construct a new Ext object.

        Args:
            type: application-defined type integer from 0 to 127
            data: application-defined data byte array

        Raises:
            TypeError:
                Specified ext type is outside of the 0 to 127 range, or
                data is not a byte string for the running interpreter.
        """
        # Application ext type must be in the range 0 <= type <= 127.
        if not isinstance(type, int) or not (type >= 0 and type <= 127):
            raise TypeError("ext type out of range")
        # Payload must be a byte string for the running interpreter.
        elif sys.version_info[0] == 3 and not isinstance(data, bytes):
            raise TypeError("ext data is not type 'bytes'")
        elif sys.version_info[0] == 2 and not isinstance(data, str):
            raise TypeError("ext data is not type 'str'")
        self.type = type
        self.data = data

    def __eq__(self, other):
        """Two Ext objects are equal when both type and data match."""
        if not isinstance(other, self.__class__):
            return False
        return self.type == other.type and self.data == other.data

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self.__eq__(other)

    def __str__(self):
        """Human-readable summary showing the type and up to 8 data bytes."""
        shown = min(len(self.data), 8)
        parts = ["0x%02x" % ord(self.data[i:i + 1]) for i in range(shown)]
        text = "Ext Object (Type: 0x%02x, Data: " % self.type
        text += " ".join(parts)
        if len(self.data) > 8:
            text += " ..."
        return text + ")"

    def __hash__(self):
        """Hash on the (type, data) pair so equal objects hash alike."""
        return hash((self.type, self.data))
class InvalidString(bytes):
    """Subclass of bytes to hold invalid UTF-8 strings.

    Returned by the unpacker in place of str when the serialized string is
    not valid UTF-8 and the caller opted in via allow_invalid_utf8.
    """
    pass
##############################################################################
# Exceptions
##############################################################################
# Base Exception classes
class PackException(Exception):
    """Base class for exceptions encountered during packing."""
class UnpackException(Exception):
    """Base class for exceptions encountered during unpacking."""
# Packing error
class UnsupportedTypeException(PackException):
    """Object type not supported for packing."""
# Unpacking error
class InsufficientDataException(UnpackException):
    """Insufficient data to unpack the serialized object."""
class InvalidStringException(UnpackException):
    """Invalid UTF-8 string encountered during unpacking."""
class ReservedCodeException(UnpackException):
    """Reserved code encountered during unpacking."""
class UnhashableKeyException(UnpackException):
    """
    Unhashable key encountered during map unpacking.
    The serialized map cannot be deserialized into a Python dictionary.
    """
class DuplicateKeyException(UnpackException):
    """Duplicate key encountered during map unpacking."""
# Backwards-compatible aliases for the pre-2.x exception names.
KeyNotPrimitiveException = UnhashableKeyException
KeyDuplicateException = DuplicateKeyException
#############################################################################
# Exported Functions and Globals
#############################################################################
# Public entry points; bound to the Python-2 or Python-3 implementations
# by __init() at import time (None until then).
pack = None
packb = None
unpack = None
unpackb = None
# json-module-style aliases for the same four functions.
dump = None
dumps = None
load = None
loads = None
compatibility = False
"""
Compatibility mode boolean.
When compatibility mode is enabled, u-msgpack-python will serialize both
unicode strings and bytes into the old "raw" msgpack type, and deserialize the
"raw" msgpack type into bytes. This provides backwards compatibility with the
old MessagePack specification.
Example:
>>> umsgpack.compatibility = True
>>>
>>> umsgpack.packb([u"some string", b"some bytes"])
b'\x92\xabsome string\xaasome bytes'
>>> umsgpack.unpackb(_)
[b'some string', b'some bytes']
>>>
"""
##############################################################################
# Packing
##############################################################################
# You may notice struct.pack("B", obj) instead of the simpler chr(obj) in the
# code below. This is to allow for seamless Python 2 and 3 compatibility, as
# chr(obj) has a str return type instead of bytes in Python 3, and
# struct.pack(...) has the right return type in both versions.
def _pack_integer(obj, fp, options):
if obj < 0:
if obj >= -32:
fp.write(struct.pack("b", obj))
elif obj >= -2**(8 - 1):
fp.write(b"\xd0" + struct.pack("b", obj))
elif obj >= -2**(16 - 1):
fp.write(b"\xd1" + struct.pack(">h", obj))
elif obj >= -2**(32 - 1):
fp.write(b"\xd2" + struct.pack(">i", obj))
elif obj >= -2**(64 - 1):
fp.write(b"\xd3" + struct.pack(">q", obj))
else:
raise UnsupportedTypeException("huge signed int")
else:
if obj <= 127:
fp.write(struct.pack("B", obj))
elif obj <= 2**8 - 1:
fp.write(b"\xcc" + struct.pack("B", obj))
elif obj <= 2**16 - 1:
fp.write(b"\xcd" + struct.pack(">H", obj))
elif obj <= 2**32 - 1:
fp.write(b"\xce" + struct.pack(">I", obj))
elif obj <= 2**64 - 1:
fp.write(b"\xcf" + struct.pack(">Q", obj))
else:
raise UnsupportedTypeException("huge unsigned int")
def _pack_nil(obj, fp, options):
fp.write(b"\xc0")
def _pack_boolean(obj, fp, options):
fp.write(b"\xc3" if obj else b"\xc2")
def _pack_float(obj, fp, options):
float_precision = options.get('force_float_precision', _float_precision)
if float_precision == "double":
fp.write(b"\xcb" + struct.pack(">d", obj))
elif float_precision == "single":
fp.write(b"\xca" + struct.pack(">f", obj))
else:
raise ValueError("invalid float precision")
def _pack_string(obj, fp, options):
obj = obj.encode('utf-8')
if len(obj) <= 31:
fp.write(struct.pack("B", 0xa0 | len(obj)) + obj)
elif len(obj) <= 2**8 - 1:
fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xda" + struct.pack(">H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdb" + struct.pack(">I", len(obj)) + obj)
else:
raise UnsupportedTypeException("huge string")
def _pack_binary(obj, fp, options):
if len(obj) <= 2**8 - 1:
fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xc5" + struct.pack(">H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xc6" + struct.pack(">I", len(obj)) + obj)
else:
raise UnsupportedTypeException("huge binary string")
def _pack_oldspec_raw(obj, fp, options):
if len(obj) <= 31:
fp.write(struct.pack("B", 0xa0 | len(obj)) + obj)
elif len(obj) <= 2**16 - 1:
fp.write(b"\xda" + struct.pack(">H", len(obj)) + obj)
elif len(obj) <= 2**32 - 1:
fp.write(b"\xdb" + struct.pack(">I", len(obj)) + obj)
else:
raise UnsupportedTypeException("huge raw string")
def _pack_ext(obj, fp, options):
if len(obj.data) == 1:
fp.write(b"\xd4" + struct.pack("B", obj.type & 0xff) + obj.data)
elif len(obj.data) == 2:
fp.write(b"\xd5" + struct.pack("B", obj.type & 0xff) + obj.data)
elif len(obj.data) == 4:
fp.write(b"\xd6" + struct.pack("B", obj.type & 0xff) + obj.data)
elif len(obj.data) == 8:
fp.write(b"\xd7" + struct.pack("B", obj.type & 0xff) + obj.data)
elif len(obj.data) == 16:
fp.write(b"\xd8" + struct.pack("B", obj.type & 0xff) + obj.data)
elif len(obj.data) <= 2**8 - 1:
fp.write(b"\xc7" +
struct.pack("BB", len(obj.data), obj.type & 0xff) + obj.data)
elif len(obj.data) <= 2**16 - 1:
fp.write(b"\xc8" +
struct.pack(">HB", len(obj.data), obj.type & 0xff) + obj.data)
elif len(obj.data) <= 2**32 - 1:
fp.write(b"\xc9" +
struct.pack(">IB", len(obj.data), obj.type & 0xff) + obj.data)
else:
raise UnsupportedTypeException("huge ext data")
def _pack_array(obj, fp, options):
    """Write an array header for obj, then pack each element in order."""
    n = len(obj)
    if n <= 15:
        fp.write(struct.pack("B", 0x90 | n))       # fixarray
    elif n <= 2**16 - 1:
        fp.write(b"\xdc" + struct.pack(">H", n))   # array 16
    elif n <= 2**32 - 1:
        fp.write(b"\xdd" + struct.pack(">I", n))   # array 32
    else:
        raise UnsupportedTypeException("huge array")
    for element in obj:
        pack(element, fp, **options)
def _pack_map(obj, fp, options):
    """Write a msgpack map header for obj, then pack each key/value pair.

    Args:
        obj: a dict-like object supporting len() and items()
        fp: a .write()-supporting file-like object
        options: keyword options forwarded to the recursive pack() calls

    Raises:
        UnsupportedTypeException: map has more than 2**32 - 1 entries.
    """
    if len(obj) <= 15:
        fp.write(struct.pack("B", 0x80 | len(obj)))
    elif len(obj) <= 2**16 - 1:
        fp.write(b"\xde" + struct.pack(">H", len(obj)))
    elif len(obj) <= 2**32 - 1:
        fp.write(b"\xdf" + struct.pack(">I", len(obj)))
    else:
        # BUG FIX: message previously said "huge array" (copy-paste from
        # _pack_array); this is the map packer.
        raise UnsupportedTypeException("huge map")
    for k, v in obj.items():
        pack(k, fp, **options)
        pack(v, fp, **options)
########################################
# Pack for Python 2, with 'unicode' type, 'str' type, and 'long' type
def _pack2(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 2 variant:
    handles the 'unicode', 'str', and 'long' types).

    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.

    Returns:
        None.

    Raises:
        UnsupportedTypeException(PackException):
            Object type not supported for packing.
    """
    global compatibility
    ext_handlers = options.get("ext_handlers")
    # NOTE: the isinstance order below matters -- bool is tested before int
    # (bool is an int subclass) and the compatibility-mode branches come
    # before the normal unicode/str branches.
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        # An exact-type ext handler takes precedence over built-in types.
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int) or isinstance(obj, long):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, unicode):
        _pack_oldspec_raw(bytes(obj), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, unicode):
        _pack_string(obj, fp, options)
    elif isinstance(obj, str):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, list) or isinstance(obj, tuple):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Fall back to a linear search for a handler registered on a
        # superclass of obj's type.
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException("unsupported type: %s" % str(type(obj)))
# Pack for Python 3, with unicode 'str' type, 'bytes' type, and no 'long' type
def _pack3(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 3 variant:
    unicode 'str' type, 'bytes' type, and no 'long' type).

    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.

    Returns:
        None.

    Raises:
        UnsupportedTypeException(PackException):
            Object type not supported for packing.
    """
    global compatibility
    ext_handlers = options.get("ext_handlers")
    # NOTE: the isinstance order below matters -- bool is tested before int
    # (bool is an int subclass) and the compatibility-mode branches come
    # before the normal str/bytes branches.
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        # An exact-type ext handler takes precedence over built-in types.
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, str):
        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, str):
        _pack_string(obj, fp, options)
    elif isinstance(obj, bytes):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, list) or isinstance(obj, tuple):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Fall back to a linear search for a handler registered on a
        # superclass of obj's type.
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException(
            "unsupported type: %s" % str(type(obj)))
def _packb2(obj, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 2 variant).

    Args:
        obj: a Python object

    Kwargs:
        ext_handlers (dict): maps a custom type to a callable that packs an
            instance of the type into an Ext object
        force_float_precision (str): "single" or "double" to force the float
            encoding width

    Returns:
        A 'str' containing serialized MessagePack bytes.

    Raises:
        UnsupportedTypeException(PackException):
            Object type not supported for packing.
    """
    buffer = io.BytesIO()
    _pack2(obj, buffer, **options)
    return buffer.getvalue()
def _packb3(obj, **options):
    """
    Serialize a Python object into MessagePack bytes (Python 3 variant).

    Args:
        obj: a Python object

    Kwargs:
        ext_handlers (dict): maps a custom type to a callable that packs an
            instance of the type into an Ext object
        force_float_precision (str): "single" or "double" to force the float
            encoding width

    Returns:
        A 'bytes' containing serialized MessagePack bytes.

    Raises:
        UnsupportedTypeException(PackException):
            Object type not supported for packing.
    """
    buffer = io.BytesIO()
    _pack3(obj, buffer, **options)
    return buffer.getvalue()
#############################################################################
# Unpacking
#############################################################################
def _read_except(fp, n):
data = fp.read(n)
if len(data) < n:
raise InsufficientDataException()
return data
def _unpack_integer(code, fp, options):
if (ord(code) & 0xe0) == 0xe0:
return struct.unpack("b", code)[0]
elif code == b'\xd0':
return struct.unpack("b", _read_except(fp, 1))[0]
elif code == b'\xd1':
return struct.unpack(">h", _read_except(fp, 2))[0]
elif code == b'\xd2':
return struct.unpack(">i", _read_except(fp, 4))[0]
elif code == b'\xd3':
return struct.unpack(">q", _read_except(fp, 8))[0]
elif (ord(code) & 0x80) == 0x00:
return struct.unpack("B", code)[0]
elif code == b'\xcc':
return struct.unpack("B", _read_except(fp, 1))[0]
elif code == b'\xcd':
return struct.unpack(">H", _read_except(fp, 2))[0]
elif code == b'\xce':
return struct.unpack(">I", _read_except(fp, 4))[0]
elif code == b'\xcf':
return struct.unpack(">Q", _read_except(fp, 8))[0]
raise Exception("logic error, not int: 0x%02x" % ord(code))
def _unpack_reserved(code, fp, options):
    """Raise for the reserved code byte 0xc1; anything else is a logic error."""
    if code != b'\xc1':
        raise Exception(
            "logic error, not reserved code: 0x%02x" % ord(code))
    raise ReservedCodeException(
        "encountered reserved code: 0x%02x" % ord(code))
def _unpack_nil(code, fp, options):
if code == b'\xc0':
return None
raise Exception("logic error, not nil: 0x%02x" % ord(code))
def _unpack_boolean(code, fp, options):
if code == b'\xc2':
return False
elif code == b'\xc3':
return True
raise Exception("logic error, not boolean: 0x%02x" % ord(code))
def _unpack_float(code, fp, options):
    """Decode an IEEE-754 single (0xca) or double (0xcb) precision float."""
    formats = {b'\xca': (">f", 4), b'\xcb': (">d", 8)}
    if code in formats:
        fmt, width = formats[code]
        return struct.unpack(fmt, _read_except(fp, width))[0]
    raise Exception("logic error, not float: 0x%02x" % ord(code))
def _unpack_string(code, fp, options):
    """Decode a msgpack str object.

    Returns bytes in compatibility mode, otherwise a decoded str; invalid
    UTF-8 yields InvalidString when allow_invalid_utf8 is set, else raises
    InvalidStringException.
    """
    head = ord(code)
    if (head & 0xe0) == 0xa0:
        length = head & ~0xe0                      # fixstr
    elif code == b'\xd9':
        length = struct.unpack("B", _read_except(fp, 1))[0]
    elif code == b'\xda':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xdb':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not string: 0x%02x" % head)
    global compatibility
    data = _read_except(fp, length)
    # Always return raw bytes in compatibility mode.
    if compatibility:
        return data
    try:
        return bytes.decode(data, 'utf-8')
    except UnicodeDecodeError:
        if options.get("allow_invalid_utf8"):
            return InvalidString(data)
        raise InvalidStringException("unpacked string is invalid utf-8")
def _unpack_binary(code, fp, options):
    """Decode a msgpack bin object and return its raw bytes."""
    headers = {b'\xc4': ("B", 1), b'\xc5': (">H", 2), b'\xc6': (">I", 4)}
    if code not in headers:
        raise Exception("logic error, not binary: 0x%02x" % ord(code))
    fmt, width = headers[code]
    length = struct.unpack(fmt, _read_except(fp, width))[0]
    return _read_except(fp, length)
def _unpack_ext(code, fp, options):
    """Decode a msgpack ext object, applying a registered ext handler if any."""
    fixext_lengths = {b'\xd4': 1, b'\xd5': 2, b'\xd6': 4,
                      b'\xd7': 8, b'\xd8': 16}
    if code in fixext_lengths:
        length = fixext_lengths[code]
    elif code == b'\xc7':
        length = struct.unpack("B", _read_except(fp, 1))[0]
    elif code == b'\xc8':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xc9':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not ext: 0x%02x" % ord(code))
    # The one-byte ext type precedes the payload.
    ext = Ext(ord(_read_except(fp, 1)), _read_except(fp, length))
    handlers = options.get("ext_handlers")
    if handlers and ext.type in handlers:
        ext = handlers[ext.type](ext)
    return ext
def _unpack_array(code, fp, options):
    """Decode an array header, then recursively unpack each element."""
    if (ord(code) & 0xf0) == 0x90:
        length = ord(code) & ~0xf0                 # fixarray
    elif code == b'\xdc':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xdd':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not array: 0x%02x" % ord(code))
    result = []
    for _ in xrange(length):
        result.append(_unpack(fp, options))
    return result
def _deep_list_to_tuple(obj):
if isinstance(obj, list):
return tuple([_deep_list_to_tuple(e) for e in obj])
return obj
def _unpack_map(code, fp, options):
    """Unpack a msgpack map into a dict (or OrderedDict when
    use_ordered_dict is set).

    List keys are converted to tuples so they become hashable; other
    unhashable or duplicate keys raise.

    Raises:
        UnhashableKeyException: key cannot be used as a dict key.
        DuplicateKeyException: the same key appears twice in the map.
    """
    if (ord(code) & 0xf0) == 0x80:
        length = (ord(code) & ~0xf0)
    elif code == b'\xde':
        length = struct.unpack(">H", _read_except(fp, 2))[0]
    elif code == b'\xdf':
        length = struct.unpack(">I", _read_except(fp, 4))[0]
    else:
        raise Exception("logic error, not map: 0x%02x" % ord(code))
    d = {} if not options.get('use_ordered_dict') \
        else collections.OrderedDict()
    # COMPAT FIX: collections.Hashable moved to collections.abc in Python 3.3
    # and the old alias was removed in 3.10; fall back to the flat module on
    # Python 2 where collections.abc does not exist.
    hashable_type = getattr(collections, 'abc', collections).Hashable
    for _ in xrange(length):
        # Unpack key
        k = _unpack(fp, options)
        if isinstance(k, list):
            # Attempt to convert list into a hashable tuple
            k = _deep_list_to_tuple(k)
        elif not isinstance(k, hashable_type):
            raise UnhashableKeyException(
                "encountered unhashable key: %s, %s" % (str(k), str(type(k))))
        elif k in d:
            raise DuplicateKeyException(
                "encountered duplicate key: %s, %s" % (str(k), str(type(k))))
        # Unpack value
        v = _unpack(fp, options)
        try:
            d[k] = v
        except TypeError:
            raise UnhashableKeyException(
                "encountered unhashable key: %s" % str(k))
    return d
def _unpack(fp, options):
    """Read one object from fp: dispatch on its first byte via the
    module dispatch table."""
    type_code = _read_except(fp, 1)
    handler = _unpack_dispatch_table[type_code]
    return handler(type_code, fp, options)
########################################
def _unpack2(fp, **options):
    """
    Deserialize MessagePack bytes from a file-like object (Python 2 variant).

    Args:
        fp: a .read()-supporting file-like object

    Kwargs:
        ext_handlers (dict): maps integer Ext type to a callable that unpacks
            an instance of Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict instead of dict
            (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into InvalidString
            instances (default False)

    Returns:
        A Python object.

    Raises:
        InsufficientDataException, InvalidStringException,
        ReservedCodeException, UnhashableKeyException, or
        DuplicateKeyException (all subclasses of UnpackException).
    """
    return _unpack(fp, options)
def _unpack3(fp, **options):
    """
    Deserialize MessagePack bytes from a file-like object (Python 3 variant).

    Args:
        fp: a .read()-supporting file-like object

    Kwargs:
        ext_handlers (dict): maps integer Ext type to a callable that unpacks
            an instance of Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict instead of dict
            (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into InvalidString
            instances (default False)

    Returns:
        A Python object.

    Raises:
        InsufficientDataException, InvalidStringException,
        ReservedCodeException, UnhashableKeyException, or
        DuplicateKeyException (all subclasses of UnpackException).
    """
    return _unpack(fp, options)
# For Python 2, expects a str object
def _unpackb2(s, **options):
    """
    Deserialize MessagePack bytes into a Python object (Python 2 variant:
    accepts 'str' or 'bytearray').

    Args:
        s: a 'str' or 'bytearray' containing serialized MessagePack bytes

    Kwargs:
        ext_handlers (dict): maps integer Ext type to a callable that unpacks
            an instance of Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict instead of dict
            (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into InvalidString
            instances (default False)

    Returns:
        A Python object.

    Raises:
        TypeError: packed data is neither 'str' nor 'bytearray'.
        InsufficientDataException, InvalidStringException,
        ReservedCodeException, UnhashableKeyException, or
        DuplicateKeyException (all subclasses of UnpackException).
    """
    if isinstance(s, (str, bytearray)):
        return _unpack(io.BytesIO(s), options)
    raise TypeError("packed data must be type 'str' or 'bytearray'")
# For Python 3, expects a bytes object
def _unpackb3(s, **options):
    """
    Deserialize MessagePack bytes into a Python object (Python 3 variant:
    accepts 'bytes' or 'bytearray').

    Args:
        s: a 'bytes' or 'bytearray' containing serialized MessagePack bytes

    Kwargs:
        ext_handlers (dict): maps integer Ext type to a callable that unpacks
            an instance of Ext into an object
        use_ordered_dict (bool): unpack maps into OrderedDict instead of dict
            (default False)
        allow_invalid_utf8 (bool): unpack invalid strings into InvalidString
            instances (default False)

    Returns:
        A Python object.

    Raises:
        TypeError: packed data is neither 'bytes' nor 'bytearray'.
        InsufficientDataException, InvalidStringException,
        ReservedCodeException, UnhashableKeyException, or
        DuplicateKeyException (all subclasses of UnpackException).
    """
    if isinstance(s, (bytes, bytearray)):
        return _unpack(io.BytesIO(s), options)
    raise TypeError("packed data must be type 'bytes' or 'bytearray'")
#############################################################################
# Module Initialization
#############################################################################
def __init():
    """
    Initialize the module: detect float precision, bind the public
    pack/unpack API to the Python 2 or Python 3 implementations, and
    build the code-byte dispatch table used by the unpacker.
    """
    global pack
    global packb
    global unpack
    global unpackb
    global dump
    global dumps
    global load
    global loads
    global compatibility
    global _float_precision
    global _unpack_dispatch_table
    global xrange
    # Compatibility mode for handling strings/bytes with the old specification
    compatibility = False
    # Auto-detect system float precision from the native double's mantissa
    if sys.float_info.mant_dig == 53:
        _float_precision = "double"
    else:
        _float_precision = "single"
    # Map the public API names to the appropriate interpreter version;
    # dump/dumps/load/loads are json-style aliases for the same functions.
    if sys.version_info[0] == 3:
        pack = _pack3
        packb = _packb3
        dump = _pack3
        dumps = _packb3
        unpack = _unpack3
        unpackb = _unpackb3
        load = _unpack3
        loads = _unpackb3
        xrange = range
    else:
        pack = _pack2
        packb = _packb2
        dump = _pack2
        dumps = _packb2
        unpack = _unpack2
        unpackb = _unpackb2
        load = _unpack2
        loads = _unpackb2
    # Build a dispatch table (code byte -> handler) for fast lookup of the
    # unpacking function.
    _unpack_dispatch_table = {}
    # Positive fixint
    for code in range(0, 0x7f + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
    # Fixmap
    for code in range(0x80, 0x8f + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_map
    # Fixarray
    for code in range(0x90, 0x9f + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_array
    # Fixstr
    for code in range(0xa0, 0xbf + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string
    # Nil
    _unpack_dispatch_table[b'\xc0'] = _unpack_nil
    # Reserved
    _unpack_dispatch_table[b'\xc1'] = _unpack_reserved
    # Boolean
    _unpack_dispatch_table[b'\xc2'] = _unpack_boolean
    _unpack_dispatch_table[b'\xc3'] = _unpack_boolean
    # Bin
    for code in range(0xc4, 0xc6 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_binary
    # Ext
    for code in range(0xc7, 0xc9 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext
    # Float
    _unpack_dispatch_table[b'\xca'] = _unpack_float
    _unpack_dispatch_table[b'\xcb'] = _unpack_float
    # Uint 8/16/32/64
    for code in range(0xcc, 0xcf + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
    # Int 8/16/32/64
    for code in range(0xd0, 0xd3 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_integer
    # Fixext
    for code in range(0xd4, 0xd8 + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_ext
    # Str 8/16/32
    for code in range(0xd9, 0xdb + 1):
        _unpack_dispatch_table[struct.pack("B", code)] = _unpack_string
    # Array 16/32
    _unpack_dispatch_table[b'\xdc'] = _unpack_array
    _unpack_dispatch_table[b'\xdd'] = _unpack_array
    # Map 16/32
    _unpack_dispatch_table[b'\xde'] = _unpack_map
    _unpack_dispatch_table[b'\xdf'] = _unpack_map
# Run module initialization on import.
__init()
|
|
import re
import time
from random import getrandbits
import globus_sdk
from tests.framework import (TransferClientTestCase, get_user_data,
GO_EP1_ID, GO_EP2_ID,
DEFAULT_TASK_WAIT_TIMEOUT,
DEFAULT_TASK_WAIT_POLLING_INTERVAL)
from globus_sdk.exc import TransferAPIError
from globus_sdk.transfer.paging import PaginatedResource
class ManagerTransferClientTests(TransferClientTestCase):
"""
class for Transfer Client Tests that require the activity_manager
effective role on an endpoint but don't require a unique endpoint per test.
Setup checks to see if a managed shared endpoint exists and if not creates
one. This endpoint is not removed during automatic cleanup, but will
be removed by the clean_sdk_test_assets.py script in manual_tools.
"""
__test__ = True # marks sub-class as having tests
@classmethod
def setUpClass(self):
    """
    Sets up a shared endpoint on test go#ep2 managed by sdktester1a,
    and shares it with sdktester2b,
    or sees that this endpoint already exists and gets its id.

    # NOTE(review): parameter is named `self` but this is a classmethod,
    # so it is actually the class object (conventionally `cls`).
    """
    super(ManagerTransferClientTests, self).setUpClass()
    try:
        # shared endpoint hosted on go#ep2 managed by sdktester1a
        host_path = "/~/managed_ep"
        self.tc.operation_mkdir(GO_EP2_ID, path=host_path)
        shared_data = {"DATA_TYPE": "shared_endpoint",
                       "host_endpoint": GO_EP2_ID,
                       "host_path": host_path,
                       "display_name": "SDK Test Managed Endpoint",
                       "description": "Endpoint for managed SDK testing"
                       }
        r = self.tc.create_shared_endpoint(shared_data)
        self.managed_ep_id = r["id"]
        # share read and write to sdktester2b
        add_data = {"DATA_TYPE": "access",
                    "principal_type": "identity",
                    "principal": get_user_data()["sdktester2b"]["id"],
                    "path": "/",
                    "permissions": "rw"}
        self.tc.add_endpoint_acl_rule(self.managed_ep_id, add_data)
    except TransferAPIError as e:
        # mkdir fails with "already exists" when a previous run created the
        # endpoint; in that case reuse the first share hosted on go#ep2.
        # NOTE(review): assumes the managed share is the first entry in the
        # my_shared_endpoint_list response -- confirm ordering guarantees.
        if "already exists" in str(e):
            shares = self.tc.my_shared_endpoint_list(GO_EP2_ID)
            self.managed_ep_id = shares["DATA"][0]["id"]
        else:
            raise e
def test_endpoint_manager_monitored_endpoints(self):
    """
    Gets a list of all endpoints sdktester1a is an activity_manager on.
    Confirms the list contains managed_ep and has some expected fields.
    """
    ep_doc = self.tc.endpoint_manager_monitored_endpoints()
    expected_fields = ["my_effective_roles", "display_name",
                       "owner_id", "owner_string"]
    for ep in ep_doc:
        self.assertEqual(ep["DATA_TYPE"], "endpoint")
        for field in expected_fields:
            self.assertIn(field, ep)
        if ep["id"] == self.managed_ep_id:
            break
    else:
        # for/else runs only when the loop never hit `break`, i.e. the
        # managed endpoint was missing from the listing.
        # FIX: was `self.assertFalse("managed endpoint not found")`, which
        # fails (any non-empty string is truthy) but buries the message;
        # self.fail reports it directly.
        self.fail("managed endpoint not found")
def test_endpoint_manager_get_endpoint(self):
    """
    Gets the managed endpoint, confirms expected results.
    Confirms 403 when non manager attempts to use this resource.
    """
    ep_doc = self.tc.endpoint_manager_get_endpoint(self.managed_ep_id)
    self.assertEqual(ep_doc["DATA_TYPE"], "endpoint")
    self.assertEqual(ep_doc["id"], self.managed_ep_id)
    self.assertIsNone(ep_doc["in_use"])
    # 403 for non managers
    # (fixed: the original called endpoint_manager_hosted_endpoint_list
    # here, which exercised the wrong resource for this test)
    with self.assertRaises(TransferAPIError) as apiErr:
        self.tc2.endpoint_manager_get_endpoint(self.managed_ep_id)
    self.assertEqual(apiErr.exception.http_status, 403)
    self.assertEqual(apiErr.exception.code, "PermissionDenied")
    # TODO: test against a non shared endpoint we have the manager role on
def test_endpoint_manager_hosted_endpoint_list(self):
    """
    Attempts to list the shares hosted on the managed endpoint and
    confirms this fails, since shares cannot themselves host shares.
    Also confirms a non manager gets a 403 from this resource.
    """
    # managers get a 409 because the share is not a host endpoint
    with self.assertRaises(TransferAPIError) as manager_err:
        self.tc.endpoint_manager_hosted_endpoint_list(self.managed_ep_id)
    conflict = manager_err.exception
    self.assertEqual(conflict.http_status, 409)
    self.assertEqual(conflict.code, "Conflict")
    self.assertIn("not a host endpoint", conflict.message)
    # non managers get a 403
    with self.assertRaises(TransferAPIError) as outsider_err:
        self.tc2.endpoint_manager_hosted_endpoint_list(self.managed_ep_id)
    denied = outsider_err.exception
    self.assertEqual(denied.http_status, 403)
    self.assertEqual(denied.code, "PermissionDenied")
def test_endpoint_manager_acl_list(self):
    """
    Gets the ACL list of the managed endpoint and validates the results.
    Confirms a non manager gets a 403 from this resource.
    """
    acl_doc = self.tc.endpoint_manager_acl_list(self.managed_ep_id)
    self.assertEqual(acl_doc["DATA_TYPE"], "access_list")
    required = ("id", "principal", "principal_type", "permissions")
    for rule in acl_doc["DATA"]:
        self.assertEqual(rule["DATA_TYPE"], "access")
        for key in required:
            self.assertIn(key, rule)
    # non managers get a 403
    with self.assertRaises(TransferAPIError) as api_err:
        self.tc2.endpoint_manager_acl_list(self.managed_ep_id)
    self.assertEqual(api_err.exception.http_status, 403)
    self.assertEqual(api_err.exception.code, "PermissionDenied")
def test_endpoint_manager_task_list(self):
    """
    Has sdktester2b submit transfer and delete task to the managed_ep
    Then has sdktester1a get its endpoint manager task list
    Confirms tasks submitted by sdktester2b on the managed endpoint
    are visible, and some expected fields are present.
    """
    # sdktester2b submits tasks
    # new dir with randomized name to prevent collision
    dest_dir = "transfer_dest_dir-" + str(getrandbits(128))
    dest_path = "/" + dest_dir + "/"
    self.tc2.operation_mkdir(self.managed_ep_id, dest_path)
    # transfer a file to the new dir
    tdata = globus_sdk.TransferData(self.tc2, GO_EP1_ID,
                                    self.managed_ep_id,
                                    notify_on_succeeded=False)
    source_path = "/share/godata/"
    file_name = "file1.txt"
    tdata.add_item(source_path + file_name, dest_path + file_name)
    transfer_id = self.tc2.submit_transfer(tdata)["task_id"]
    # delete the new dir
    ddata = globus_sdk.DeleteData(self.tc2, self.managed_ep_id,
                                  recursive=True,
                                  notify_on_succeeded=False)
    ddata.add_item(dest_path)
    delete_id = self.tc2.submit_delete(ddata)["task_id"]
    # sdktester1a gets endpoint manager task list
    # NOTE(review): filters by the share's host endpoint (GO_EP2_ID), not
    # by self.managed_ep_id -- confirm this is the intended filter.
    tasks_doc = self.tc.endpoint_manager_task_list(
        filter_endpoint=GO_EP2_ID,
        filter_user_id=get_user_data()["sdktester2b"]["id"])
    # confirm submitted tasks can be found
    # and tasks have some expected fields
    expected_fields = ["username", "deadline", "type",
                       "source_endpoint_id"]
    delete_found = False
    transfer_found = False
    self.assertIsInstance(tasks_doc, PaginatedResource)
    for task in tasks_doc:
        for field in expected_fields:
            self.assertIn(field, task)
        if task["task_id"] == transfer_id:
            transfer_found = True
        if task["task_id"] == delete_id:
            delete_found = True
        # stop paging once both submitted tasks have been seen
        if transfer_found and delete_found:
            break
    # fail if both not found
    self.assertTrue(delete_found and transfer_found)
def test_endpoint_manager_get_task(self):
    """
    Has sdktester2b submit a no-op task on the managed endpoint and
    confirms sdktester1a can view the task as an admin.
    Confirms a non manager gets a 403 from this resource.
    """
    # sdktester2b submits a no-op delete task
    delete_data = globus_sdk.DeleteData(self.tc2, self.managed_ep_id,
                                        notify_on_fail=False)
    delete_data.add_item("no-op.txt")
    task_id = self.tc2.submit_delete(delete_data)["task_id"]
    # sdktester1a fetches the task as admin and validates it
    task = self.tc.endpoint_manager_get_task(task_id)
    expected_owner = get_user_data()["sdktester2b"]["id"]
    self.assertEqual(task["task_id"], task_id)
    self.assertEqual(task["owner_id"], expected_owner)
    self.assertEqual(task["type"], "DELETE")
    self.assertIn("status", task)
    # non managers get a 403, even for tasks they submitted themselves
    with self.assertRaises(TransferAPIError) as api_err:
        self.tc2.endpoint_manager_get_task(task_id)
    self.assertEqual(api_err.exception.http_status, 403)
    self.assertEqual(api_err.exception.code, "PermissionDenied")
def test_endpoint_manager_task_event_list(self):
    """
    Has sdktester2b submit a no-op task on the managed endpoint.
    Waits for task to fail, and confirms sdktester1a can see the
    failure event as an admin.
    Confirms 403 when non manager attempts to use this resource.
    """
    # sdktester2b submits no-op delete task and waits for completion;
    # deleting a nonexistent file guarantees a FILE_NOT_FOUND event
    ddata = globus_sdk.DeleteData(self.tc2, self.managed_ep_id,
                                  notify_on_fail=False)
    ddata.add_item("no-op.txt")
    task_id = self.tc2.submit_delete(ddata)["task_id"]
    self.assertTrue(
        self.tc2.task_wait(
            task_id, timeout=DEFAULT_TASK_WAIT_TIMEOUT,
            polling_interval=DEFAULT_TASK_WAIT_POLLING_INTERVAL))
    # sdktester1a gets the task event list as admin
    events_doc = self.tc.endpoint_manager_task_event_list(task_id)
    self.assertIsInstance(events_doc, PaginatedResource)
    failure_event = events_doc[0]  # most recent event is first
    self.assertEqual(failure_event["DATA_TYPE"], "event")
    self.assertEqual(failure_event["code"], "FILE_NOT_FOUND")
    self.assertEqual(failure_event["description"],
                     "No such file or directory")
    # 403 for non managers, even if they submitted the task
    with self.assertRaises(TransferAPIError) as apiErr:
        self.tc2.endpoint_manager_task_event_list(task_id)
    self.assertEqual(apiErr.exception.http_status, 403)
    self.assertEqual(apiErr.exception.code, "PermissionDenied")
def test_endpoint_manager_task_successful_transfers(self):
    """
    Has sdktester2b submit a recursive transfer of share/godata to the
    managed_ep. Waits for the task to complete, then has sdktester1a get
    the successful transfers of the task as an admin. Confirms all 3 files
    are seen, and some expected fields are present.
    """
    # new dir with randomized name to prevent collision
    dest_dir = "transfer_dest_dir-" + str(getrandbits(128))
    dest_path = "/" + dest_dir + "/"
    self.tc2.operation_mkdir(self.managed_ep_id, dest_path)
    # transfer the files recursively
    tdata = globus_sdk.TransferData(self.tc2, GO_EP1_ID,
                                    self.managed_ep_id,
                                    notify_on_succeeded=False)
    source_path = "/share/godata/"
    tdata.add_item(source_path, dest_path, recursive=True)
    task_id = self.tc2.submit_transfer(tdata)["task_id"]
    # track asset for cleanup
    self.asset_cleanup.append({"function": self.deleteHelper,
                               "args": [self.managed_ep_id, dest_path]})
    # wait for task to complete
    self.assertTrue(
        self.tc2.task_wait(
            task_id, timeout=DEFAULT_TASK_WAIT_TIMEOUT,
            polling_interval=DEFAULT_TASK_WAIT_POLLING_INTERVAL))
    # sdktester1a gets successful transfers as admin
    success_doc = self.tc.endpoint_manager_task_successful_transfers(
        task_id)
    # confirm results: godata contains exactly file1.txt..file3.txt
    self.assertIsInstance(success_doc, PaginatedResource)
    count = 0
    for transfer in success_doc:
        self.assertEqual(transfer["DATA_TYPE"], "successful_transfer")
        self.assertIsNotNone(re.match(dest_path + "file[1-3].txt",
                                      transfer["destination_path"]))
        count += 1
    self.assertEqual(count, 3)
def _unauthorized_transfers(self):
    """
    Helper that has sdktester2b submit 3 unauthorized transfers from the
    managed endpoint. Returns the list of their task_ids and registers
    an admin cancel of them for cleanup.
    """
    submitted = []
    for _ in range(3):
        transfer = globus_sdk.TransferData(self.tc2, self.managed_ep_id,
                                           GO_EP1_ID,
                                           notify_on_fail=False)
        transfer.add_item("/", "/", recursive=True)
        submitted.append(self.tc2.submit_transfer(transfer)["task_id"])
    # register cleanup: cancel the tasks as the managing admin
    cleanup = {"function": self.tc.endpoint_manager_cancel_tasks,
               "args": [submitted,
                        "Cleanup for unauthorized_transfers helper"]}
    self.asset_cleanup.append(cleanup)
    return submitted
def test_endpoint_manager_cancel_tasks(self):
    """
    Gets task ids from _unauthorized_transfers and has sdktester1a cancel
    those tasks; validates the result document.
    Confirms a non manager gets a 403 from this resource.
    """
    pending = self._unauthorized_transfers()
    note = "SDK test cancel tasks"
    cancel_doc = self.tc.endpoint_manager_cancel_tasks(pending, note)
    # validate the admin_cancel result document
    self.assertEqual(cancel_doc["DATA_TYPE"], "admin_cancel")
    for key in ("done", "id"):
        self.assertIn(key, cancel_doc)
    # non managers get a 403, even for tasks they submitted themselves
    with self.assertRaises(TransferAPIError) as api_err:
        self.tc2.endpoint_manager_cancel_tasks(pending, note)
    self.assertEqual(api_err.exception.http_status, 403)
    self.assertEqual(api_err.exception.code, "PermissionDenied")
def test_endpoint_manager_cancel_status(self):
    """
    Has sdktester2b submit three unauthorized transfers from the managed
    endpoint, and sdktester1a admin_cancel those tasks.
    Gets the cancel status of the cancel and validates results.
    Loops while status is not done, then confirms all tasks canceled.
    """
    # cancel the tasks and get the cancel id
    task_ids = self._unauthorized_transfers()
    message = "SDK test cancel status"
    cancel_id = self.tc.endpoint_manager_cancel_tasks(
        task_ids, message)["id"]
    # poll once per second for at most 30 seconds
    for tries in range(30):
        # get and validate cancel status
        status_doc = self.tc.endpoint_manager_cancel_status(cancel_id)
        self.assertEqual(status_doc["DATA_TYPE"], "admin_cancel")
        self.assertEqual(status_doc["id"], cancel_id)
        if status_doc["done"]:
            break
        else:
            time.sleep(1)
    else:
        # previously the loop fell through silently and later assertions
        # produced confusing errors; fail with a clear message instead
        self.fail("admin cancel did not complete within 30 seconds")
    # confirm sdktester2b now sees all tasks as canceled by admin.
    for task_id in task_ids:
        task_doc = self.tc2.get_task(task_id)
        self.assertEqual(task_doc["canceled_by_admin"], "SOURCE")
        self.assertEqual(task_doc["canceled_by_admin_message"], message)
def test_endpoint_manager_pause_tasks(self):
    """
    Has sdktester2b submit three unauthorized transfers and sdktester1a
    pause them as an admin. Validates the result document and confirms
    the tasks are paused.
    Confirms a non manager gets a 403 from this resource.
    """
    paused_ids = self._unauthorized_transfers()
    note = "SDK test pause tasks"
    pause_doc = self.tc.endpoint_manager_pause_tasks(paused_ids, note)
    # validate the pause result document
    self.assertEqual(pause_doc["DATA_TYPE"], "result")
    self.assertEqual(pause_doc["code"], "PauseAccepted")
    # sdktester2b should now see every task as paused
    for task_id in paused_ids:
        self.assertTrue(self.tc2.get_task(task_id)["is_paused"])
    # non managers get a 403
    with self.assertRaises(TransferAPIError) as api_err:
        self.tc2.endpoint_manager_pause_tasks(paused_ids, note)
    self.assertEqual(api_err.exception.http_status, 403)
    self.assertEqual(api_err.exception.code, "PermissionDenied")
def test_endpoint_manager_resume_tasks(self):
    """
    Has sdktester2b submit three unauthorized transfers,
    then sdktester1a pauses then resumes the tasks as an admin.
    Confirms tasks go from paused to active.
    Confirms 403 when non manager attempts to use this resource.
    """
    # pause the tasks and confirm they are paused
    task_ids = self._unauthorized_transfers()
    message = "SDK test resume tasks"
    self.tc.endpoint_manager_pause_tasks(task_ids, message)
    for task_id in task_ids:
        task_doc = self.tc2.get_task(task_id)
        self.assertTrue(task_doc["is_paused"])
    # resume the tasks and validate results
    resume_doc = self.tc.endpoint_manager_resume_tasks(task_ids)
    self.assertEqual(resume_doc["DATA_TYPE"], "result")
    self.assertEqual(resume_doc["code"], "ResumeAccepted")
    # confirm tasks are now active (no longer paused).
    for task_id in task_ids:
        task_doc = self.tc2.get_task(task_id)
        self.assertFalse(task_doc["is_paused"])
    # 403 for non managers
    with self.assertRaises(TransferAPIError) as apiErr:
        self.tc2.endpoint_manager_resume_tasks(task_ids)
    self.assertEqual(apiErr.exception.http_status, 403)
    self.assertEqual(apiErr.exception.code, "PermissionDenied")
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 12 17:53:32 2017
@author: sakurai
"""
from __future__ import print_function
from collections import OrderedDict
import os
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.sum import sum
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d
from chainer.functions.normalization.local_response_normalization import (
local_response_normalization)
from chainer.functions.noise.dropout import dropout
from chainer.initializers import constant
from chainer.initializers import uniform
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.inception import Inception
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import imgproc
from chainer.variable import Variable
class GoogLeNet(link.Chain):

    """A pre-trained GoogLeNet model provided by BVLC [1].

    When you specify the path of the pre-trained chainer model serialized as
    a ``.npz`` file in the constructor, this chain model automatically
    initializes all the parameters with it.
    This model would be useful when you want to extract a semantic feature
    vector per image, or fine-tune the model on a different dataset.
    If you want to manually convert the pre-trained caffemodel to a chainer
    model that can be specified in the constructor,
    please use ``convert_caffemodel_to_npz`` classmethod instead.

    .. [1] https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet

    Args:
        pretrained_model (str): the destination of the pre-trained
            chainer model serialized as a ``.npz`` file.
            If this argument is specified as ``auto``,
            it automatically downloads the caffemodel from the internet.
            Note that in this case the converted chainer model is stored
            on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
            where ``$CHAINER_DATASET_ROOT`` is set as
            ``$HOME/.chainer/dataset`` unless you specify another value
            as a environment variable. The converted chainer model is
            automatically used from the second time.
            If the argument is specified as ``None``, all the parameters
            are not initialized by the pre-trained model, but the default
            initializer used in the original paper, i.e.,
            ``chainer.initializers.GlorotUniform(scale=1.0)``.

    Attributes:
        available_layers (list of str): The list of available layer names
            used by ``__call__`` and ``extract`` methods.
    """

    # Mean BGR pixel of the training set; subtracted in ``prepare``.
    image_mean = numpy.array([104, 117, 123], dtype=numpy.float32)  # BGR

    def __init__(self, pretrained_model='auto'):
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation
            # (the weights are overwritten by the pre-trained model below).
            kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in the original paper
            kwargs = {'initialW': uniform.GlorotUniform(scale=1.0)}
        super(GoogLeNet, self).__init__(
            conv1=Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs),
            conv2_reduce=Convolution2D(64, 64, 1, **kwargs),
            conv2=Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs),
            inc3a=Inception(192, 64, 96, 128, 16, 32, 32),
            inc3b=Inception(256, 128, 128, 192, 32, 96, 64),
            inc4a=Inception(480, 192, 96, 208, 16, 48, 64),
            inc4b=Inception(512, 160, 112, 224, 24, 64, 64),
            inc4c=Inception(512, 128, 128, 256, 24, 64, 64),
            inc4d=Inception(512, 112, 144, 288, 32, 64, 64),
            inc4e=Inception(528, 256, 160, 320, 32, 128, 128),
            inc5a=Inception(832, 256, 160, 320, 32, 128, 128),
            inc5b=Inception(832, 384, 192, 384, 48, 128, 128),
            loss3_fc=Linear(1024, 1000, **kwargs),
            loss1_conv=Convolution2D(512, 128, 1, **kwargs),
            loss1_fc1=Linear(2048, 1024, **kwargs),
            loss1_fc2=Linear(1024, 1000, **kwargs),
            loss2_conv=Convolution2D(528, 128, 1, **kwargs),
            loss2_fc1=Linear(2048, 1024, **kwargs),
            loss2_fc2=Linear(1024, 1000, **kwargs)
        )
        if pretrained_model == 'auto':
            # download + convert the BVLC caffemodel on first use
            _retrieve(
                'bvlc_googlenet.npz',
                'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
                self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
        # Ordered mapping of layer name -> list of callables applied in
        # sequence; ``__call__`` walks this table in order.
        self.functions = OrderedDict([
            ('conv1', [self.conv1, relu]),
            ('pool1', [_max_pooling_2d, _local_response_normalization]),
            ('conv2_reduce', [self.conv2_reduce, relu]),
            ('conv2', [self.conv2, relu, _local_response_normalization]),
            ('pool2', [_max_pooling_2d]),
            ('inception_3a', [self.inc3a]),
            ('inception_3b', [self.inc3b]),
            ('pool3', [_max_pooling_2d]),
            ('inception_4a', [self.inc4a]),
            ('inception_4b', [self.inc4b]),
            ('inception_4c', [self.inc4c]),
            ('inception_4d', [self.inc4d]),
            ('inception_4e', [self.inc4e]),
            ('pool4', [_max_pooling_2d]),
            ('inception_5a', [self.inc5a]),
            ('inception_5b', [self.inc5b]),
            ('pool5', [_average_pooling_2d_k7]),
            ('loss3_fc', [_dropout, self.loss3_fc]),
            ('prob', [softmax]),
            # Since usually the following outputs are not used, they are put
            # after 'prob' to be skipped for efficiency.
            ('loss1_fc2', [_average_pooling_2d_k5, self.loss1_conv, relu,
                           self.loss1_fc1, relu, self.loss1_fc2]),
            ('loss2_fc2', [_average_pooling_2d_k5, self.loss2_conv, relu,
                           self.loss2_fc1, relu, self.loss2_fc2])
        ])

    @property
    def available_layers(self):
        return list(self.functions.keys())

    @classmethod
    def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
        """Converts a pre-trained caffemodel to a chainer model.

        Args:
            path_caffemodel (str): Path of the pre-trained caffemodel.
            path_npz (str): Path of the converted chainer model.
        """
        # As CaffeFunction uses shortcut symbols,
        # we import CaffeFunction here.
        from chainer.links.caffe.caffe_function import CaffeFunction
        caffemodel = CaffeFunction(path_caffemodel)
        chainermodel = cls(pretrained_model=None)
        _transfer_googlenet(caffemodel, chainermodel)
        npz.save_npz(path_npz, chainermodel, compression=False)

    def __call__(self, x, layers=['prob'], train=False):
        """Computes all the feature maps specified by ``layers``.

        Args:
            x (~chainer.Variable): Input variable. It should be prepared by
                ``prepare`` function.
            layers (list of str): The list of layer names you want to extract.
            train (bool): If ``True``, Dropout runs in training mode.
                NOTE(review): this flag is currently not forwarded to the
                ``_dropout`` call -- confirm whether it should be.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
            the key contains the layer name and the value contains
            the corresponding feature map variable.
        """
        h = x
        activations = {}
        # Outputs of inception_4a/4d feed the auxiliary classifiers
        # (loss1_fc2 / loss2_fc2), so they are cached while walking the net.
        inception_4a_cache = None
        inception_4d_cache = None
        target_layers = set(layers)
        for key, funcs in self.functions.items():
            # stop early once every requested layer has been collected
            if len(target_layers) == 0:
                break
            if key == 'loss1_fc2':
                h = inception_4a_cache
            elif key == 'loss2_fc2':
                h = inception_4d_cache
            for func in funcs:
                h = func(h)
            if key in target_layers:
                activations[key] = h
                target_layers.remove(key)
            if key == 'inception_4a':
                inception_4a_cache = h
            elif key == 'inception_4d':
                inception_4d_cache = h
        return activations

    def extract(self, images, layers=['pool5'], size=(224, 224)):
        """Extracts all the feature maps of given images.

        The difference of directly executing ``__call__`` is that
        it directly accepts images as an input and automatically
        transforms them to a proper variable. That is,
        it is also interpreted as a shortcut method that implicitly calls
        ``prepare`` and ``__call__`` functions.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution of resized images used as
                an input of CNN. All the given images are not resized
                if this argument is ``None``, but the resolutions of
                all the images should be the same.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
            the key contains the layer name and the value contains
            the corresponding feature map variable.
        """
        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x))
        return self(x, layers=layers)

    def predict(self, images, oversample=True):
        """Computes all the probabilities of given images.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            oversample (bool): If ``True``, it averages results across
                center, corners, and mirrors. Otherwise, it uses only the
                center.

        Returns:
            ~chainer.Variable: Output that contains the class probabilities
            of given images.
        """
        x = concat_examples([prepare(img, size=(256, 256)) for img in images])
        if oversample:
            # 10 crops per image: center + 4 corners, each mirrored
            x = imgproc.oversample(x, crop_dims=(224, 224))
        else:
            x = x[:, :, 16:240, 16:240]
        # Set volatile option to ON to reduce memory consumption
        x = Variable(self.xp.asarray(x))
        y = self(x, layers=['prob'])['prob']
        if oversample:
            # average the 10 crop predictions back down to one per image
            n = y.data.shape[0] // 10
            y_shape = y.data.shape[1:]
            y = reshape(y, (n, 10) + y_shape)
            y = sum(y, axis=1) / 10
        return y
def prepare(image, size=(224, 224)):
    """Converts the given image to the numpy array for GoogLeNet.

    Note that you have to call this method before ``__call__``
    because the pre-trained GoogLeNet model requires to resize the given
    image, convert the RGB to the BGR, subtract the mean,
    and permute the dimensions before calling.

    Args:
        image (PIL.Image or numpy.ndarray): Input image.
            If an input is ``numpy.ndarray``, its shape must be
            ``(height, width)``, ``(height, width, channels)``,
            or ``(channels, height, width)``, and
            the order of the channels must be RGB.
        size (pair of ints): Size of converted images.
            If ``None``, the given image is not resized.

    Returns:
        numpy.ndarray: The converted output array.
    """
    if not available:
        raise ImportError('PIL cannot be loaded. Install Pillow!\n'
                          'The actual import error is as follows:\n' +
                          str(_import_error))
    if isinstance(image, numpy.ndarray):
        # Normalize CHW / single-channel arrays to HWC for PIL.
        if image.ndim == 3:
            if image.shape[0] == 1:
                image = image[0, :, :]
            elif image.shape[0] == 3:
                image = image.transpose((1, 2, 0))
        image = Image.fromarray(image.astype(numpy.uint8))
    image = image.convert('RGB')
    if size:
        image = image.resize(size)
    image = numpy.asarray(image, dtype=numpy.float32)
    # RGB -> BGR, subtract the dataset mean, then HWC -> CHW.
    image = image[:, :, ::-1]
    image -= GoogLeNet.image_mean
    image = image.transpose((2, 0, 1))
    return image
def _transfer_inception(src, dst, names):
    """Copy inception-module weights from a caffe model into ``dst``.

    For every module name in ``names`` (e.g. ``'3a'``), copies the W and b
    parameters of the six caffe layers of ``inception_<name>`` into the
    corresponding ``inc<name>`` Inception link on ``dst``.
    """
    # (chainer link attribute, caffe layer suffix) pairs of one module
    pairs = (('conv1', '1x1'),
             ('proj3', '3x3_reduce'),
             ('conv3', '3x3'),
             ('proj5', '5x5_reduce'),
             ('conv5', '5x5'),
             ('projp', 'pool_proj'))
    for name in names:
        inception = getattr(dst, 'inc{}'.format(name))
        prefix = 'inception_{}/'.format(name)
        for attr, suffix in pairs:
            src_layer = src[prefix + suffix]
            dst_layer = getattr(inception, attr)
            dst_layer.W.data[:] = src_layer.W.data
            dst_layer.b.data[:] = src_layer.b.data
def _transfer_googlenet(src, dst):
    """Copy every parameter of a caffe GoogLeNet into the chainer model."""
    # plain convolution / fully-connected layers: (dst attribute, caffe name)
    direct = (('conv1', 'conv1/7x7_s2'),
              ('conv2_reduce', 'conv2/3x3_reduce'),
              ('conv2', 'conv2/3x3'),
              ('loss1_conv', 'loss1/conv'),
              ('loss1_fc1', 'loss1/fc'),
              ('loss1_fc2', 'loss1/classifier'),
              ('loss2_conv', 'loss2/conv'),
              ('loss2_fc1', 'loss2/fc'),
              ('loss2_fc2', 'loss2/classifier'),
              ('loss3_fc', 'loss3/classifier'))
    for attr, caffe_name in direct:
        layer = getattr(dst, attr)
        layer.W.data[:] = src[caffe_name].W.data
        layer.b.data[:] = src[caffe_name].b.data
    # the nine inception modules of stages 3, 4 and 5
    _transfer_inception(src, dst, ['3a', '3b',
                                   '4a', '4b', '4c', '4d', '4e',
                                   '5a', '5b'])
def _max_pooling_2d(x):
    # 3x3 max pooling with stride 2, used between GoogLeNet stages.
    return max_pooling_2d(x, ksize=3, stride=2)
def _local_response_normalization(x):
    # LRN with the caffe GoogLeNet parameters (alpha is given per-window).
    return local_response_normalization(x, n=5, k=1, alpha=1e-4/5)
def _average_pooling_2d_k5(x):
    # 5x5 average pooling feeding the auxiliary classifiers.
    return average_pooling_2d(x, ksize=5, stride=3)
def _average_pooling_2d_k7(x):
    # 7x7 global average pooling before the final classifier (pool5).
    return average_pooling_2d(x, ksize=7, stride=1)
def _dropout(x):
    # Dropout ratio 0.4 as in the original GoogLeNet classifier head.
    return dropout(x, ratio=0.4)
def _make_npz(path_npz, url, model):
    """Download the caffemodel from ``url``, convert it to npz and load it."""
    path_caffemodel = download.cached_download(url)
    print('Now loading caffemodel (usually it may take few minutes)')
    GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
    npz.load_npz(path_npz, model)
    return model
def _retrieve(name_npz, url, model):
    """Load the converted model from the cache, converting on a cache miss."""
    root = download.get_dataset_directory('pfnet/chainer/models/')
    path = os.path.join(root, name_npz)
    # first callable builds the cache entry, second loads an existing one
    return download.cache_or_load_file(
        path, lambda path: _make_npz(path, url, model),
        lambda path: npz.load_npz(path, model))
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from synset_words import get_synset_words

    # Load sample images from the ILSVRC2012 directory.
    images = []
    for filename in os.listdir('ILSVRC2012'):
        images.append(plt.imread(os.path.join('ILSVRC2012', filename)))
        break  # TODO: Remove this line

    model = GoogLeNet()
    y = model.predict(images)
    # argsort is ascending, so take the last ten indices in reverse order.
    # (fixed off-by-one: the original slice [:-10:-1] yielded only 9 items)
    top_10 = numpy.argsort(y.data[0])[:-11:-1]
    synset_words = get_synset_words()
    for i in top_10:
        print(y.data[0][i], synset_words[i][1])
    print()
# ---------------------------------------------------------------------------
#!/usr/bin/env python
"""Client actions related to administrating the client and its configuration."""
import os
import platform
import socket
import time
import psutil
import logging
from grr.client import actions
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import stats
class Echo(actions.ActionPlugin):
    """Returns a message to the server."""
    in_rdfvalue = rdfvalue.EchoRequest
    out_rdfvalue = rdfvalue.LogMessage

    def Run(self, args):
        # Reflect the request payload straight back to the caller.
        self.SendReply(args)
class GetHostname(actions.ActionPlugin):
    """Retrieves the host name of the client."""
    out_rdfvalue = rdfvalue.DataBlob

    def Run(self, unused_args):
        # socket.gethostname() returns the local machine's host name.
        self.SendReply(string=socket.gethostname())
class GetPlatformInfo(actions.ActionPlugin):
    """Retrieves platform information."""
    out_rdfvalue = rdfvalue.Uname

    def Run(self, unused_args):
        """Populate platform information into a Uname response."""
        uname = platform.uname()
        fqdn = socket.getfqdn()
        system = uname[0]
        if system == "Windows":
            service_pack = platform.win32_ver()[2]
            kernel = uname[3]  # 5.1.2600
            release = uname[2]  # XP, 2000, 7
            version = uname[3] + service_pack  # 5.1.2600 SP3, 6.1.7601 SP1
        elif system == "Darwin":
            kernel = uname[2]  # 12.2.0
            release = "OSX"  # OSX
            version = platform.mac_ver()[0]  # 10.8.2
        elif system == "Linux":
            kernel = uname[2]  # 3.2.5
            release = platform.linux_distribution()[0]  # Ubuntu
            version = platform.linux_distribution()[1]  # 12.04
        else:
            # Previously kernel/release/version were unbound on any other
            # platform and SendReply raised a NameError. Fall back to the
            # raw uname fields so the action still replies.
            kernel = uname[2]
            release = system
            version = uname[3]
        self.SendReply(system=system,
                       node=uname[1],
                       release=release,
                       version=version,
                       machine=uname[4],  # x86, x86_64
                       kernel=kernel,
                       fqdn=fqdn)
class Kill(actions.ActionPlugin):
    """A client action for terminating (killing) the client.

    Used for testing process respawn.
    """
    out_rdfvalue = rdfvalue.GrrMessage

    def Run(self, unused_arg):
        """Run the kill."""
        # Send a message back to the service to say that we are about to shutdown.
        reply = rdfvalue.GrrStatus(status=rdfvalue.GrrStatus.ReturnedStatus.OK)
        # Queue up the response message, jump the queue.
        self.SendReply(reply, message_type=rdfvalue.GrrMessage.Type.STATUS,
                       priority=rdfvalue.GrrMessage.Priority.HIGH_PRIORITY + 1)
        # Give the http thread some time to send the reply.
        self.grr_worker.Sleep(10)
        # Die ourselves.
        logging.info("Dying on request.")
        # os._exit skips interpreter cleanup so the process dies hard.
        os._exit(242)  # pylint: disable=protected-access
class Hang(actions.ActionPlugin):
    """A client action for simulating the client becoming unresponsive (hanging).

    Used for testing nanny terminating the client.
    """
    in_rdfvalue = rdfvalue.DataBlob

    def Run(self, arg):
        # Sleep a really long time (caller-specified seconds, default 6000).
        time.sleep(arg.integer or 6000)
class BusyHang(actions.ActionPlugin):
    """A client action that burns cpu cycles. Used for testing cpu limits."""
    in_rdfvalue = rdfvalue.DataBlob

    def Run(self, arg):
        # Spin in a tight loop for the requested seconds (default 5).
        duration = arg.integer if (arg and arg.integer) else 5
        deadline = time.time() + duration
        while time.time() < deadline:
            pass
class Bloat(actions.ActionPlugin):
    """A client action that uses lots of memory for testing."""
    in_rdfvalue = rdfvalue.DataBlob

    def Run(self, arg):
        # Allocate `iterations` megabytes (default 1024, ~1 GB) and hold
        # the memory for a minute so memory limits can be exercised.
        iterations = arg.integer or 1024  # Gives 1 gb.
        l = []
        for _ in range(iterations):
            l.append("X" * 1048576)  # 1 mb.
        time.sleep(60)
class GetConfiguration(actions.ActionPlugin):
    """Retrieves the running configuration parameters."""
    in_rdfvalue = None
    out_rdfvalue = rdfvalue.Dict

    # Parameters that must never be sent back to the server.
    BLOCKED_PARAMETERS = ["Client.private_key"]

    def Run(self, unused_arg):
        """Retrieve the configuration except for the blocked parameters."""
        out = self.out_rdfvalue()
        for descriptor in config_lib.CONFIG.type_infos:
            if descriptor.name in self.BLOCKED_PARAMETERS:
                value = "[Redacted]"
            else:
                try:
                    value = config_lib.CONFIG.Get(descriptor.name, default=None)
                except (config_lib.Error, KeyError, AttributeError, ValueError) as e:
                    # Best effort: skip parameters that cannot be read.
                    logging.info("Config reading error: %s", e)
                    continue
            # Unset parameters are omitted from the reply entirely.
            if value is not None:
                out[descriptor.name] = value
        self.SendReply(out)
class UpdateConfiguration(actions.ActionPlugin):
    """Updates configuration parameters on the client."""
    in_rdfvalue = rdfvalue.Dict

    # Only these fields may be changed remotely; all others are refused.
    UPDATEABLE_FIELDS = ["Client.compression",
                         "Client.foreman_check_frequency",
                         "Client.control_urls",
                         "Client.max_post_size",
                         "Client.max_out_queue",
                         "Client.poll_min",
                         "Client.poll_max",
                         "Client.poll_slew",
                         "Client.rss_max"]

    def Run(self, arg):
        """Does the actual work."""
        disallowed_fields = []
        for field, value in arg.items():
            if field in self.UPDATEABLE_FIELDS:
                config_lib.CONFIG.Set(field, value)
            else:
                disallowed_fields.append(field)
        if disallowed_fields:
            logging.warning("Received an update request for restricted field(s) %s.",
                            ",".join(disallowed_fields))
        try:
            config_lib.CONFIG.Write()
        except (IOError, OSError) as e:
            # Best effort: keep the in-memory update even when it cannot be
            # persisted, but log the failure instead of silently swallowing
            # it (the original `pass` hid broken config writes entirely).
            logging.warning("Unable to write client configuration: %s", e)
def GetClientInformation():
    """Builds a ClientInformation record from the running configuration."""
    return rdfvalue.ClientInformation(
        client_name=config_lib.CONFIG["Client.name"],
        client_description=config_lib.CONFIG["Client.description"],
        client_version=int(config_lib.CONFIG["Client.version_numeric"]),
        build_time=config_lib.CONFIG["Client.build_time"],
        labels=config_lib.CONFIG.Get("Client.labels", default=None))
class GetClientInfo(actions.ActionPlugin):
    """Obtains information about the GRR client installed."""
    out_rdfvalue = rdfvalue.ClientInformation

    def Run(self, unused_args):
        # Delegates to the module-level helper shared with SendStartupInfo.
        self.SendReply(GetClientInformation())
class GetClientStats(actions.ActionPlugin):
    """This retrieves some stats about the GRR process."""
    in_rdfvalue = rdfvalue.GetClientStatsRequest
    out_rdfvalue = rdfvalue.ClientStats

    def Run(self, arg):
        """Returns the client stats."""
        if arg is None:
            arg = rdfvalue.GetClientStatsRequest()

        proc = psutil.Process(os.getpid())
        meminfo = proc.memory_info()
        # Timestamps are converted to microseconds since epoch (* 1e6).
        response = rdfvalue.ClientStats(
            RSS_size=meminfo[0],
            VMS_size=meminfo[1],
            memory_percent=proc.memory_percent(),
            bytes_received=stats.STATS.GetMetricValue(
                "grr_client_received_bytes"),
            bytes_sent=stats.STATS.GetMetricValue(
                "grr_client_sent_bytes"),
            create_time=long(proc.create_time() * 1e6),
            boot_time=long(psutil.boot_time() * 1e6))

        # Attach the worker-collected CPU samples that fall inside the
        # requested (start_time, end_time) window.
        samples = self.grr_worker.stats_collector.cpu_samples
        for (timestamp, user, system, percent) in samples:
            if arg.start_time < timestamp < arg.end_time:
                sample = rdfvalue.CpuSample(
                    timestamp=timestamp,
                    user_cpu_time=user,
                    system_cpu_time=system,
                    cpu_percent=percent)
                response.cpu_samples.Append(sample)

        # Likewise for the I/O samples.
        samples = self.grr_worker.stats_collector.io_samples
        for (timestamp, read_bytes, write_bytes) in samples:
            if arg.start_time < timestamp < arg.end_time:
                sample = rdfvalue.IOSample(
                    timestamp=timestamp,
                    read_bytes=read_bytes,
                    write_bytes=write_bytes)
                response.io_samples.Append(sample)

        self.Send(response)

    def Send(self, response):
        # Overridden by GetClientStatsAuto to redirect the reply.
        self.SendReply(response)
class GetClientStatsAuto(GetClientStats):
    """This class is used to send the reply to a well known flow on the server."""

    def Send(self, response):
        # Route the stats to the server's well-known Stats flow instead of
        # replying to a specific client request.
        self.grr_worker.SendReply(
            response,
            session_id=rdfvalue.SessionID("aff4:/flows/W:Stats"),
            response_id=0,
            request_id=0,
            priority=rdfvalue.GrrMessage.Priority.LOW_PRIORITY,
            message_type=rdfvalue.GrrMessage.Type.MESSAGE,
            require_fastpoll=False)
class SendStartupInfo(actions.ActionPlugin):
    """Sends client startup information to a well-known server flow."""

    # No request payload; the reply is a StartupInfo message.
    in_rdfvalue = None
    out_rdfvalue = rdfvalue.StartupInfo

    # Destination flow for all startup messages.
    well_known_session_id = rdfvalue.SessionID("aff4:/flows/W:Startup")

    def Run(self, unused_arg, ttl=None):
        """Returns the startup information.

        Args:
          unused_arg: unused request argument.
          ttl: optional time-to-live for the outgoing message.
        """
        logging.debug("Sending startup information.")
        response = rdfvalue.StartupInfo(
            # Microseconds since the epoch.
            boot_time=long(psutil.boot_time() * 1e6),
            client_info=GetClientInformation())
        self.grr_worker.SendReply(
            response,
            session_id=self.well_known_session_id,
            response_id=0,
            request_id=0,
            priority=rdfvalue.GrrMessage.Priority.LOW_PRIORITY,
            message_type=rdfvalue.GrrMessage.Type.MESSAGE,
            require_fastpoll=False,
            ttl=ttl)
|
|
from __future__ import unicode_literals
import os
import base64
import datetime
import hashlib
import copy
import itertools
import codecs
import six
from bisect import insort
from moto.core import BaseBackend
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall
from .utils import clean_key_name, _VersionedKeyStore
UPLOAD_ID_BYTES = 43
UPLOAD_PART_MIN_SIZE = 5242880
class FakeKey(object):
    """In-memory stand-in for an S3 object: its payload plus tracked metadata."""

    def __init__(self, name, value, storage="STANDARD", etag=None,
                 is_versioned=False, version_id=0):
        self.name = name
        self.value = value
        self.last_modified = datetime.datetime.utcnow()
        self._storage_class = storage
        self._metadata = {}
        self._expiry = None
        self._etag = etag  # cached etag; None means "recompute on demand"
        self._version_id = version_id
        self._is_versioned = is_versioned

    def copy(self, new_name=None):
        """Return a deep copy of this key, optionally under a new name."""
        r = copy.deepcopy(self)
        if new_name is not None:
            r.name = new_name
        return r

    def set_metadata(self, metadata, replace=False):
        """Merge *metadata* into the key; wipe existing entries when replace."""
        if replace:
            self._metadata = {}
        self._metadata.update(metadata)

    def set_storage_class(self, storage_class):
        self._storage_class = storage_class

    def append_to_value(self, value):
        """Append data to the key's payload and refresh its bookkeeping."""
        self.value += value
        self.last_modified = datetime.datetime.utcnow()
        self._etag = None  # must recalculate etag
        if self._is_versioned:
            self._version_id += 1
        else:
            # BUGFIX: this previously assigned 0 to self._is_versioned (a
            # no-op re-falsifying the flag); the intent is to keep the
            # version counter of an unversioned key pinned at 0.
            self._version_id = 0

    def restore(self, days):
        """Record a Glacier-style restore expiring *days* from now."""
        self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)

    @property
    def etag(self):
        # Lazily computed MD5 of the payload, quoted like real S3 etags.
        if self._etag is None:
            value_md5 = hashlib.md5()
            if isinstance(self.value, six.text_type):
                value = self.value.encode("utf-8")
            else:
                value = self.value
            value_md5.update(value)
            self._etag = value_md5.hexdigest()
        return '"{0}"'.format(self._etag)

    @property
    def last_modified_ISO8601(self):
        return iso_8601_datetime_with_milliseconds(self.last_modified)

    @property
    def last_modified_RFC1123(self):
        # Different datetime formats depending on how the key is obtained
        # https://github.com/boto/boto/issues/466
        return rfc_1123_datetime(self.last_modified)

    @property
    def metadata(self):
        return self._metadata

    @property
    def response_dict(self):
        """HTTP response headers describing this key."""
        r = {
            'etag': self.etag,
            'last-modified': self.last_modified_RFC1123,
        }
        if self._storage_class != 'STANDARD':
            r['x-amz-storage-class'] = self._storage_class
        if self._expiry is not None:
            rhdr = 'ongoing-request="false", expiry-date="{0}"'
            r['x-amz-restore'] = rhdr.format(self.expiry_date)
        if self._is_versioned:
            r['x-amz-version-id'] = self._version_id
        return r

    @property
    def size(self):
        return len(self.value)

    @property
    def storage_class(self):
        return self._storage_class

    @property
    def expiry_date(self):
        # None when no restore has been requested.
        if self._expiry is not None:
            return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")
class FakeMultipart(object):
    """State of one in-progress S3 multipart upload."""

    def __init__(self, key_name, metadata):
        self.key_name = key_name
        self.metadata = metadata
        self.parts = {}  # part number -> FakeKey holding that part's bytes
        self.partlist = []  # ordered list of part ID's
        # Random URL-safe-ish token mimicking the opaque upload ids S3 issues.
        rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
        self.id = rand_b64.decode('utf-8').replace('=', '').replace('+', '')

    def complete(self, body):
        """Assemble the upload from *body*, an iterable of (part_num, etag).

        Returns (data, etag); the etag follows S3's multipart format
        "<md5 of concatenated part md5s>-<part count>". Raises InvalidPart
        when a referenced part is missing or its etag mismatches, and
        EntityTooSmall when any part other than the last is under the
        minimum part size.
        """
        decode_hex = codecs.getdecoder("hex_codec")
        total = bytearray()
        md5s = bytearray()
        last = None
        count = 0
        for pn, etag in body:
            part = self.parts.get(pn)
            if part is None or part.etag != etag:
                raise InvalidPart()
            # The size check lags one iteration so only non-final parts
            # must meet the minimum size.
            if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE:
                raise EntityTooSmall()
            part_etag = part.etag.replace('"', '')
            md5s.extend(decode_hex(part_etag)[0])
            total.extend(part.value)
            last = part
            count += 1
        etag = hashlib.md5()
        etag.update(bytes(md5s))
        return total, "{0}-{1}".format(etag.hexdigest(), count)

    def set_part(self, part_id, value):
        """Store (or overwrite) part *part_id*; part numbers start at 1."""
        if part_id < 1:
            # Invalid part numbers are silently ignored.
            return
        key = FakeKey(part_id, value)
        self.parts[part_id] = key
        if part_id not in self.partlist:
            # Keep partlist sorted so list_parts yields in part-number order.
            insort(self.partlist, part_id)
        return key

    def list_parts(self):
        # Yield stored parts in ascending part-number order.
        for part_id in self.partlist:
            yield self.parts[part_id]
class LifecycleRule(object):
    """Plain value object for one bucket lifecycle rule."""

    def __init__(self, id=None, prefix=None, status=None, expiration_days=None,
                 expiration_date=None, transition_days=None,
                 transition_date=None, storage_class=None):
        # Copy every constructor argument onto the instance verbatim.
        params = locals()
        for attr in ('id', 'prefix', 'status', 'expiration_days',
                     'expiration_date', 'transition_days',
                     'transition_date', 'storage_class'):
            setattr(self, attr, params[attr])
class FakeBucket(object):
    """In-memory S3 bucket: keys, multipart uploads, lifecycle and policy."""

    def __init__(self, name, region_name):
        self.name = name
        self.region_name = region_name
        self.keys = _VersionedKeyStore()
        self.multiparts = {}
        self.versioning_status = None
        self.rules = []
        self.policy = None

    @property
    def location(self):
        """Region the bucket was created in."""
        return self.region_name

    @property
    def is_versioned(self):
        """True while object versioning is switched on."""
        return self.versioning_status == 'Enabled'

    def set_lifecycle(self, rules):
        """Replace the lifecycle configuration with *rules* (parsed dicts)."""
        self.rules = []
        for rule in rules:
            expiration = rule.get('Expiration')
            transition = rule.get('Transition')
            exp_days = exp_date = None
            if expiration:
                exp_days = expiration.get('Days')
                exp_date = expiration.get('Date')
            trans_days = trans_date = storage = None
            if transition:
                trans_days = transition.get('Days')
                trans_date = transition.get('Date')
                # StorageClass is accessed unconditionally when a
                # Transition element is present.
                storage = transition['StorageClass']
            self.rules.append(LifecycleRule(
                id=rule.get('ID'),
                prefix=rule['Prefix'],
                status=rule['Status'],
                expiration_days=exp_days,
                expiration_date=exp_date,
                transition_days=trans_days,
                transition_date=trans_date,
                storage_class=storage,
            ))

    def delete_lifecycle(self):
        """Drop every configured lifecycle rule."""
        self.rules = []

    def get_cfn_attribute(self, attribute_name):
        """Resolve a CloudFormation Fn::GetAtt lookup for this bucket."""
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'DomainName':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"')
        elif attribute_name == 'WebsiteURL':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"')
        raise UnformattedGetAttTemplateException()
class S3Backend(BaseBackend):
    """In-memory backend implementing moto's S3 operations.

    Buckets live in a plain dict keyed by (globally unique) bucket name;
    all per-bucket state lives on the FakeBucket/FakeKey objects.
    """

    def __init__(self):
        self.buckets = {}  # bucket name -> FakeBucket

    def create_bucket(self, bucket_name, region_name):
        """Create a bucket; raises BucketAlreadyExists on a name clash."""
        if bucket_name in self.buckets:
            raise BucketAlreadyExists(bucket=bucket_name)
        new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
        self.buckets[bucket_name] = new_bucket
        return new_bucket

    def get_all_buckets(self):
        return self.buckets.values()

    def get_bucket(self, bucket_name):
        # Translate a missing bucket into the S3-style MissingBucket error.
        try:
            return self.buckets[bucket_name]
        except KeyError:
            raise MissingBucket(bucket=bucket_name)

    def delete_bucket(self, bucket_name):
        """Delete an empty bucket; returns False when it still holds keys."""
        bucket = self.get_bucket(bucket_name)
        if bucket.keys:
            # Can't delete a bucket with keys
            return False
        else:
            return self.buckets.pop(bucket_name)

    def set_bucket_versioning(self, bucket_name, status):
        self.get_bucket(bucket_name).versioning_status = status

    def get_bucket_versioning(self, bucket_name):
        return self.get_bucket(bucket_name).versioning_status

    def get_bucket_versions(self, bucket_name, delimiter=None,
                            encoding_type=None,
                            key_marker=None,
                            max_keys=None,
                            version_id_marker=None):
        """Return every stored version of every key in the bucket.

        Only the plain listing is implemented; the real API's filtering
        parameters raise NotImplementedError when supplied.
        """
        bucket = self.get_bucket(bucket_name)
        if any((delimiter, encoding_type, key_marker, version_id_marker)):
            raise NotImplementedError(
                "Called get_bucket_versions with some of delimiter, encoding_type, key_marker, version_id_marker")
        # Flatten the per-key version lists into a single iterable.
        return itertools.chain(*(l for _, l in bucket.keys.iterlists()))

    def get_bucket_policy(self, bucket_name):
        return self.get_bucket(bucket_name).policy

    def set_bucket_policy(self, bucket_name, policy):
        self.get_bucket(bucket_name).policy = policy

    def delete_bucket_policy(self, bucket_name, body):
        bucket = self.get_bucket(bucket_name)
        bucket.policy = None

    def set_bucket_lifecycle(self, bucket_name, rules):
        bucket = self.get_bucket(bucket_name)
        bucket.set_lifecycle(rules)

    def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
        """Create or overwrite a key, bumping the version id when the
        bucket is versioned and the key already exists."""
        key_name = clean_key_name(key_name)
        bucket = self.get_bucket(bucket_name)
        old_key = bucket.keys.get(key_name, None)
        if old_key is not None and bucket.is_versioned:
            new_version_id = old_key._version_id + 1
        else:
            new_version_id = 0
        new_key = FakeKey(
            name=key_name,
            value=value,
            storage=storage,
            etag=etag,
            is_versioned=bucket.is_versioned,
            version_id=new_version_id)
        bucket.keys[key_name] = new_key
        return new_key

    def append_to_key(self, bucket_name, key_name, value):
        key_name = clean_key_name(key_name)
        key = self.get_key(bucket_name, key_name)
        key.append_to_value(value)
        return key

    def get_key(self, bucket_name, key_name, version_id=None):
        """Fetch a key, optionally a specific stored version; None if absent."""
        key_name = clean_key_name(key_name)
        bucket = self.get_bucket(bucket_name)
        if bucket:
            if version_id is None:
                return bucket.keys.get(key_name)
            else:
                # Linear scan of the stored versions for a matching id.
                for key in bucket.keys.getlist(key_name):
                    if str(key._version_id) == str(version_id):
                        return key

    def initiate_multipart(self, bucket_name, key_name, metadata):
        bucket = self.get_bucket(bucket_name)
        new_multipart = FakeMultipart(key_name, metadata)
        bucket.multiparts[new_multipart.id] = new_multipart
        return new_multipart

    def complete_multipart(self, bucket_name, multipart_id, body):
        """Finish a multipart upload and materialize it as a regular key."""
        bucket = self.get_bucket(bucket_name)
        multipart = bucket.multiparts[multipart_id]
        value, etag = multipart.complete(body)
        if value is None:
            return
        del bucket.multiparts[multipart_id]
        # The assembled parts become a normal key carrying the multipart etag.
        key = self.set_key(bucket_name, multipart.key_name, value, etag=etag)
        key.set_metadata(multipart.metadata)
        return key

    def cancel_multipart(self, bucket_name, multipart_id):
        bucket = self.get_bucket(bucket_name)
        del bucket.multiparts[multipart_id]

    def list_multipart(self, bucket_name, multipart_id):
        bucket = self.get_bucket(bucket_name)
        return list(bucket.multiparts[multipart_id].list_parts())

    def get_all_multiparts(self, bucket_name):
        bucket = self.get_bucket(bucket_name)
        return bucket.multiparts

    def set_part(self, bucket_name, multipart_id, part_id, value):
        bucket = self.get_bucket(bucket_name)
        multipart = bucket.multiparts[multipart_id]
        return multipart.set_part(part_id, value)

    def copy_part(self, dest_bucket_name, multipart_id, part_id,
                  src_bucket_name, src_key_name):
        # Upload-part-copy: the part's bytes come from an existing key.
        src_key_name = clean_key_name(src_key_name)
        src_bucket = self.get_bucket(src_bucket_name)
        dest_bucket = self.get_bucket(dest_bucket_name)
        multipart = dest_bucket.multiparts[multipart_id]
        return multipart.set_part(part_id, src_bucket.keys[src_key_name].value)

    def prefix_query(self, bucket, prefix, delimiter):
        """List keys under *prefix*, folding delimiter groups into folders.

        Returns (keys sorted by name, sorted folder prefix strings).
        """
        key_results = set()
        folder_results = set()
        if prefix:
            for key_name, key in bucket.keys.items():
                if key_name.startswith(prefix):
                    key_without_prefix = key_name.replace(prefix, "", 1)
                    if delimiter and delimiter in key_without_prefix:
                        # If delimiter, we need to split out folder_results
                        key_without_delimiter = key_without_prefix.split(delimiter)[0]
                        folder_results.add("{0}{1}{2}".format(prefix, key_without_delimiter, delimiter))
                    else:
                        key_results.add(key)
        else:
            for key_name, key in bucket.keys.items():
                if delimiter and delimiter in key_name:
                    # If delimiter, we need to split out folder_results
                    folder_results.add(key_name.split(delimiter)[0] + delimiter)
                else:
                    key_results.add(key)
        key_results = sorted(key_results, key=lambda key: key.name)
        folder_results = [folder_name for folder_name in sorted(folder_results, key=lambda key: key)]
        return key_results, folder_results

    def delete_key(self, bucket_name, key_name):
        # Raises KeyError when the key does not exist.
        key_name = clean_key_name(key_name)
        bucket = self.get_bucket(bucket_name)
        return bucket.keys.pop(key_name)

    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
        """Server-side COPY of a key, optionally changing storage class."""
        src_key_name = clean_key_name(src_key_name)
        dest_key_name = clean_key_name(dest_key_name)
        src_bucket = self.get_bucket(src_bucket_name)
        dest_bucket = self.get_bucket(dest_bucket_name)
        key = src_bucket.keys[src_key_name]
        if dest_key_name != src_key_name:
            key = key.copy(dest_key_name)
        dest_bucket.keys[dest_key_name] = key
        if storage is not None:
            dest_bucket.keys[dest_key_name].set_storage_class(storage)
# Module-level singleton used by moto's S3 responses/decorators.
s3_backend = S3Backend()
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Unit test for the HardwareThread class.
#
# Copyright (c) 2015 carlosperate https://github.com/carlosperate/
#
# Licensed under The MIT License (MIT), a copy can be found in the LICENSE file
#
from __future__ import unicode_literals, absolute_import
import io
import time
import mock
import types
import unittest
import threading
try:
from LightUpHardware.HardwareThread import HardwareThread
except ImportError:
import os
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(file_dir))
sys.path.insert(0, package_dir)
print("path added: %s" % package_dir)
from LightUpHardware.HardwareThread import HardwareThread
class HardwareThreadTestCase(unittest.TestCase):
""" Tests for HardwareThread class. """
#
# Helper methods
#
def assert_stderr(self, test_srderr, equal=False):
    """Check the captured stderr content and reset it for the next check.

    Args:
        test_srderr: io.StringIO instance mocked in as sys.stderr.
        equal: when True assert nothing was written; otherwise assert
               something was written.
    """
    if equal is True:
        self.assertEqual(test_srderr.getvalue(), '')
    else:
        self.assertNotEqual(test_srderr.getvalue(), '')
    # BUGFIX: rewind before truncating. truncate(0) alone leaves the
    # stream position past the end, so subsequent writes by the code
    # under test would pad the buffer with NUL characters and corrupt
    # later equal=True checks.
    test_srderr.seek(0)
    test_srderr.truncate(0)
    self.assertEqual(test_srderr.getvalue(), '')
#
# Test methods
#
def test_singleton(self):
    """ Tests that HardwareThread only ever yields a single instance. """
    # Start from a clean, un-instantiated singleton
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    self.assertIsNone(HardwareThread._HardwareThread__singleton)
    first = HardwareThread()
    second = HardwareThread()
    singleton = HardwareThread._HardwareThread__singleton
    self.assertIsNotNone(singleton)
    self.assertIsNotNone(first)
    self.assertIsNotNone(second)
    # Both constructor calls must hand back the very same object
    self.assertEqual(id(first), id(second))
    self.assertEqual(id(first), id(singleton))
def test_destructor(self):
    """ Tests _drop() removes the singleton via instance or class. """
    # Same drop sequence, first through the instance, then the class
    for drop_via_class in (False, True):
        instance = HardwareThread()
        self.assertIsNotNone(instance)
        if drop_via_class:
            HardwareThread._drop()
        else:
            instance._drop()
        self.assertIsNone(instance._HardwareThread__singleton)
        self.assertIsNone(HardwareThread._HardwareThread__singleton)
    # Dropping a not-instantiated singleton must complain on stderr
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        HardwareThread._drop()
        self.assert_stderr(test_srderr)
def test_constructor(self):
    """
    Tests the class constructor saving data correctly and outputting errors
    if required.
    """
    mangle = '_HardwareThread__%s'
    members = ('lamp_time', 'lamp_duration', 'room_light_time',
               'room_light_duration', 'coffee_time', 'total_time')

    def assert_all_none(instance=None):
        # Mangled backing attribute and public accessor must both be None
        for member in members:
            if instance is not None:
                self.assertIsNone(getattr(instance, mangle % member))
            self.assertIsNone(getattr(HardwareThread, mangle % member))
            self.assertIsNone(getattr(HardwareThread, member))

    def assert_values(instance, expected):
        # Check instance backing attr, class backing attr and accessor
        for member, value in zip(members, expected):
            self.assertEqual(value, getattr(instance, mangle % member))
            self.assertEqual(value, getattr(HardwareThread, mangle % member))
            self.assertEqual(value, getattr(HardwareThread, member))

    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # Ensure the default values are None
    assert_all_none()
    # Check constructor with no arguments does not affect default values
    hw_thread_instance = HardwareThread()
    self.assertIsNotNone(hw_thread_instance)
    assert_all_none(hw_thread_instance)
    # Test that the argument inputs are saved in the class static variables
    hw_thread_instance = HardwareThread(
        lamp=(1, 2),
        room_light=(3, 4),
        coffee_time=5,
        total_time=6)
    assert_values(hw_thread_instance, (1, 2, 3, 4, 5, 6))
    # lamp and room light can also take lists
    hw_thread_instance = HardwareThread(
        lamp=[7, 8],
        room_light=[9, 10])
    assert_values(hw_thread_instance, (7, 8, 9, 10))
    # Test invalid arguments printing to stderr, so need to capture it
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        # Constructor only checks for list/tuple type and a length of 2
        for keyword in ('lamp', 'room_light'):
            for bad in ((0, 1, 2), (1,), [0, 1, 2], [1]):
                hw_thread_instance = HardwareThread(**{keyword: bad})
                self.assertIsNotNone(hw_thread_instance)
                self.assert_stderr(test_srderr)
    # The lamp_time, lamp_duration, room_light_time, room_light_duration
    # coffee_time, and total_time error checking are done as part of
    # the accessors
def test_lamp_time(self):
    """ Tests the lamp_time accessors. """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # The getter must expose the backing value, which starts as None
    self.assertEqual(HardwareThread._HardwareThread__lamp_time,
                     HardwareThread.lamp_time)
    self.assertIsNone(HardwareThread.lamp_time)
    # The class-level setter works before any instance exists
    HardwareThread.lamp_time = 1
    self.assertEqual(1, HardwareThread._HardwareThread__lamp_time)
    self.assertEqual(1, HardwareThread.lamp_time)
    # An argument-less constructor must leave the stored value untouched
    instance = HardwareThread()
    self.assertEqual(1, instance._HardwareThread__lamp_time)
    self.assertEqual(1, HardwareThread._HardwareThread__lamp_time)
    self.assertEqual(1, HardwareThread.lamp_time)
    # Invalid values are reported on stderr rather than raising
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        for bad_value in (0.0, 'String'):
            HardwareThread.lamp_time = bad_value
            self.assert_stderr(test_srderr)
def test_lamp_duration(self):
    """ Tests the lamp_duration accessors. """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # The getter must expose the backing value, which starts as None
    self.assertEqual(HardwareThread._HardwareThread__lamp_duration,
                     HardwareThread.lamp_duration)
    self.assertIsNone(HardwareThread.lamp_duration)
    # The class-level setter works before any instance exists
    HardwareThread.lamp_duration = 1
    self.assertEqual(1, HardwareThread._HardwareThread__lamp_duration)
    self.assertEqual(1, HardwareThread.lamp_duration)
    # An argument-less constructor must leave the stored value untouched
    instance = HardwareThread()
    self.assertEqual(1, instance._HardwareThread__lamp_duration)
    self.assertEqual(1, HardwareThread._HardwareThread__lamp_duration)
    self.assertEqual(1, HardwareThread.lamp_duration)
    # Invalid values are reported on stderr rather than raising
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        for bad_value in (0.0, 'String'):
            HardwareThread.lamp_duration = bad_value
            self.assert_stderr(test_srderr)
def test_room_light_time(self):
    """ Tests the room_light_time accessors. """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # The getter must expose the backing value, which starts as None
    self.assertEqual(HardwareThread._HardwareThread__room_light_time,
                     HardwareThread.room_light_time)
    self.assertIsNone(HardwareThread.room_light_time)
    # The class-level setter works before any instance exists
    HardwareThread.room_light_time = 1
    self.assertEqual(1, HardwareThread._HardwareThread__room_light_time)
    self.assertEqual(1, HardwareThread.room_light_time)
    # An argument-less constructor must leave the stored value untouched
    instance = HardwareThread()
    self.assertEqual(1, instance._HardwareThread__room_light_time)
    self.assertEqual(1, HardwareThread._HardwareThread__room_light_time)
    self.assertEqual(1, HardwareThread.room_light_time)
    # Invalid values are reported on stderr rather than raising
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        for bad_value in (0.0, 'String'):
            HardwareThread.room_light_time = bad_value
            self.assert_stderr(test_srderr)
def test_room_light_duration(self):
    """ Tests the room_light_duration accessors. """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # The getter must expose the backing value, which starts as None
    self.assertEqual(HardwareThread._HardwareThread__room_light_duration,
                     HardwareThread.room_light_duration)
    self.assertIsNone(HardwareThread.room_light_duration)
    # The class-level setter works before any instance exists
    HardwareThread.room_light_duration = 1
    self.assertEqual(1, HardwareThread._HardwareThread__room_light_duration)
    self.assertEqual(1, HardwareThread.room_light_duration)
    # An argument-less constructor must leave the stored value untouched
    instance = HardwareThread()
    self.assertEqual(1, instance._HardwareThread__room_light_duration)
    self.assertEqual(1, HardwareThread._HardwareThread__room_light_duration)
    self.assertEqual(1, HardwareThread.room_light_duration)
    # Invalid values are reported on stderr rather than raising
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        for bad_value in (0.0, 'String'):
            HardwareThread.room_light_duration = bad_value
            self.assert_stderr(test_srderr)
def test_coffee_time(self):
    """ Tests the coffee_time accessors. """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # The getter must expose the backing value, which starts as None
    self.assertEqual(HardwareThread._HardwareThread__coffee_time,
                     HardwareThread.coffee_time)
    self.assertIsNone(HardwareThread.coffee_time)
    # The class-level setter works before any instance exists
    HardwareThread.coffee_time = 1
    self.assertEqual(1, HardwareThread._HardwareThread__coffee_time)
    self.assertEqual(1, HardwareThread.coffee_time)
    # An argument-less constructor must leave the stored value untouched
    instance = HardwareThread()
    self.assertEqual(1, instance._HardwareThread__coffee_time)
    self.assertEqual(1, HardwareThread._HardwareThread__coffee_time)
    self.assertEqual(1, HardwareThread.coffee_time)
    # Invalid values are reported on stderr rather than raising
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        for bad_value in (0.0, 'String'):
            HardwareThread.coffee_time = bad_value
            self.assert_stderr(test_srderr)
def test_total_time(self):
    """ Tests the total_time accessors. """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()
    # The getter must expose the backing value, which starts as None
    self.assertEqual(HardwareThread._HardwareThread__total_time,
                     HardwareThread.total_time)
    self.assertIsNone(HardwareThread.total_time)
    # The class-level setter works before any instance exists
    HardwareThread.total_time = 1
    self.assertEqual(1, HardwareThread._HardwareThread__total_time)
    self.assertEqual(1, HardwareThread.total_time)
    # An argument-less constructor must leave the stored value untouched
    instance = HardwareThread()
    self.assertEqual(1, instance._HardwareThread__total_time)
    self.assertEqual(1, HardwareThread._HardwareThread__total_time)
    self.assertEqual(1, HardwareThread.total_time)
    # Invalid values are reported on stderr rather than raising
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        for bad_value in (0.0, 'String'):
            HardwareThread.total_time = bad_value
            self.assert_stderr(test_srderr)
def test_setattr(self):
    """
    As the class static variables have accesors set in the metaclass the
    instance of the class could have attributes set with the same name and
    cause undesired effects. The setattr method has been edited to stop this
    behaviour and it is tested here.
    """
    hw_thread_instance = HardwareThread()
    guarded = ('lamp_time', 'lamp_duration', 'room_light_time',
               'room_light_duration', 'coffee_time', 'total_time')
    # Setting or reading any accessor name on the instance must fail
    for attribute in guarded:
        self.assertRaises(
            AttributeError, setattr, hw_thread_instance, attribute, 5)
        self.assertRaises(
            AttributeError, getattr, hw_thread_instance, attribute)
    # Any other attribute name must behave normally
    try:
        hw_thread_instance.new_key = 5
    except AttributeError:
        self.fail('Cannot set new attribute to HardwareThread instance.')
    try:
        a = hw_thread_instance.new_key
        self.assertEqual(a, 5)
    except AttributeError:
        self.fail('Cannot get new attribute to HardwareThread instance.')
def test_run(self):
    """
    Because this unit test is designed to not require the hardware running,
    the methods that launch the threads to control the hw will be mocked.
    This also allows to check if the methods are called at the requested
    intervals.
    This test will take over 5 seconds.
    """
    if HardwareThread._HardwareThread__singleton is not None:
        HardwareThread._drop()

    # Schedule, in seconds relative to the thread start.
    lamp_start = 0
    lamp_duration = 2
    room_start = 1
    room_duration = 2
    coffee_time = 3
    total_time = 5
    start_time = 0

    # NOTE(review): the three mocks close over this function's start_time,
    # so their timing asserts use the value rebound just before the final
    # start() call; assert_thread_not_running's start_time is a separate
    # local and does not affect them - confirm this is intended.
    # Mocking the _launch_lamp method
    def mock_launch_lamp(cls):
        self.launch_lamp_counter += 1
        now = time.time()
        self.assertAlmostEqual(now, start_time + lamp_start, delta=0.2)
    self.launch_lamp_counter = 0
    HardwareThread._launch_lamp = \
        types.MethodType(mock_launch_lamp, HardwareThread)

    # Mocking the _launch_room_light method
    def mock_launch_room_light(cls):
        self.launch_room_light_counter += 1
        now = time.time()
        self.assertAlmostEqual(now, start_time + room_start, delta=0.2)
    self.launch_room_light_counter = 0
    HardwareThread._launch_room_light = \
        types.MethodType(mock_launch_room_light, HardwareThread)

    # Mocking the _launch_coffee method
    def mock_launch_coffee(cls):
        self.launch_coffee_counter += 1
        now = time.time()
        self.assertAlmostEqual(now, start_time + coffee_time, delta=0.2)
    self.launch_coffee_counter = 0
    HardwareThread._launch_coffee = \
        types.MethodType(mock_launch_coffee, HardwareThread)

    def assert_thread_not_running():
        # Run the thread to completion; none of the mocks should fire.
        start_time = time.time()
        hw_thread_instance.start()
        while hw_thread_instance.isAlive():
            pass
        self.assertEqual(self.launch_lamp_counter, 0)
        self.assertEqual(self.launch_room_light_counter, 0)
        self.assertEqual(self.launch_coffee_counter, 0)

    # Test that thread will not run if variables are not set, stderr output
    hw_thread_instance = HardwareThread()
    with mock.patch('sys.stderr', new=io.StringIO()) as test_srderr:
        assert_thread_not_running()
        HardwareThread.lamp_time = lamp_start
        assert_thread_not_running()
        self.assert_stderr(test_srderr)
        HardwareThread.lamp_duration = lamp_duration
        assert_thread_not_running()
        self.assert_stderr(test_srderr)
        HardwareThread.room_light_time = room_start
        assert_thread_not_running()
        self.assert_stderr(test_srderr)
        HardwareThread.room_light_duration = room_duration
        assert_thread_not_running()
        self.assert_stderr(test_srderr)
        HardwareThread.coffee_time = coffee_time
        assert_thread_not_running()
        self.assert_stderr(test_srderr)
        HardwareThread.total_time = total_time

    # Now all variables set, it should run correctly
    start_time = time.time()
    hw_thread_instance.start()
    while hw_thread_instance.isAlive():
        pass
    end_time = time.time()
    # The thread should last total_time and fire each mock exactly once.
    self.assertAlmostEqual(total_time, end_time - start_time, delta=0.1)
    self.assertEqual(self.launch_lamp_counter, 1)
    self.assertEqual(self.launch_room_light_counter, 1)
    self.assertEqual(self.launch_coffee_counter, 1)
def test_multirun(self):
    """
    Tests that the HardwareThread can be launched several times and that
    if one instance launches the thread, and another tries to do the same it
    will wait until it is done.
    This test can take over 8 seconds (2s per thread launch, 4 launches)
    """
    # These thread last 2 seconds
    hw_thread_instance_one = HardwareThread(
        lamp=(0, 1), room_light=(0, 1), coffee_time=0, total_time=2)
    # Second instance shares the singleton state set up by the first.
    hw_thread_instance_two = HardwareThread()

    # Mocking the hardware threads, they will finish as soon as they are
    # launched
    def mock_hw(cls):
        pass
    HardwareThread._launch_lamp = \
        types.MethodType(mock_hw, HardwareThread)
    HardwareThread._launch_room_light = \
        types.MethodType(mock_hw, HardwareThread)
    HardwareThread._launch_coffee = \
        types.MethodType(mock_hw, HardwareThread)

    # Launch the hardware thread, ensure it lasts 2 seconds
    start_time = time.time()
    hw_thread_instance_one.start()
    while hw_thread_instance_one.isAlive():
        pass
    end_time = time.time()
    self.assertAlmostEqual(2, end_time - start_time, delta=0.1)
    # Ensure the hardware thread can be launched multiple times
    start_time = time.time()
    hw_thread_instance_one.start()
    while hw_thread_instance_one.isAlive():
        pass
    end_time = time.time()
    self.assertAlmostEqual(2, end_time - start_time, delta=0.1)
    # Ensure the hardware thread can only be launched once at a time:
    # starting instance two while instance one runs must not add a thread.
    original_numb_threads = threading.activeCount()
    start_time = time.time()
    hw_thread_instance_one.start()
    time.sleep(0.2)  # Enough time for child threads launch and end
    hw_thread_count = threading.activeCount()
    self.assertEqual(original_numb_threads + 1, hw_thread_count)
    hw_thread_instance_two.start()
    self.assertEqual(hw_thread_count, threading.activeCount())
    while hw_thread_instance_two.isAlive():
        pass
    end_time = time.time()
    # Two back-to-back 2 second runs: total ~4 seconds.
    self.assertAlmostEqual(2*2, end_time - start_time, delta=0.1*2)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/python
# ck_setup.py - checks the veyepar setup - reports what features are ready.
from process import process
from main.models import Show, Location, Client
from django.conf import settings
import pw
import rax_uploader
import archive_uploader
import steve.richardapi
import os
import xml.etree.ElementTree
import requests
# from the blender build scripts
# https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
class bcolors:
    """ANSI escape sequences used to colour terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset back to the terminal's default colour
def p_print(text):
    """Emit *text* to stdout with no colour styling."""
    print(text)
def p_okg(text):
    """Emit *text* to stdout coloured green ("okay" status)."""
    print('{}{}{}'.format(bcolors.OKGREEN, text, bcolors.ENDC))
def p_warn(text):
    """Emit *text* to stdout coloured yellow (warning status)."""
    print('{}{}{}'.format(bcolors.WARNING, text, bcolors.ENDC))
def p_fail(text):
    """Emit *text* to stdout coloured red (failure status)."""
    print('{}{}{}'.format(bcolors.FAIL, text, bcolors.ENDC))
class ck_setup(process):
    """Checks the veyepar setup and reports which features are ready.

    Each ck_* method validates one aspect of the configuration (db rows,
    credentials in pw.py, directories, the title svg, upload targets) and
    prints a coloured status line.  `work` drives them all and stops at
    the first hard failure.

    Fixes over the original: Python 2 `print` statements converted to
    `print()` calls (the file already mixed both styles), the mutable
    default argument on ck_pw, `dict.has_key` (removed in Python 3), the
    Python-2-only `e.message`, and NameError fall-throughs on an unset
    client.bucket_id.
    """

    client = None  # main.models.Client row, looked up from the client slug
    show = None    # main.models.Show row, looked up from the show slug

    def ck_pw(self,
              service,
              client_id_field=None,
              cred_keys=()):
        """Check pw.py has credentials for *service*.

        service -- attribute name looked up on the pw module.
        client_id_field -- attribute of self.client whose value indexes
            into the service's credential dict.
        cred_keys -- names expected inside the credential dict.
            (BUG FIX: was a mutable default `[]`; a tuple avoids the
            shared-mutable-default pitfall and is never mutated here.)
        Returns the credential dict, or False when service/key is missing.
        Raises AttributeError when the key is present on the client but
        absent from pw.py.
        """
        try:
            creds = getattr(pw, service)
        except AttributeError as e:
            # 'module' object has no attribute 'foo'
            p_fail('pw.py does not have: "{}"'.format(service))
            return False
        keys = creds.keys()
        print("keys for service {}: {}".format(service, keys))
        key = getattr(self.client, client_id_field, None)
        # import code; code.interact(local=locals())
        print('checking client.{} & pw.py for "{}" in: "{}={{..."'.format(
            client_id_field, key, service))
        if not key:
            p_warn('client.{} is blank'.format(client_id_field))
            return False
        elif key in keys:
            p_okg('key "{}" found in "{}" keys.'.format(key, service))
        else:
            p_warn('key "{}" not found in "{}" keys.'.format(key, service))
            raise AttributeError
        secrets = creds[key]
        # try not to display secret values
        print('names of secrets in pw.py {}:{}'.format(
            key, secrets.keys()))
        print('checking for existance of {}'.format(cred_keys))
        for cred_key in cred_keys:
            if cred_key not in secrets:
                p_warn('"{}" NOT found.'.format(cred_key))
        return secrets

    def ck_client(self):
        """Resolve the client slug from options into self.client."""
        try:
            client_slug = self.options.client
        except AttributeError as e:
            p_fail("No client set in config file or command line.")
            raise e
        p_okg("client_slug: {}".format(client_slug))
        try:
            self.client = Client.objects.get(slug=client_slug)
        except Client.DoesNotExist as e:
            p_fail("client slug not found in db.")
            raise e
        return

    def ck_show(self):
        """Resolve the show slug from options into self.show."""
        try:
            show_slug = self.options.show
        except AttributeError as e:
            p_fail("No show set in config file or command line.")
            raise e
        p_okg("show_slug: {}".format(show_slug))
        try:
            self.show = Show.objects.get(slug=show_slug)
        except Show.DoesNotExist as e:
            p_fail("show slug not found in db.")
            raise e
        return

    def ck_dir(self):
        """Check the per-show working directory (set by set_dirs) exists."""
        if os.path.exists(self.show_dir):
            # typo fix: message used to read "exits"
            print("~/Videos/showdir exists: {}".format(self.show_dir))
        else:
            # print(bcolors.FAIL + "~/Videos/showdir not created yet. run mk_dirs.py"+bcolors.ENDC)
            p_fail("~/Videos/showdir not created yet. run mk_dirs.py")

    def ck_title(self):
        """Check the title svg exists and report which object IDs it has."""
        title_svg = self.client.title_svg
        if title_svg:
            print('client.title_svg: {}'.format(title_svg))
        else:
            print('client.title_svg is blank. using <show.slug>_title.svg')
            title_svg = "%s_title.svg" % (self.show.slug,)
        # title_svg = os.path.join(self.show_dir, "bling", title_svg)
        title_svg = os.path.join(
            os.path.split(os.path.abspath(__file__))[0],
            "bling",
            title_svg)
        p_okg(title_svg)
        if not os.path.exists(title_svg):
            p_fail("title_svg not found.")
        raw_svg = open(title_svg).read()
        # XMLID returns (root_element, {id: element}); index 1 is the id map.
        tree = xml.etree.ElementTree.XMLID(raw_svg)
        keys = ['client',
                'show',
                'title',
                'title2',
                'tag1',
                'authors', 'presenternames',
                'presentertitle',
                'twitter_id',
                'date',
                'time',
                'license', ]
        print("checking title_svg for object IDs, found:")
        found = []
        for key in keys:
            # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
            if key in tree[1]:
                found.append(key)
                print(key, tree[1][key].text)
        if not found:
            p_warn("no keys found in {}".format(title_svg))

    def ck_email(self):
        """Check contact addresses and the email sender setting."""
        if self.client.contacts:
            p_okg("client.contacts: {}".format(self.client.contacts))
        else:
            p_warn("client.contacts: blank")
        try:
            p_okg("sender: {}".format(settings.EMAIL_SENDER))
            # some of these are needed:
            """
            EMAIL_USE_TLS
            EMAIL_HOST
            EMAIL_PORT
            EMAIL_HOST_USER
            EMAIL_HOST_PASSWORD
            """
        except AttributeError as e:
            p_warn("settings.EMAIL_SENDER not set.")

    def ck_richard(self, secrets):
        """Check client.category_key exists on the richard video server."""
        category_key = self.client.category_key
        if category_key:
            p_print("client.category_key: {}".format(category_key))
        else:
            p_warn("client.category_key not set.")
            return False
        print("checking for category...")
        endpoint = "http://{}/api/v2/".format(secrets['host'])
        categories = steve.richardapi.get_all_categories(endpoint)
        cat_titles = [cat['title'] for cat in categories]
        print("found {} categories. first 5: {}".format(
            len(categories), cat_titles[:5]))
        if category_key in cat_titles:
            p_okg('client.category_key:"{}" found.'.format(category_key))
        else:
            p_fail('client.category_key:"{}" NOT found.'.format(category_key))
        return

    def ck_cdn(self, secrets):
        """Check Rackspace Cloud Files credentials and target container."""
        if self.client.rax_id:
            rax_id = self.client.rax_id
            p_okg("client.rax_id: {}".format(rax_id))
        else:
            p_warn("client.rax_id not set.")
            return
        if self.client.bucket_id:
            bucket_id = self.client.bucket_id
            p_okg("client.bucket_id: {}".format(bucket_id))
        else:
            p_fail("client.bucket_id not set.")
            # BUG FIX: previously fell through and hit a NameError on the
            # unset bucket_id below.
            return
        print("checking for valid bucket...")
        cf = rax_uploader.auth(rax_id)
        containers = cf.get_all_containers()
        container_names = [container.name for container in containers]
        print("container_names", container_names)
        if bucket_id in container_names:
            p_okg('"{}" found.'.format(bucket_id))
        else:
            p_fail('"{}" not found.'.format(bucket_id))
        # not sure what to do with this...
        # container = cf.get_container(bucket_id)
        return

    def ck_archive(self, secrets):
        """Check archive.org credentials and that the target bucket exists."""
        if self.client.archive_id:
            archive_id = self.client.archive_id
            p_okg("client.archive_id: {}".format(archive_id))
        else:
            p_warn("client.archive_id not set.")
            return
        if self.client.bucket_id:
            bucket_id = self.client.bucket_id
            p_okg("client.bucket_id: {}".format(bucket_id))
        else:
            p_fail("client.bucket_id not set.")
            # BUG FIX: previously fell through and hit a NameError on the
            # unset bucket_id below.
            return
        print("auth...")
        service = archive_uploader.auth(archive_id)
        print("checking for valid bucket...")
        buckets = service.get_all_buckets()
        bucket_names = [bucket.name for bucket in buckets]
        print("bucket_names", bucket_names)
        if bucket_id in bucket_names:
            p_okg('"{}" found.'.format(bucket_id))
        else:
            p_fail('"{}" not found.'.format(bucket_id))
            p_fail('Either create it or set client.bucket_id to one of the above.')
        bucket = service.get_bucket(bucket_id, headers={})
        # not sure what to do with this...
        # container = cf.get_container(bucket_id)
        return

    def ck_youtube(self, secrets):
        """Check the YouTube OAuth files referenced by pw.py exist."""
        ret = True
        print("looking for client_secrets.json...")
        if not os.path.exists('client_secrets.json'):
            p_fail("client_secrets.json NOT found.")
            ret = False
        print("looking for {}".format(secrets['filename']))
        if not os.path.exists(secrets['filename']):
            p_fail("{} NOT found.".format(secrets['filename']))
            ret = False
        return ret

    def ck_schedule_api(self):
        """Check show.schedule_url is reachable (file:// or http)."""
        schedule_url = self.show.schedule_url
        if schedule_url:
            p_okg("show.schedule_url: {}".format(schedule_url))
        else:
            p_warn("no show.schedule_url")
            return
        if schedule_url.startswith('file'):
            # strip the "file://" prefix to get a local path
            url = schedule_url[7:]
            if not os.path.exists(url):
                print("{} NOT found.".format(url))
        else:
            print("getting...")
            session = requests.session()
            response = session.get(schedule_url, verify=False)
            text = response.text
            print(text[:75])
        auth = pw.addeps.get(self.show.slug, None)
        if auth is not None:
            print("found in pw.addeps:{}".format(auth.keys()))

    def work(self):
        """
        what has happened so far:
        files=config.read(['veyepar.cfg','~/veyepar.cfg'
        self.options, self.args = parser.parse_args()
        """
        try:
            self.ck_client()
            self.ck_show()
            self.set_dirs(self.show)
            self.ck_dir()
            self.ck_title()
            self.ck_schedule_api()
            # email uses local_settings.py
            # self.ck_pw("smtp","email_id")
            self.ck_email()
            secrets = self.ck_pw("richard", "richard_id",
                                 ['host', 'api_key', ])
            if secrets:
                self.ck_richard(secrets)
            secrets = self.ck_pw("rax", "rax_id", ['api_key', 'user'])
            if secrets:
                self.ck_cdn(secrets)
            secrets = self.ck_pw("yt", "youtube_id", ['filename', ])
            if secrets:
                self.ck_youtube(secrets)
            secrets = self.ck_pw("archive", "archive_id", ['access', 'secret'])
            if secrets:
                self.ck_archive(secrets)
        except Exception as e:
            print("tests stopped at")
            # BUG FIX: Exception.message does not exist on Python 3;
            # fall back to str(e) while staying Python 2 compatible.
            print(getattr(e, 'message', str(e)))
            print(e.__class__, e)
            # import code; code.interact(local=locals())
            # raise e
        return
# Script entry point: run all setup checks for the configured client/show.
if __name__ == '__main__':
    p=ck_setup()
    p.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
import scipy.signal
from tvm.topi.nn.util import get_pad_tuple
from tvm.contrib import nnpack
import pytest
@tvm.testing.requires_llvm
def test_fully_connected_inference():
    """nnpack fully_connected_inference plus a scalar bias matches np.dot.

    Skips (inside verify) when the nnpack extern functions are not built
    into this TVM, so the test is a no-op on minimal builds.
    """
    n = 1024  # NOTE(review): unused; shapes are driven by l and m only
    l = 128
    m = 235
    bias = te.var("bias", dtype="float32")
    A = te.placeholder((l,), name="A")
    B = te.placeholder((m, l), name="B")
    C = nnpack.fully_connected_inference(A, B)
    # Add the runtime scalar bias on top of the extern op's result.
    D = te.compute(C.shape, lambda i: C[i] + bias, name="D")
    s = te.create_schedule(D.op)
    def verify(target="llvm"):
        if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
            pytest.skip("extern function is not available")
        if not nnpack.is_available():
            pytest.skip("nnpack is not available")
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A, B, D, bias], target)
        a = tvm.nd.array(np.random.uniform(size=(l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(m, l)).astype(B.dtype), ctx)
        d = tvm.nd.array(np.zeros((m,), dtype=D.dtype), ctx)
        bb = 10.0
        f(a, b, d, bb)
        # Reference: x @ W.T + bias.
        tvm.testing.assert_allclose(d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy().T) + bb, rtol=1e-5)
    verify()
def np_conv(na, nw, padding, stride=1):
    """Reference NCHW 2-D convolution computed with scipy, used as the
    golden result for the nnpack tests below.

    na -- input array, shape (batch, in_channel, in_height, in_width).
    nw -- weight array, indexed as nw[filter, channel] below.
        NOTE(review): num_filter is read from nw.shape[1]; that only
        matches the (OC, IC, K, K) layout used by the callers because
        OC == IC in every test here -- confirm before generalizing.
    padding -- anything get_pad_tuple accepts (int, pair, "SAME"/"VALID").
    stride -- int or (stride_h, stride_w) pair.
    Returns the convolution output as a float ndarray of shape
    (batch, out_channel, out_height, out_width).
    """
    batch, in_channel, in_height, in_width = na.shape
    _, num_filter, kernel_h, kernel_w = nw.shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel_h, kernel_w))
    pad_h = pad_top + pad_bottom
    pad_w = pad_left + pad_right
    out_channel = num_filter
    out_height = (in_height - kernel_h + pad_h) // stride_h + 1
    out_width = (in_width - kernel_w + pad_w) // stride_w + 1
    nb = np.zeros((batch, out_channel, out_height, out_width))
    for n in range(batch):
        for f in range(out_channel):
            for c in range(in_channel):
                if pad_h > 0 or pad_w > 0:
                    apad = np.zeros((in_height + pad_h, in_width + pad_w))
                    apad[pad_top : pad_top + in_height, pad_left : pad_left + in_width] = na[n, c]
                else:
                    apad = na[n, c]
                # rot90 twice flips the kernel both ways so convolve2d
                # performs a true (flipped-kernel) convolution.
                out = scipy.signal.convolve2d(apad, np.rot90(np.rot90(nw[f, c])), mode="valid")
                # BUG FIX: the original sliced with the raw `stride`
                # argument (`out[::stride, ::stride]`), which raises
                # TypeError for tuple strides and silently ignored the
                # stride_h/stride_w split computed above.
                nb[n, f] += out[::stride_h, ::stride_w]
    return nb
@tvm.testing.requires_llvm
def test_convolution_inference():
    """Compare nnpack convolution_inference against the scipy reference
    (np_conv) for every supported algorithm, with and without bias."""
    BATCH = 8
    IH = 48
    IW = 48
    IC = 16
    OC = 16
    K = 3
    PAD = 1
    STRIDE = 1
    # With K=3 and PAD=1 the spatial size is preserved (OH == IH, OW == IW).
    OH = (IH + 2 * PAD - K) + 1
    OW = (IW + 2 * PAD - K) + 1
    dshape = (BATCH, IC, IH, IW)
    kshape = (OC, IC, K, K)
    bshape = (OC,)
    oshape = (BATCH, OC, OH, OW)
    data = te.placeholder(dshape, name="data")
    kernel = te.placeholder(kshape, name="kernel")
    bias = te.placeholder(bshape, name="bias")
    def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias=True):
        # Skip (rather than fail) when nnpack support is not built in.
        if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
            pytest.skip("extern function is not available")
        if not nnpack.is_available():
            pytest.skip("nnpack is not available")
        ctx = tvm.cpu(0)
        output = nnpack.convolution_inference(
            data,
            kernel,
            bias if with_bias else None,
            [PAD, PAD, PAD, PAD],
            [STRIDE, STRIDE],
            algorithm=algorithm,
        )
        s = te.create_schedule(output.op)
        f = tvm.build(s, [data, kernel, bias, output], target)
        na = np.random.uniform(size=dshape).astype(data.dtype)
        nb = np.random.uniform(size=kshape).astype(kernel.dtype)
        # NOTE(review): bias values are all zero even when with_bias=True,
        # so the bias code path runs but non-zero bias is not checked.
        nc = np.zeros(bshape, dtype=bias.dtype)
        ta = tvm.nd.array(na, ctx)
        tb = tvm.nd.array(nb, ctx)
        tc = tvm.nd.array(nc, ctx)
        td = tvm.nd.array(np.zeros(oshape, dtype=output.dtype), ctx)
        f(ta, tb, tc, td)
        nd = np_conv(np.reshape(na, (BATCH, IC, IH, IW)), nb, PAD, STRIDE) + nc.reshape(
            1, bshape[0], 1, 1
        )
        # NOTE(review): reshape uses (BATCH, IC, IH, IW); this equals
        # oshape only because OC == IC and OH == IH, OW == IW above.
        tvm.testing.assert_allclose(td.asnumpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5)
    for algorithm in [
        nnpack.ConvolutionAlgorithm.AUTO,
        nnpack.ConvolutionAlgorithm.FFT_8x8,
        nnpack.ConvolutionAlgorithm.FFT_16x16,
        nnpack.ConvolutionAlgorithm.WT_8x8,
        nnpack.ConvolutionAlgorithm.IMPLICIT_GEMM,
        nnpack.ConvolutionAlgorithm.WT_8x8_FP16,
    ]:
        for with_bias in [True, False]:
            verify(algorithm=algorithm, with_bias=with_bias)
@tvm.testing.requires_llvm
def test_convolution_inference_without_weight_transform():
    """Same check as test_convolution_inference, but with the Winograd
    weight transform performed as a separate op before the convolution."""
    BATCH = 6
    IH = 48
    IW = 48
    IC = 16
    OC = 16
    K = 3
    PAD = 1
    STRIDE = 1
    # With K=3 and PAD=1 the spatial size is preserved (OH == IH, OW == IW).
    OH = (IH + 2 * PAD - K) + 1
    OW = (IW + 2 * PAD - K) + 1
    dshape = (BATCH, IC, IH, IW)
    kshape = (OC, IC, K, K)
    bshape = (OC,)
    oshape = (BATCH, OC, OH, OW)
    data = te.placeholder(dshape, name="data")
    kernel = te.placeholder(kshape, name="kernel")
    bias = te.placeholder(bshape, name="bias")
    def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias=True):
        # Skip (rather than fail) when nnpack support is not built in.
        if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True):
            pytest.skip("extern function is not available")
        if not nnpack.is_available():
            pytest.skip("nnpack is not available")
        ctx = tvm.cpu(0)
        # Pre-transform the weights, then run the transform-free conv.
        transformed_kernel = nnpack.convolution_inference_weight_transform(
            kernel, algorithm=algorithm
        )
        output = nnpack.convolution_inference_without_weight_transform(
            data,
            transformed_kernel,
            bias if with_bias else None,
            [PAD, PAD, PAD, PAD],
            [STRIDE, STRIDE],
            algorithm=algorithm,
        )
        s = te.create_schedule(output.op)
        f = tvm.build(s, [data, kernel, bias, output], target)
        na = np.random.uniform(size=dshape).astype(data.dtype)
        nb = np.random.uniform(size=kshape).astype(kernel.dtype)
        # Unlike the test above, non-zero bias values are exercised here.
        nc = (
            np.random.uniform(size=bshape).astype(bias.dtype)
            if with_bias
            else np.zeros(bshape, dtype=bias.dtype)
        )
        ta = tvm.nd.array(na, ctx)
        tb = tvm.nd.array(nb, ctx)
        tc = tvm.nd.array(nc, ctx)
        td = tvm.nd.array(np.zeros(oshape, dtype=output.dtype), ctx)
        f(ta, tb, tc, td)
        nd = np_conv(np.reshape(na, (BATCH, IC, IH, IW)), nb, PAD, STRIDE) + nc.reshape(
            1, bshape[0], 1, 1
        )
        # NOTE(review): reshape uses (BATCH, IC, IH, IW); equals oshape
        # only because OC == IC and OH == IH, OW == IW above.
        tvm.testing.assert_allclose(td.asnumpy(), nd.reshape(BATCH, IC, IH, IW), rtol=1e-5)
    # Only WT_8x8 supports the separate weight-transform path here.
    for algorithm in [nnpack.ConvolutionAlgorithm.WT_8x8]:
        for with_bias in [True, False]:
            verify(algorithm=algorithm, with_bias=with_bias)
if __name__ == "__main__":
pytest.main()
|
|
"""Debugger Tests"""
import sys
import pytest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # NOQA
from circuits import Debugger
from circuits.core import Event, Component
class test(Event):
    """test Event"""
    # Fired at App below; App.test may raise when asked to, which lets the
    # Debugger's error reporting be exercised.
class App(Component):
    """Component whose ``test`` handler optionally raises an exception."""
    def test(self, raiseException=False):
        # Raising on demand exercises the Debugger's error path.
        if raiseException:
            raise Exception()
class Logger(object):
    """Fake logger that captures the most recent message per level.

    The class-level attributes stay None until the matching method is
    called on an instance.
    """

    debug_msg = None
    error_msg = None

    def debug(self, msg):
        # Remember the latest debug-level message verbatim.
        self.debug_msg = msg

    def error(self, msg):
        # Remember the latest error-level message verbatim.
        self.error_msg = msg
def test_main():
    """Debugger writes each fired event to its file while _events is on."""
    app = App()
    stderr = StringIO()
    debugger = Debugger(file=stderr)
    debugger.register(app)
    # Drain startup events, then clear the buffer so reads start clean.
    while len(app):
        app.flush()
    stderr.seek(0)
    stderr.truncate()
    assert debugger._events
    e = Event()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == str(e)
    stderr.seek(0)
    stderr.truncate()
    # Disabling _events must silence event logging entirely.
    debugger._events = False
    assert not debugger._events
    e = Event()
    app.fire(e)
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
    stderr.seek(0)
    stderr.truncate()
def test_file(tmpdir):
    """Same as test_main, but logging through a real file object."""
    logfile = str(tmpdir.ensure("debug.log"))
    # "w+" so the test can both write (via Debugger) and read back.
    stderr = open(logfile, "w+")
    app = App()
    debugger = Debugger(file=stderr)
    debugger.register(app)
    # Drain startup events, then clear the file so reads start clean.
    while len(app):
        app.flush()
    stderr.seek(0)
    stderr.truncate()
    assert debugger._events
    e = Event()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == str(e)
    stderr.seek(0)
    stderr.truncate()
    # Disabling _events must silence event logging entirely.
    debugger._events = False
    assert not debugger._events
    e = Event()
    app.fire(e)
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
    stderr.seek(0)
    stderr.truncate()
def test_filename(tmpdir):
    """Same as test_file, but Debugger is given a path string and opens
    the file itself; the test reads it via a separate handle."""
    if "__pypy__" in sys.modules:
        pytest.skip("Broken on pypy")
    logfile = str(tmpdir.ensure("debug.log"))
    # Separate read handle onto the same file the Debugger writes.
    # NOTE(review): this handle is never closed; pytest's tmpdir cleanup
    # makes that harmless here.
    stderr = open(logfile, "r+")
    app = App()
    debugger = Debugger(file=logfile)
    debugger.register(app)
    # Drain startup events, then clear the file so reads start clean.
    while len(app):
        app.flush()
    stderr.seek(0)
    stderr.truncate()
    assert debugger._events
    e = Event()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == str(e)
    stderr.seek(0)
    stderr.truncate()
    # Disabling _events must silence event logging entirely.
    debugger._events = False
    assert not debugger._events
    e = Event()
    app.fire(e)
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
    stderr.seek(0)
    stderr.truncate()
def test_exceptions():
    """Debugger logs the event, then the exception, and both can be
    silenced via _events/_errors."""
    app = App()
    stderr = StringIO()
    debugger = Debugger(file=stderr)
    debugger.register(app)
    # Drain startup events, then clear the buffer so reads start clean.
    while len(app):
        app.flush()
    stderr.seek(0)
    stderr.truncate()
    assert debugger._events
    assert debugger._errors
    e = test(raiseException=True)
    app.fire(e)
    app.flush()
    # First flush logs the event itself...
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == str(e)
    stderr.seek(0)
    stderr.truncate()
    app.flush()
    # ...second flush logs the raised exception.
    stderr.seek(0)
    s = stderr.read().strip()
    assert s.startswith("<exception[*]")
    stderr.seek(0)
    stderr.truncate()
    # With both flags off, neither events nor errors are written.
    debugger._events = False
    debugger._errors = False
    assert not debugger._events
    assert not debugger._errors
    e = test(raiseException=True)
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
    stderr.seek(0)
    stderr.truncate()
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
def test_IgnoreEvents():
    """Event types listed in IgnoreEvents are not logged."""
    app = App()
    stderr = StringIO()
    debugger = Debugger(file=stderr)
    debugger.register(app)
    # Drain startup events, then clear the buffer so reads start clean.
    while len(app):
        app.flush()
    stderr.seek(0)
    stderr.truncate()
    assert debugger._events
    # Ignore the "test" event by name.
    debugger.IgnoreEvents.extend(["test"])
    # A generic Event is still logged...
    e = Event()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == str(e)
    stderr.seek(0)
    stderr.truncate()
    # ...but the ignored "test" event produces no output.
    e = test()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
    stderr.seek(0)
    stderr.truncate()
def test_IgnoreChannels():
    """Events on channels listed in IgnoreChannels are not logged."""
    app = App()
    stderr = StringIO()
    debugger = Debugger(file=stderr)
    debugger.register(app)
    # Drain startup events, then clear the buffer so reads start clean.
    while len(app):
        app.flush()
    stderr.seek(0)
    stderr.truncate()
    assert debugger._events
    # Ignore anything fired on the ("*", "test") channel.
    debugger.IgnoreChannels.extend([("*", "test")])
    # A generic Event is still logged...
    e = Event()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == str(e)
    stderr.seek(0)
    stderr.truncate()
    # ...but the "test" event on the ignored channel produces no output.
    e = test()
    app.fire(e)
    app.flush()
    stderr.seek(0)
    s = stderr.read().strip()
    assert s == ""
    stderr.seek(0)
    stderr.truncate()
def test_Logger_debug():
    """When constructed with a logger, events go to logger.debug()."""
    app = App()
    logger = Logger()
    debugger = Debugger(logger=logger)
    debugger.register(app)
    # Drain startup events first.
    while len(app):
        app.flush()
    e = Event()
    app.fire(e)
    app.flush()
    # The fired event is reported as its repr at debug level.
    assert logger.debug_msg == repr(e)
def test_Logger_error():
    """Handler exceptions are reported through logger.error()."""
    app = App()
    logger = Logger()
    debugger = Debugger(logger=logger)
    debugger.register(app)
    # Drain startup events first.
    while len(app):
        app.flush()
    e = test(raiseException=True)
    app.fire(e)
    # Flush until the exception has been processed and logged.
    while len(app):
        app.flush()
    assert logger.error_msg.startswith("ERROR <handler[*][test] (App.test)> (")
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from timeit import timeit
import unittest
import app.parser
class PerformanceTestCases(unittest.TestCase):
    """Micro-benchmarks documenting performance assumptions of ci_edit.

    Each test measures small snippets with timeit and asserts relative
    timings.  A failure means an assumption has gone stale on the current
    interpreter/hardware, not that the editor is broken.  Several tests
    are deliberately disabled (``if 0:``) because of their running time.

    Fix over the original: the benchmarked ``without_default`` snippet
    used ``b is -1`` — identity comparison with an int literal (a
    SyntaxWarning on modern CPython, and only correct due to small-int
    caching); ``==`` preserves the measured behavior.
    """

    def test_array_vs_getter(self):
        """Direct indexing should beat function/method/__getitem__ access."""
        setup = """data = ['a'] * 100\n"""
        setup += """def get(n):\n"""
        setup += """    return data[n]\n"""
        setup += """class B:\n"""
        setup += """    def get_via_member(self, n):\n"""
        setup += """        return data[n]\n"""
        setup += """    def __getitem__(self, n):\n"""
        setup += """        return data[n]\n"""
        setup += """b = B()\n"""
        a = timeit("""x = data[5]\n""", setup=setup, number=10000)
        b = timeit("""x = get(5)\n""", setup=setup, number=10000)
        c = timeit("""x = b.get_via_member(5)\n""", setup=setup, number=10000)
        d = timeit("""x = b[5]\n""", setup=setup, number=10000)
        # print("\n%s | %s %s | %s %s | %s %s" % (a, b, a/b, c, a/c, d, a/d))
        # Calling a function or member is significantly slower than direct
        # access.
        self.assertGreater(b, a * 1.5)
        self.assertGreater(c, a * 2)
        self.assertGreater(d, a * 2)

    def test_slice_vs_startswith(self):
        """Slicing should be on par with char compares; startswith slower."""
        # Disabled: the ratios did not hold on Python 3.
        if 0:
            setup = """x = 'a' * 100\n"""
            a = timeit("""x[:2] == "  "\n""", setup=setup, number=100000)
            b = timeit("""x.startswith("  ")\n""", setup=setup, number=100000)
            c = timeit("""x[0] == " " and x[1] == " "\n""", setup=setup, number=100000)
            # print("\na %s, b %s, c %s | %s %s" % (a, b, c, c, a/c))
            # Calling a function or member is significantly slower than direct
            # access.
            # This check is not performing the same in Python3.
            self.assertGreater(b, a * 1.7)  # b is much slower.
            self.assertGreater(b, c * 1.9)  # b is much slower.
            self.assertGreater(a, c * 0.6)  # a and c are similar.
            self.assertGreater(c, a * 0.4)  # a and c are similar.

    def test_default_parameter(self):
        """A default parameter should cost about the same as passing one."""
        setup = """def with_default(a, b=None):\n"""
        setup += """    if b is not None: return b\n"""
        setup += """    return a*a\n"""
        setup += """def without_default(a, b):\n"""
        # BUG FIX: was "if b is -1" (identity test against an int literal).
        setup += """    if b == -1: return b\n"""
        setup += """    return a*b\n"""
        a = timeit("""with_default(5);""" * 100, setup=setup, number=10000)
        b = timeit("""without_default(5, 0);""" * 100, setup=setup, number=10000)
        # Assert that neither too much faster than the other
        # Note: empirically, this is affected (on a MacBook Air) by whether the
        # machine is running from battery or plugged in.
        # It also appears to vary between Python 2 and Python 3.
        self.assertGreater(a, b * 0.6)
        self.assertGreater(b, a * 0.6)

    def test_char_vs_ord(self):
        """Comparing a character should be faster than ord() + compare."""
        setup = """a="apple"\n"""
        a = timeit("""a[0] > "z";""" * 100, setup=setup, number=10000)
        b = timeit("""ord(a[0]) > 100;""" * 100, setup=setup, number=10000)
        self.assertGreater(b, a)

    def test_insert1(self):
        """Insert into an array of strings vs. one contiguous buffer."""
        # Disabled due to running time.
        if 0:
            # This tests a performance assumption. If this test fails, the
            # program should still work fine, but it may not run as fast as it
            # could by using different assumptions.
            #
            # Insert into an array of strings is expected to be faster than
            # insert into a contiguous buffer of similar size.
            #
            # This is why ci_edit uses both a self.data buffer and a
            # self.lines[] array. Though splitting the data into lines is also
            # expensive, see tests below.
            #
            # At 1,000 bytes the performance is similar.
            a = timeit(
                'data1 = data1[:500] + "x" + data1[500:]',
                setup='data1 = "a" * 1000',
                number=10000,
            )
            b = timeit(
                'data2[5] = data2[5][:50] + "x" + data2[5][50:]',
                setup='data2 = ["a" * 100] * 10',
                number=10000,
            )
            self.assertGreater(a, b * 0.8)
            self.assertLess(a, b * 4)
            # At 10,000 bytes the array of strings is 1.4 to 3 times faster.
            a = timeit(
                'data1 = data1[:5000] + "x" + data1[5000:]',
                setup='data1 = "a" * 10000',
                number=10000,
            )
            b = timeit(
                'data2[50] = data2[50][:50] + "x" + data2[50][50:]',
                setup='data2 = ["a" * 100] * 100',
                number=10000,
            )
            self.assertGreater(a, b * 1.4)
            self.assertLess(a, b * 4)
            # At 100,000 bytes the array of strings is 12 to 24 times faster.
            a = timeit(
                'data1 = data1[:50000] + "x" + data1[50000:]',
                setup='data1 = "a" * 100000',
                number=10000,
            )
            b = timeit(
                'data2[500] = data2[500][:50] + "x" + data2[500][50:]',
                setup='data2 = ["a" * 100] * 1000',
                number=10000,
            )
            self.assertGreater(a, b * 12)
            self.assertLess(a, b * 24)

    def test_split_insert(self):
        """Splitting before every insert loses to buffer-insert."""
        # Disabled due to running time.
        if 0:
            # This tests a performance assumption. If this test fails, the
            # program should still work fine, but it may not run as fast as it
            # could by using different assumptions.
            #
            # With frequent splitting the performance reverses.
            for lineCount in (100, 1000, 5000):
                half = lineCount // 2
                a = timeit(
                    r"""data2 = data1.split('\n'); \
data2[%s] = data2[%s][:50] + "x" + data2[%s][50:]; \
"""
                    % (half, half, half),
                    setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
                    number=10000,
                )
                b = timeit(
                    'data1 = data1[:%s] + "x" + data1[%s:]' % (half, half),
                    setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
                    number=10000,
                )
                print("\n%9s: %s %s" % (lineCount, a, b))
                self.assertGreater(a, b)

    def test_split_insert_balance(self):
        """With ~5 inserts per split the two approaches roughly tie."""
        # Disabled due to running time.
        if 0:
            # This tests a performance assumption. If this test fails, the
            # program should still work fine, but it may not run as fast as it
            # could by using different assumptions.
            #
            # With 5 inserts between splits, the performance is nearly the same.
            for lineCount in (100, 1000, 5000):
                half = lineCount // 2
                a = timeit(
                    r"""data2 = data1.split('\n');"""
                    + (
                        r"""data2[%s] = data2[%s][:50] + "x" + data2[%s][50:]; \
"""
                        % (half, half, half)
                    )
                    * 5,
                    setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
                    number=10000,
                )
                b = timeit(
                    ('data1 = data1[:%s] + "x" + data1[%s:]; ' % (half, half)) * 5,
                    setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
                    number=10000,
                )
                print("\n%9s: %s %s" % (lineCount, a, b))

    def test_instance_vs_tuple(self):
        """Building instances vs. tuples for small records."""
        # Disabled due to running time.
        if 0:
            # This tests a performance assumption. If this test fails, the
            # program should still work fine, but it may not run as fast as it
            # could by using different assumptions.
            for lineCount in (100, 1000, 5000):
                a = timeit(
                    r"""
a = Node()
a.foo = 5
a.bar = 'hi'
a.blah = 7
foo.append(a)
""",
                    setup=r"""
foo = []
class Node:
    def __init__(self):
        self.foo = None
        self.bar = None
        self.blah = None
""",
                    number=10000,
                )
                b = timeit(
                    r"""
a = (5, 'hi', 7)
foo.append(a)
""",
                    setup=r"""
foo = []
""",
                    number=10000,
                )
                print("\n%9s: %s %s" % (lineCount, a, b))
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import glob
import os
import sys
import time
import unittest
basepath = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, basepath + '/gen-py.tornado')
sys.path.insert(0, glob.glob(os.path.join(basepath, '../../lib/py/build/lib*'))[0])
try:
__import__('tornado')
except ImportError:
print("module `tornado` not found, skipping test")
sys.exit(0)
from tornado import gen
from tornado.testing import AsyncTestCase, get_unused_port, gen_test
from thrift import TTornado
from thrift.Thrift import TApplicationException
from thrift.protocol import TBinaryProtocol
from ThriftTest import ThriftTest
from ThriftTest.ttypes import Xception, Xtruct
class TestHandler(object):
    """ThriftTest service implementation used by the tornado test server.

    Most handlers simply echo their argument back; the exception-raising
    and coroutine variants exercise specific server code paths.
    """
    def __init__(self, test_instance):
        # Kept so testOneway can signal completion back to the test case.
        self.test_instance = test_instance
    def testVoid(self):
        pass
    def testString(self, s):
        # An undeclared exception lets the server's error path be tested.
        if s == 'unexpected_error':
            raise Exception(s)
        return s
    def testByte(self, b):
        return b
    def testI16(self, i16):
        return i16
    def testI32(self, i32):
        return i32
    def testI64(self, i64):
        return i64
    def testDouble(self, dub):
        return dub
    def testBinary(self, thing):
        return thing
    def testStruct(self, thing):
        return thing
    def testException(self, s):
        # Xception is declared in the IDL; ValueError is deliberately not.
        if s == 'Xception':
            raise Xception(1001, s)
        elif s == 'throw_undeclared':
            raise ValueError('testing undeclared exception')
    def testOneway(self, seconds):
        """Schedule a callback after *seconds*, then raise.

        The raise checks that exceptions from oneway handlers do not break
        the connection; the timer reports timing back to the test case.
        """
        start = time.time()
        def fire_oneway():
            end = time.time()
            self.test_instance.stop((start, end, seconds))
        self.test_instance.io_loop.add_timeout(
            datetime.timedelta(seconds=seconds),
            fire_oneway)
        raise Exception('testing exception in oneway method')
    def testNest(self, thing):
        return thing
    @gen.coroutine
    def testMap(self, thing):
        # Coroutine handler: verifies the server awaits gen.Return results.
        yield gen.moment
        raise gen.Return(thing)
    def testSet(self, thing):
        return thing
    def testList(self, thing):
        return thing
    def testEnum(self, thing):
        return thing
    def testTypedef(self, thing):
        return thing
class ThriftTestCase(AsyncTestCase):
    """End-to-end tests driving a TTornadoServer with a tornado stream client.

    setUp wires a server and a connected client on the shared test IOLoop;
    the bind/start/open ordering is load-bearing and must not be rearranged.
    """

    def setUp(self):
        super(ThriftTestCase, self).setUp()
        # NOTE(review): get_unused_port is deprecated in newer tornado — confirm
        # the pinned tornado version still provides it.
        self.port = get_unused_port()
        # server
        self.handler = TestHandler(self)
        self.processor = ThriftTest.Processor(self.handler)
        self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        self.server = TTornado.TTornadoServer(self.processor, self.pfactory, io_loop=self.io_loop)
        self.server.bind(self.port)
        self.server.start(1)
        # client
        transport = TTornado.TTornadoStreamTransport('localhost', self.port, io_loop=self.io_loop)
        pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        # Block until the stream transport is actually connected.
        self.io_loop.run_sync(transport.open)
        self.client = ThriftTest.Client(transport, pfactory)

    @gen_test
    def test_void(self):
        v = yield self.client.testVoid()
        self.assertEqual(v, None)

    @gen_test
    def test_string(self):
        v = yield self.client.testString('Python')
        self.assertEqual(v, 'Python')

    @gen_test
    def test_byte(self):
        v = yield self.client.testByte(63)
        self.assertEqual(v, 63)

    @gen_test
    def test_i32(self):
        v = yield self.client.testI32(-1)
        self.assertEqual(v, -1)
        v = yield self.client.testI32(0)
        self.assertEqual(v, 0)

    @gen_test
    def test_i64(self):
        # Value beyond 32-bit range to exercise true 64-bit handling.
        v = yield self.client.testI64(-34359738368)
        self.assertEqual(v, -34359738368)

    @gen_test
    def test_double(self):
        v = yield self.client.testDouble(-5.235098235)
        self.assertEqual(v, -5.235098235)

    @gen_test
    def test_struct(self):
        x = Xtruct()
        x.string_thing = "Zero"
        x.byte_thing = 1
        x.i32_thing = -3
        x.i64_thing = -5
        y = yield self.client.testStruct(x)
        self.assertEqual(y.string_thing, "Zero")
        self.assertEqual(y.byte_thing, 1)
        self.assertEqual(y.i32_thing, -3)
        self.assertEqual(y.i64_thing, -5)

    @gen_test
    def test_oneway(self):
        # Fire-and-forget call, then confirm the connection is still usable.
        self.client.testOneway(1)
        v = yield self.client.testI32(-1)
        self.assertEqual(v, -1)

    @gen_test
    def test_map(self):
        """
        TestHandler.testMap is a coroutine, this test checks if gen.Return() from a coroutine works.
        """
        expected = {1: 1}
        res = yield self.client.testMap(expected)
        self.assertEqual(res, expected)

    @gen_test
    def test_exception(self):
        # Declared exception must arrive typed as Xception.
        try:
            yield self.client.testException('Xception')
        except Xception as ex:
            self.assertEqual(ex.errorCode, 1001)
            self.assertEqual(ex.message, 'Xception')
        else:
            self.fail("should have gotten exception")
        # Undeclared server-side error must surface as TApplicationException.
        try:
            yield self.client.testException('throw_undeclared')
        except TApplicationException:
            pass
        else:
            self.fail("should have gotten exception")
        # No exception expected for this argument.
        yield self.client.testException('Safe')
def suite():
    """Build and return the suite containing all ThriftTestCase tests."""
    loader = unittest.TestLoader()
    tests = unittest.TestSuite()
    tests.addTest(loader.loadTestsFromTestCase(ThriftTestCase))
    return tests
if __name__ == '__main__':
    # Run through the suite() factory above with minimal console output.
    unittest.TestProgram(defaultTest='suite',
                         testRunner=unittest.TextTestRunner(verbosity=1))
|
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import tempfile
import time
from binascii import unhexlify
import six
import cassandra
from cassandra import util
from cassandra.cqltypes import (
CassandraType, DateRangeType, DateType, DecimalType,
EmptyValue, LongType, SetType, UTF8Type,
cql_typename, int8_pack, int64_pack, lookup_casstype,
lookup_casstype_simple, parse_casstype_args,
int32_pack, Int32Type, ListType, MapType
)
from cassandra.encoder import cql_quote
from cassandra.pool import Host
from cassandra.metadata import Token
from cassandra.policies import ConvictionPolicy, SimpleConvictionPolicy
from cassandra.protocol import (
read_inet, read_longstring, read_string,
read_stringmap, write_inet, write_longstring,
write_string, write_stringmap
)
from cassandra.query import named_tuple_factory
from cassandra.util import (
OPEN_BOUND, Date, DateRange, DateRangeBound,
DateRangePrecision, Time, ms_timestamp_from_datetime,
datetime_from_timestamp
)
from tests.unit.util import check_sequence_consistency
class TypeTests(unittest.TestCase):
    """Unit tests for cassandra.cqltypes lookup, parsing, and the protocol
    read/write helpers."""

    def test_lookup_casstype_simple(self):
        """
        Ensure lookup_casstype_simple returns the correct classes
        """
        self.assertEqual(lookup_casstype_simple('AsciiType'), cassandra.cqltypes.AsciiType)
        self.assertEqual(lookup_casstype_simple('LongType'), cassandra.cqltypes.LongType)
        self.assertEqual(lookup_casstype_simple('BytesType'), cassandra.cqltypes.BytesType)
        self.assertEqual(lookup_casstype_simple('BooleanType'), cassandra.cqltypes.BooleanType)
        self.assertEqual(lookup_casstype_simple('CounterColumnType'), cassandra.cqltypes.CounterColumnType)
        self.assertEqual(lookup_casstype_simple('DecimalType'), cassandra.cqltypes.DecimalType)
        self.assertEqual(lookup_casstype_simple('DoubleType'), cassandra.cqltypes.DoubleType)
        self.assertEqual(lookup_casstype_simple('FloatType'), cassandra.cqltypes.FloatType)
        self.assertEqual(lookup_casstype_simple('InetAddressType'), cassandra.cqltypes.InetAddressType)
        self.assertEqual(lookup_casstype_simple('Int32Type'), cassandra.cqltypes.Int32Type)
        self.assertEqual(lookup_casstype_simple('UTF8Type'), cassandra.cqltypes.UTF8Type)
        self.assertEqual(lookup_casstype_simple('DateType'), cassandra.cqltypes.DateType)
        self.assertEqual(lookup_casstype_simple('SimpleDateType'), cassandra.cqltypes.SimpleDateType)
        self.assertEqual(lookup_casstype_simple('ByteType'), cassandra.cqltypes.ByteType)
        self.assertEqual(lookup_casstype_simple('ShortType'), cassandra.cqltypes.ShortType)
        self.assertEqual(lookup_casstype_simple('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType)
        self.assertEqual(lookup_casstype_simple('TimeType'), cassandra.cqltypes.TimeType)
        self.assertEqual(lookup_casstype_simple('UUIDType'), cassandra.cqltypes.UUIDType)
        self.assertEqual(lookup_casstype_simple('IntegerType'), cassandra.cqltypes.IntegerType)
        self.assertEqual(lookup_casstype_simple('MapType'), cassandra.cqltypes.MapType)
        self.assertEqual(lookup_casstype_simple('ListType'), cassandra.cqltypes.ListType)
        self.assertEqual(lookup_casstype_simple('SetType'), cassandra.cqltypes.SetType)
        self.assertEqual(lookup_casstype_simple('CompositeType'), cassandra.cqltypes.CompositeType)
        self.assertEqual(lookup_casstype_simple('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType)
        self.assertEqual(lookup_casstype_simple('ReversedType'), cassandra.cqltypes.ReversedType)
        self.assertEqual(lookup_casstype_simple('DurationType'), cassandra.cqltypes.DurationType)
        self.assertEqual(lookup_casstype_simple('DateRangeType'), cassandra.cqltypes.DateRangeType)
        # Unknown names produce a dynamically built "unrecognized" type class.
        self.assertEqual(str(lookup_casstype_simple('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown')))

    def test_lookup_casstype(self):
        """
        Ensure lookup_casstype returns the correct classes
        """
        self.assertEqual(lookup_casstype('AsciiType'), cassandra.cqltypes.AsciiType)
        self.assertEqual(lookup_casstype('LongType'), cassandra.cqltypes.LongType)
        self.assertEqual(lookup_casstype('BytesType'), cassandra.cqltypes.BytesType)
        self.assertEqual(lookup_casstype('BooleanType'), cassandra.cqltypes.BooleanType)
        self.assertEqual(lookup_casstype('CounterColumnType'), cassandra.cqltypes.CounterColumnType)
        self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType)
        self.assertEqual(lookup_casstype('DecimalType'), cassandra.cqltypes.DecimalType)
        self.assertEqual(lookup_casstype('DoubleType'), cassandra.cqltypes.DoubleType)
        self.assertEqual(lookup_casstype('FloatType'), cassandra.cqltypes.FloatType)
        self.assertEqual(lookup_casstype('InetAddressType'), cassandra.cqltypes.InetAddressType)
        self.assertEqual(lookup_casstype('Int32Type'), cassandra.cqltypes.Int32Type)
        self.assertEqual(lookup_casstype('UTF8Type'), cassandra.cqltypes.UTF8Type)
        self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType)
        self.assertEqual(lookup_casstype('TimeType'), cassandra.cqltypes.TimeType)
        self.assertEqual(lookup_casstype('ByteType'), cassandra.cqltypes.ByteType)
        self.assertEqual(lookup_casstype('ShortType'), cassandra.cqltypes.ShortType)
        self.assertEqual(lookup_casstype('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType)
        self.assertEqual(lookup_casstype('UUIDType'), cassandra.cqltypes.UUIDType)
        self.assertEqual(lookup_casstype('IntegerType'), cassandra.cqltypes.IntegerType)
        self.assertEqual(lookup_casstype('MapType'), cassandra.cqltypes.MapType)
        self.assertEqual(lookup_casstype('ListType'), cassandra.cqltypes.ListType)
        self.assertEqual(lookup_casstype('SetType'), cassandra.cqltypes.SetType)
        self.assertEqual(lookup_casstype('CompositeType'), cassandra.cqltypes.CompositeType)
        self.assertEqual(lookup_casstype('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType)
        self.assertEqual(lookup_casstype('ReversedType'), cassandra.cqltypes.ReversedType)
        self.assertEqual(lookup_casstype('DurationType'), cassandra.cqltypes.DurationType)
        self.assertEqual(lookup_casstype('DateRangeType'), cassandra.cqltypes.DateRangeType)
        self.assertEqual(str(lookup_casstype('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown')))
        # Malformed type strings are rejected.
        self.assertRaises(ValueError, lookup_casstype, 'AsciiType~')

    def test_casstype_parameterized(self):
        # Rendering of parameterized cassandra type names, short and fully qualified.
        self.assertEqual(LongType.cass_parameterized_type_with(()), 'LongType')
        self.assertEqual(LongType.cass_parameterized_type_with((), full=True), 'org.apache.cassandra.db.marshal.LongType')
        self.assertEqual(SetType.cass_parameterized_type_with([DecimalType], full=True), 'org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.DecimalType)')
        self.assertEqual(LongType.cql_parameterized_type(), 'bigint')
        subtypes = (cassandra.cqltypes.UTF8Type, cassandra.cqltypes.UTF8Type)
        self.assertEqual('map<text, text>',
                         cassandra.cqltypes.MapType.apply_parameters(subtypes).cql_parameterized_type())

    def test_datetype_from_string(self):
        # Ensure all formats can be parsed, without exception
        for format in cassandra.cqltypes.cql_timestamp_formats:
            date_string = str(datetime.datetime.now().strftime(format))
            cassandra.cqltypes.DateType.interpret_datestring(date_string)

    def test_cql_typename(self):
        """
        Smoke test cql_typename
        """
        self.assertEqual(cql_typename('DateType'), 'timestamp')
        self.assertEqual(cql_typename('org.apache.cassandra.db.marshal.ListType(IntegerType)'), 'list<varint>')

    def test_named_tuple_colname_substitution(self):
        # Column names that are not valid Python identifiers must be sanitized
        # into usable attribute names on the result tuple.
        colnames = ("func(abc)", "[applied]", "func(func(abc))", "foo_bar", "foo_bar_")
        rows = [(1, 2, 3, 4, 5)]
        result = named_tuple_factory(colnames, rows)[0]
        self.assertEqual(result[0], result.func_abc)
        self.assertEqual(result[1], result.applied)
        self.assertEqual(result[2], result.func_func_abc)
        self.assertEqual(result[3], result.foo_bar)
        self.assertEqual(result[4], result.foo_bar_)

    def test_parse_casstype_args(self):
        # Local stand-in types that capture the (subtypes, names) arguments
        # handed to apply_parameters by the parser.
        class FooType(CassandraType):
            typename = 'org.apache.cassandra.db.marshal.FooType'

            def __init__(self, subtypes, names):
                self.subtypes = subtypes
                self.names = names

            @classmethod
            def apply_parameters(cls, subtypes, names):
                # Names arrive hex-encoded; decode non-null ones to bytes.
                return cls(subtypes, [unhexlify(six.b(name)) if name is not None else name for name in names])

        class BarType(FooType):
            typename = 'org.apache.cassandra.db.marshal.BarType'

        # '63697479' = hex('city'), '61646472657373' = hex('address'),
        # '7a6970' = hex('zip'); the middle subtype is unnamed.
        ctype = parse_casstype_args(''.join((
            'org.apache.cassandra.db.marshal.FooType(',
            '63697479:org.apache.cassandra.db.marshal.UTF8Type,',
            'BarType(61646472657373:org.apache.cassandra.db.marshal.UTF8Type),',
            '7a6970:org.apache.cassandra.db.marshal.UTF8Type',
            ')')))
        self.assertEqual(FooType, ctype.__class__)
        self.assertEqual(UTF8Type, ctype.subtypes[0])
        # middle subtype should be a BarType instance with its own subtypes and names
        self.assertIsInstance(ctype.subtypes[1], BarType)
        self.assertEqual([UTF8Type], ctype.subtypes[1].subtypes)
        self.assertEqual([b"address"], ctype.subtypes[1].names)
        self.assertEqual(UTF8Type, ctype.subtypes[2])
        self.assertEqual([b'city', None, b'zip'], ctype.names)

    def test_empty_value(self):
        self.assertEqual(str(EmptyValue()), 'EMPTY')

    def test_datetype(self):
        now_time_seconds = time.time()
        now_datetime = datetime.datetime.utcfromtimestamp(now_time_seconds)
        # Cassandra timestamps in millis
        now_timestamp = now_time_seconds * 1e3
        # same results serialized
        self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp, 0))
        # deserialize
        # epoc
        expected = 0
        self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected))
        # beyond 32b
        expected = 2 ** 33
        self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(2242, 3, 16, 12, 56, 32))
        # less than epoc (PYTHON-119)
        expected = -770172256
        self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(1945, 8, 5, 23, 15, 44))
        # work around rounding difference among Python versions (PYTHON-230)
        expected = 1424817268.274
        self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000))
        # Large date overflow (PYTHON-452)
        expected = 2177403010.123
        self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000))

    def test_collection_null_support(self):
        """
        Test that null values in collection are decoded properly.

        @jira_ticket PYTHON-1123
        """
        # A length of -1 on the wire marks a null element.
        int_list = ListType.apply_parameters([Int32Type])
        value = (
            int32_pack(2) +   # num items
            int32_pack(-1) +  # size of item1
            int32_pack(4) +   # size of item2
            int32_pack(42)    # item2
        )
        self.assertEqual(
            [None, 42],
            int_list.deserialize(value, 3)
        )
        set_list = SetType.apply_parameters([Int32Type])
        self.assertEqual(
            {None, 42},
            set(set_list.deserialize(value, 3))
        )
        value = (
            int32_pack(2) +   # num items
            int32_pack(4) +   # key size of item1
            int32_pack(42) +  # key item1
            int32_pack(-1) +  # value size of item1
            int32_pack(-1) +  # key size of item2
            int32_pack(4) +   # value size of item2
            int32_pack(42)    # value of item2
        )
        map_list = MapType.apply_parameters([Int32Type, Int32Type])
        self.assertEqual(
            [(42, None), (None, 42)],
            map_list.deserialize(value, 3)._items  # OrderedMapSerializedKey
        )

    def test_write_read_string(self):
        # Round-trip a short string through the protocol helpers.
        with tempfile.TemporaryFile() as f:
            value = u'test'
            write_string(f, value)
            f.seek(0)
            self.assertEqual(read_string(f), value)

    def test_write_read_longstring(self):
        with tempfile.TemporaryFile() as f:
            value = u'test'
            write_longstring(f, value)
            f.seek(0)
            self.assertEqual(read_longstring(f), value)

    def test_write_read_stringmap(self):
        with tempfile.TemporaryFile() as f:
            value = {'key': 'value'}
            write_stringmap(f, value)
            f.seek(0)
            self.assertEqual(read_stringmap(f), value)

    def test_write_read_inet(self):
        # Both IPv4 and IPv6 addresses must round-trip.
        with tempfile.TemporaryFile() as f:
            value = ('192.168.1.1', 9042)
            write_inet(f, value)
            f.seek(0)
            self.assertEqual(read_inet(f), value)
        with tempfile.TemporaryFile() as f:
            value = ('2001:db8:0:f101::1', 9042)
            write_inet(f, value)
            f.seek(0)
            self.assertEqual(read_inet(f), value)

    def test_cql_quote(self):
        self.assertEqual(cql_quote(u'test'), "'test'")
        self.assertEqual(cql_quote('test'), "'test'")
        self.assertEqual(cql_quote(0), '0')
ZERO = datetime.timedelta(0)


class UTC(datetime.tzinfo):
    """Minimal UTC tzinfo implementation (fallback for Python < 3.2)."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"


try:
    utc_timezone = datetime.timezone.utc
except AttributeError:
    # datetime.timezone is unavailable on this interpreter; use the fallback.
    utc_timezone = UTC()
class DateRangeTypeTests(unittest.TestCase):
    """Serialization/deserialization tests for DateRangeType.

    The binary layout used below is: a one-byte range kind, then for each
    present bound an 8-byte millisecond timestamp and a one-byte precision.
    """

    # NOTE(review): `dt` appears unused by the tests in this class — confirm
    # before removing.
    dt = datetime.datetime(1990, 2, 3, 13, 58, 45, 777777)
    # Fixed sample timestamp (milliseconds since the epoch) used for payloads.
    timestamp = 1485963732404

    def test_month_rounding_creation_failure(self):
        """
        @jira_ticket PYTHON-912
        """
        # A MONTH-precision upper bound must round up to the last day of the
        # month, not overflow past it.
        feb_stamp = ms_timestamp_from_datetime(
            datetime.datetime(2018, 2, 25, 18, 59, 59, 0)
        )
        dr = DateRange(OPEN_BOUND,
                       DateRangeBound(feb_stamp, DateRangePrecision.MONTH))
        dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000)
        self.assertEqual(dt.day, 28)
        # Leap year
        feb_stamp_leap_year = ms_timestamp_from_datetime(
            datetime.datetime(2016, 2, 25, 18, 59, 59, 0)
        )
        dr = DateRange(OPEN_BOUND,
                       DateRangeBound(feb_stamp_leap_year, DateRangePrecision.MONTH))
        dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000)
        self.assertEqual(dt.day, 29)

    def test_decode_precision(self):
        self.assertEqual(DateRangeType._decode_precision(6), 'MILLISECOND')

    def test_decode_precision_error(self):
        with self.assertRaises(ValueError):
            DateRangeType._decode_precision(-1)

    def test_encode_precision(self):
        self.assertEqual(DateRangeType._encode_precision('SECOND'), 5)

    def test_encode_precision_error(self):
        with self.assertRaises(ValueError):
            DateRangeType._encode_precision('INVALID')

    def test_deserialize_single_value(self):
        serialized = (int8_pack(0) +
                      int64_pack(self.timestamp) +
                      int8_pack(3))
        self.assertEqual(
            DateRangeType.deserialize(serialized, 5),
            util.DateRange(value=util.DateRangeBound(
                value=datetime.datetime(2017, 2, 1, 15, 42, 12, 404000),
                precision='HOUR')
            )
        )

    def test_deserialize_closed_range(self):
        serialized = (int8_pack(1) +
                      int64_pack(self.timestamp) +
                      int8_pack(2) +
                      int64_pack(self.timestamp) +
                      int8_pack(6))
        self.assertEqual(
            DateRangeType.deserialize(serialized, 5),
            util.DateRange(
                lower_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 0, 0),
                    precision='DAY'
                ),
                upper_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 15, 42, 12, 404000),
                    precision='MILLISECOND'
                )
            )
        )

    def test_deserialize_open_high(self):
        serialized = (int8_pack(2) +
                      int64_pack(self.timestamp) +
                      int8_pack(3))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                lower_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 15, 0),
                    precision='HOUR'
                ),
                upper_bound=util.OPEN_BOUND
            )
        )

    def test_deserialize_open_low(self):
        serialized = (int8_pack(3) +
                      int64_pack(self.timestamp) +
                      int8_pack(4))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                lower_bound=util.OPEN_BOUND,
                upper_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 15, 42, 20, 1000),
                    precision='MINUTE'
                )
            )
        )

    def test_deserialize_single_open(self):
        self.assertEqual(
            util.DateRange(value=util.OPEN_BOUND),
            DateRangeType.deserialize(int8_pack(5), 5)
        )

    def test_serialize_single_value(self):
        # NOTE(review): despite the name, this and the following test_serialize_*
        # methods exercise deserialize() on hand-built payloads — confirm intent.
        serialized = (int8_pack(0) +
                      int64_pack(self.timestamp) +
                      int8_pack(5))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                value=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 15, 42, 12),
                    precision='SECOND'
                )
            )
        )

    def test_serialize_closed_range(self):
        serialized = (int8_pack(1) +
                      int64_pack(self.timestamp) +
                      int8_pack(5) +
                      int64_pack(self.timestamp) +
                      int8_pack(0))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                lower_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 15, 42, 12),
                    precision='SECOND'
                ),
                upper_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 12, 31),
                    precision='YEAR'
                )
            )
        )

    def test_serialize_open_high(self):
        serialized = (int8_pack(2) +
                      int64_pack(self.timestamp) +
                      int8_pack(2))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                lower_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1),
                    precision='DAY'
                ),
                upper_bound=util.OPEN_BOUND
            )
        )

    def test_serialize_open_low(self):
        serialized = (int8_pack(2) +
                      int64_pack(self.timestamp) +
                      int8_pack(3))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                lower_bound=util.DateRangeBound(
                    value=datetime.datetime(2017, 2, 1, 15),
                    precision='HOUR'
                ),
                upper_bound=util.OPEN_BOUND
            )
        )

    def test_deserialize_both_open(self):
        serialized = (int8_pack(4))
        deserialized = DateRangeType.deserialize(serialized, 5)
        self.assertEqual(
            deserialized,
            util.DateRange(
                lower_bound=util.OPEN_BOUND,
                upper_bound=util.OPEN_BOUND
            )
        )

    def test_serialize_single_open(self):
        serialized = DateRangeType.serialize(util.DateRange(
            value=util.OPEN_BOUND,
        ), 5)
        self.assertEqual(int8_pack(5), serialized)

    def test_serialize_both_open(self):
        serialized = DateRangeType.serialize(util.DateRange(
            lower_bound=util.OPEN_BOUND,
            upper_bound=util.OPEN_BOUND
        ), 5)
        self.assertEqual(int8_pack(4), serialized)

    def test_failure_to_serialize_no_value_object(self):
        # Objects without value/bound attributes cannot be serialized.
        self.assertRaises(ValueError, DateRangeType.serialize, object(), 5)

    def test_failure_to_serialize_no_bounds_object(self):
        class no_bounds_object(object):
            value = lower_bound = None
        self.assertRaises(ValueError, DateRangeType.serialize, no_bounds_object, 5)

    def test_serialized_value_round_trip(self):
        # deserialize(serialize(x)) must reproduce the original bytes exactly.
        vals = [six.b('\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'),
                six.b('\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00')]
        for serialized in vals:
            self.assertEqual(
                serialized,
                DateRangeType.serialize(DateRangeType.deserialize(serialized, 0), 0)
            )

    def test_serialize_zero_datetime(self):
        """
        Test serialization where timestamp = 0

        Companion test for test_deserialize_zero_datetime

        @since 2.0.0
        @jira_ticket PYTHON-729
        @expected_result serialization doesn't raise an error

        @test_category data_types
        """
        DateRangeType.serialize(util.DateRange(
            lower_bound=(datetime.datetime(1970, 1, 1), 'YEAR'),
            upper_bound=(datetime.datetime(1970, 1, 1), 'YEAR')
        ), 5)

    def test_deserialize_zero_datetime(self):
        """
        Test deserialization where timestamp = 0

        Reproduces PYTHON-729

        @since 2.0.0
        @jira_ticket PYTHON-729
        @expected_result deserialization doesn't raise an error

        @test_category data_types
        """
        DateRangeType.deserialize(
            (int8_pack(1) +
             int64_pack(0) + int8_pack(0) +
             int64_pack(0) + int8_pack(0)),
            5
        )
class DateRangeDeserializationTests(unittest.TestCase):
    """
    These tests iterate over different timestamp values
    and assert that DateRange rounds its bounds as expected:
    lower bounds are rounded down and upper bounds rounded up
    to the limits of the requested precision.
    """

    # Sample starting value (ms since the epoch) for the lower bound for DateRange.
    starting_lower_value = 1514744108923
    # Sample starting value (ms since the epoch) for the upper bound for DateRange.
    starting_upper_value = 2148761288922
    # Timezone-aware epoch reference used to convert datetimes back to milliseconds.
    epoch = datetime.datetime(1970, 1, 1, tzinfo=utc_timezone)

    def test_deserialize_date_range_milliseconds(self):
        """
        Test rounding from DateRange for milliseconds

        @since 2.0.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        # MILLISECOND precision: no rounding should occur in either direction.
        for i in range(1000):
            lower_value = self.starting_lower_value + i
            upper_value = self.starting_upper_value + i
            dr = DateRange(DateRangeBound(lower_value, DateRangePrecision.MILLISECOND),
                           DateRangeBound(upper_value, DateRangePrecision.MILLISECOND))
            self.assertEqual(lower_value, dr.lower_bound.milliseconds)
            self.assertEqual(upper_value, dr.upper_bound.milliseconds)

    def test_deserialize_date_range_seconds(self):
        """
        Test rounding from DateRange for seconds

        @since 2.0.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        def truncate_last_figures(number, n=3):
            """
            Truncates last n digits of a number
            """
            return int(str(number)[:-n] + '0' * n)

        for i in range(1000):
            lower_value = self.starting_lower_value + i * 900
            upper_value = self.starting_upper_value + i * 900
            dr = DateRange(DateRangeBound(lower_value, DateRangePrecision.SECOND),
                           DateRangeBound(upper_value, DateRangePrecision.SECOND))
            # Lower bound rounds down to the whole second ...
            self.assertEqual(truncate_last_figures(lower_value), dr.lower_bound.milliseconds)
            # ... upper bound rounds up to the last millisecond of the second.
            upper_value = truncate_last_figures(upper_value) + 999
            self.assertEqual(upper_value, dr.upper_bound.milliseconds)

    def test_deserialize_date_range_minutes(self):
        """
        Test rounding from DateRange for minutes

        @since 2.4.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        self._deserialize_date_range({"second": 0, "microsecond": 0},
                                     DateRangePrecision.MINUTE,
                                     # Given a truncated date (ms), adds one
                                     # minute minus one millisecond.
                                     lambda x: x + 59 * 1000 + 999,
                                     lambda original_value, i: original_value + i * 900 * 50)

    def test_deserialize_date_range_hours(self):
        """
        Test rounding from DateRange for hours

        @since 2.4.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        self._deserialize_date_range({"minute": 0, "second": 0, "microsecond": 0},
                                     DateRangePrecision.HOUR,
                                     # Given a truncated date (ms), adds one
                                     # hour minus one millisecond.
                                     lambda x: x +
                                     59 * 60 * 1000 +
                                     59 * 1000 +
                                     999,
                                     lambda original_value, i: original_value + i * 900 * 50 * 60)

    def test_deserialize_date_range_day(self):
        """
        Test rounding from DateRange for days

        @since 2.4.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        self._deserialize_date_range({"hour": 0, "minute": 0, "second": 0, "microsecond": 0},
                                     DateRangePrecision.DAY,
                                     # Given a truncated date (ms), adds one
                                     # day minus one millisecond.
                                     lambda x: x +
                                     23 * 60 * 60 * 1000 +
                                     59 * 60 * 1000 +
                                     59 * 1000 +
                                     999,
                                     lambda original_value, i: original_value + i * 900 * 50 * 60 * 24)

    @unittest.skip("This is currently failing, see PYTHON-912")
    def test_deserialize_date_range_month(self):
        """
        Test rounding from DateRange for months

        @since 2.4.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        def get_upper_bound(seconds):
            """
            function that given a truncated date in seconds from the epoch returns that same date
            but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23
            and days 28, 29, 30 or 31 depending on the month.
            The way to do this is to add one month and leave the date at YEAR-MONTH-01 00:00:00 000000.
            Then subtract one millisecond.
            """
            dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone)
            dt = dt + datetime.timedelta(days=32)
            dt = dt.replace(day=1) - datetime.timedelta(microseconds=1)
            return int((dt - self.epoch).total_seconds() * 1000)

        self._deserialize_date_range({"day": 1, "hour": 0, "minute": 0, "second": 0, "microsecond": 0},
                                     DateRangePrecision.MONTH,
                                     get_upper_bound,
                                     lambda original_value, i: original_value + i * 900 * 50 * 60 * 24 * 30)

    def test_deserialize_date_range_year(self):
        """
        Test rounding from DateRange for year

        @since 2.4.0
        @jira_ticket PYTHON-898
        @expected_result

        @test_category data_types
        """
        def get_upper_bound(seconds):
            """
            function that given a truncated date in seconds from the epoch returns that same date
            but with the microseconds set to 999999, seconds to 59, minutes to 59, hours to 23
            days 28, 29, 30 or 31 depending on the month and months to 12.
            The way to do this is to add one year and leave the date at YEAR-01-01 00:00:00 000000.
            Then subtract one millisecond.
            """
            dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone)
            dt = dt + datetime.timedelta(days=370)
            dt = dt.replace(day=1) - datetime.timedelta(microseconds=1)
            diff = time.mktime(dt.timetuple()) - time.mktime(self.epoch.timetuple())
            return diff * 1000 + 999
            # This doesn't work for big values because it loses precision
            #return int((dt - self.epoch).total_seconds() * 1000)

        self._deserialize_date_range({"month": 1, "day": 1, "hour": 0, "minute": 0, "second": 0, "microsecond": 0},
                                     DateRangePrecision.YEAR,
                                     get_upper_bound,
                                     lambda original_value, i: original_value + i * 900 * 50 * 60 * 24 * 30 * 12 * 7)

    def _deserialize_date_range(self, truncate_kwargs, precision,
                                round_up_truncated_upper_value, increment_loop_variable):
        """
        This function iterates over several DateRange objects determined by
        lower_value upper_value which are given as a value that represents seconds since the epoch.
        We want to make sure the lower_value is correctly rounded down and the upper value is correctly rounded up.

        In the case of rounding down we verify that the rounded down value
        has the appropriate fields set to the minimum they could possibly have. That is
        1 for months, 1 for days, 0 for hours, 0 for minutes, 0 for seconds, 0 for microseconds.
        We use the generic function truncate_date which depends on truncate_kwargs for this

        In the case of rounding up we verify that the rounded up value has the appropriate fields set
        to the maximum they could possibly have. This is calculated by round_up_truncated_upper_value
        whose input is the truncated value from before. It is passed as an argument as the way
        of calculating this is different for every precision.

        :param truncate_kwargs: determine what values to truncate in truncate_date
        :param precision: :class:`~util.DateRangePrecision`
        :param round_up_truncated_upper_value: this is a function that gets a truncated date and
        returns a new date with some fields set to the maximum possible value
        :param increment_loop_variable: this is a function that given a starting value and the iteration
        value returns a new date to serve as lower_bound/upper_bound. We need this because the value by which
        dates are incremented depends on if the precision is seconds, minutes, hours, days and months
        :return:
        """
        def truncate_date(number):
            """
            Given a date in seconds since the epoch, truncates to a certain precision depending on
            truncate_kwargs.
            The return is the truncated date in seconds since the epoch.
            For example if truncate_kwargs = {"hour": 0, "minute": 0, "second": 0, "microsecond": 0} the returned
            value will be the original given date but with the hours, minutes, seconds and microseconds set to 0
            """
            dt = datetime.datetime.fromtimestamp(number / 1000.0, tz=utc_timezone)
            dt = dt.replace(**truncate_kwargs)
            return round((dt - self.epoch).total_seconds() * 1000.0)

        for i in range(1000):
            # We increment the lower_value and upper_value according to increment_loop_variable
            lower_value = increment_loop_variable(self.starting_lower_value, i)
            upper_value = increment_loop_variable(self.starting_upper_value, i)
            # Inside the __init__ for DateRange the rounding up and down should happen
            dr = DateRange(DateRangeBound(lower_value, precision),
                           DateRangeBound(upper_value, precision))
            # We verify that rounded value corresponds with what we would expect
            self.assertEqual(truncate_date(lower_value), dr.lower_bound.milliseconds)
            upper_value = round_up_truncated_upper_value(truncate_date(upper_value))
            self.assertEqual(upper_value, dr.upper_bound.milliseconds)
class TestOrdering(unittest.TestCase):
def _shuffle_lists(self, *args):
return [item for sublist in zip(*args) for item in sublist]
def test_host_order(self):
"""
Test Host class is ordered consistently
@since 3.9
@jira_ticket PYTHON-714
@expected_result the hosts are ordered correctly
@test_category data_types
"""
hosts = [Host(addr, SimpleConvictionPolicy) for addr in
("127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4")]
hosts_equal = [Host(addr, SimpleConvictionPolicy) for addr in
("127.0.0.1", "127.0.0.1")]
hosts_equal_conviction = [Host("127.0.0.1", SimpleConvictionPolicy), Host("127.0.0.1", ConvictionPolicy)]
check_sequence_consistency(self, hosts)
check_sequence_consistency(self, hosts_equal, equal=True)
check_sequence_consistency(self, hosts_equal_conviction, equal=True)
def test_date_order(self):
"""
Test Date class is ordered consistently
@since 3.9
@jira_ticket PYTHON-714
@expected_result the dates are ordered correctly
@test_category data_types
"""
dates_from_string = [Date("2017-01-01"), Date("2017-01-05"), Date("2017-01-09"), Date("2017-01-13")]
dates_from_string_equal = [Date("2017-01-01"), Date("2017-01-01")]
check_sequence_consistency(self, dates_from_string)
check_sequence_consistency(self, dates_from_string_equal, equal=True)
date_format = "%Y-%m-%d"
dates_from_value = [
Date((datetime.datetime.strptime(dtstr, date_format) -
datetime.datetime(1970, 1, 1)).days)
for dtstr in ("2017-01-02", "2017-01-06", "2017-01-10", "2017-01-14")
]
dates_from_value_equal = [Date(1), Date(1)]
check_sequence_consistency(self, dates_from_value)
check_sequence_consistency(self, dates_from_value_equal, equal=True)
dates_from_datetime = [Date(datetime.datetime.strptime(dtstr, date_format))
for dtstr in ("2017-01-03", "2017-01-07", "2017-01-11", "2017-01-15")]
dates_from_datetime_equal = [Date(datetime.datetime.strptime("2017-01-01", date_format)),
Date(datetime.datetime.strptime("2017-01-01", date_format))]
check_sequence_consistency(self, dates_from_datetime)
check_sequence_consistency(self, dates_from_datetime_equal, equal=True)
dates_from_date = [
Date(datetime.datetime.strptime(dtstr, date_format).date()) for dtstr in
("2017-01-04", "2017-01-08", "2017-01-12", "2017-01-16")
]
dates_from_date_equal = [datetime.datetime.strptime(dtstr, date_format) for dtstr in
("2017-01-09", "2017-01-9")]
check_sequence_consistency(self, dates_from_date)
check_sequence_consistency(self, dates_from_date_equal, equal=True)
check_sequence_consistency(self, self._shuffle_lists(dates_from_string, dates_from_value,
dates_from_datetime, dates_from_date))
    def test_timer_order(self):
        """
        Test Time class is ordered consistently

        @since 3.9
        @jira_ticket PYTHON-714
        @expected_result the times are ordered correctly

        @test_category data_types
        """
        # Times constructed from raw nanosecond counts
        time_from_int = [Time(1000), Time(4000), Time(7000), Time(10000)]
        time_from_int_equal = [Time(1), Time(1)]
        check_sequence_consistency(self, time_from_int)
        check_sequence_consistency(self, time_from_int_equal, equal=True)

        # Times constructed from datetime.time objects (varying microseconds)
        time_from_datetime = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us))
                              for us in (2, 5, 8, 11)]
        time_from_datetime_equal = [Time(datetime.time(hour=0, minute=0, second=0, microsecond=us))
                                    for us in (1, 1)]
        check_sequence_consistency(self, time_from_datetime)
        check_sequence_consistency(self, time_from_datetime_equal, equal=True)

        # Times constructed from "HH:MM:SS.nnnnnnnnn" strings
        time_from_string = [Time("00:00:00.000003000"), Time("00:00:00.000006000"),
                            Time("00:00:00.000009000"), Time("00:00:00.000012000")]
        time_from_string_equal = [Time("00:00:00.000004000"), Time("00:00:00.000004000")]
        check_sequence_consistency(self, time_from_string)
        check_sequence_consistency(self, time_from_string_equal, equal=True)

        # Mixed-origin Times must still sort into one consistent order
        check_sequence_consistency(self, self._shuffle_lists(time_from_int, time_from_datetime, time_from_string))
def test_token_order(self):
"""
Test Token class is ordered consistently
@since 3.9
@jira_ticket PYTHON-714
@expected_result the tokens are ordered correctly
@test_category data_types
"""
tokens = [Token(1), Token(2), Token(3), Token(4)]
tokens_equal = [Token(1), Token(1)]
check_sequence_consistency(self, tokens)
check_sequence_consistency(self, tokens_equal, equal=True)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, return new image id
description:
- Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
version_added: "2.0"
options:
source_region:
description:
- The source region the AMI should be copied from.
required: true
source_image_id:
description:
- The ID of the AMI in source region that should be copied.
required: true
name:
description:
- The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
default: "default"
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
encrypted:
description:
- Whether or not the destination snapshots of the copied AMI should be encrypted.
version_added: "2.2"
kms_key_id:
description:
- KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
version_added: "2.2"
wait:
description:
- Wait for the copied AMI to be in state 'available' before returning.
type: bool
default: 'no'
wait_timeout:
description:
- How long before wait gives up, in seconds. Prior to 2.3 the default was 1200.
- From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
This was reenabled in 2.6 to allow timeouts greater than 10 minutes.
default: 600
tags:
description:
- A hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
tag_equality:
description:
- Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
in an existing AMI, the AMI will not be copied again.
default: false
version_added: 2.6
author:
- Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
- Tim C <defunct@defunct.io>
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
'''
EXAMPLES = '''
# Basic AMI Copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
# AMI copy wait until available
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
wait: yes
wait_timeout: 1200 # Default timeout is 600
register: image_id
# Named AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
name: My-Awesome-AMI
description: latest patch
# Tagged AMI copy (will not copy the same AMI twice)
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
tags:
Name: My-Super-AMI
Patch: 1.2.3
tag_equality: yes
# Encrypted AMI copy
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
# Encrypted AMI copy with specified key
- ec2_ami_copy:
source_region: us-east-1
region: eu-west-1
source_image_id: ami-xxxxxxx
encrypted: yes
kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
'''
RETURN = '''
image_id:
description: AMI ID of the copied AMI
returned: always
type: string
sample: ami-e689729e
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
# Optional dependency guard: record whether botocore is importable so the
# module can fail with a clean message instead of an ImportError traceback.
try:
    from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
def copy_image(module, ec2):
    """
    Copies an AMI, optionally skipping the copy when a tag-identical image
    already exists in the destination region (tag_equality mode).

    module : AnsibleModule object
    ec2: boto3 ec2 client for the destination region

    Exits via module.exit_json on success, module.fail_json* on error.
    """
    image = None
    changed = False
    tags = module.params.get('tags')

    params = {'SourceRegion': module.params.get('source_region'),
              'SourceImageId': module.params.get('source_image_id'),
              'Name': module.params.get('name'),
              'Description': module.params.get('description'),
              'Encrypted': module.params.get('encrypted'),
              }
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')

    try:
        if module.params.get('tag_equality'):
            # Bug fix: guard against tags=None — tag_equality without tags
            # previously raised AttributeError ('Unhandled exception').
            filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in (tags or {}).items()]
            filters.append(dict(Name='state', Values=['available', 'pending']))
            images = ec2.describe_images(Filters=filters)
            if len(images['Images']) > 0:
                image = images['Images'][0]
        if not image:
            image = ec2.copy_image(**params)
            if tags:
                ec2.create_tags(Resources=[image['ImageId']],
                                Tags=ansible_dict_to_boto3_tag_list(tags))
            changed = True

        if module.params.get('wait'):
            delay = 15
            max_attempts = module.params.get('wait_timeout') // delay
            # Bug fix: image_id was only bound inside the copy branch, so
            # waiting on a tag-matched pre-existing image raised NameError.
            image_id = image['ImageId']
            ec2.get_waiter('image_available').wait(
                ImageIds=[image_id],
                WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
            )
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
    except WaiterError as e:
        module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Could not copy AMI")
    except Exception as e:
        module.fail_json(msg='Unhandled exception. (%s)' % str(e))
def main():
    """Entry point: build the argument spec, create the EC2 client, copy."""
    argument_spec = ec2_argument_spec()
    # Bug fix: the dict() closing paren was previously placed after `tags`,
    # making tag_equality a keyword argument to dict.update() — it only
    # worked by accident of dict.update(**kwargs) semantics.
    argument_spec.update(dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(default='default'),
        description=dict(default=''),
        encrypted=dict(type='bool', default=False, required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=600),
        tags=dict(type='dict'),
        tag_equality=dict(type='bool', default=False)))
    module = AnsibleAWSModule(argument_spec=argument_spec)
    # TODO: Check botocore version
    ec2 = module.client('ec2')
    copy_image(module, ec2)


if __name__ == '__main__':
    main()
|
|
from datetime import date,timedelta
from dateutil.parser import parse
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.utils.seqrunutils import get_seqrun_date_from_igf_id
from igf_data.utils.gviz_utils import convert_to_gviz_json_for_display
from igf_data.igfdb.igfTables import Base, Project,Sample,Experiment,Run,Seqrun,Pipeline,Pipeline_seed
class Project_status:
  '''
  A class for fetching project status and generating a gviz json file for a
  Google chart Gantt plot

  :param igf_session_class: Database session class
  :param project_igf_id: Project igf id for database lookup
  :param seqrun_work_day: Duration for seqrun jobs in days, default 2
  :param analysis_work_day: Duration for analysis jobs in days, default 1
  :param sequencing_resource_name: Resource name for sequencing data, default Sequencing
  :param demultiplexing_resource_name: Resource name for demultiplexing data, default Demultiplexing
  :param analysis_resource_name: Resource name for analysis data, default Primary Analysis
  :param task_id_label: Label for task id field, default task_id
  :param task_name_label: Label for task name field, default task_name
  :param resource_label: Label for resource field, default resource
  :param start_date_label: Label for start date field, default start_date
  :param end_date_label: Label for end date field, default end_date
  :param duration_label: Label for duration field, default duration
  :param percent_complete_label: Label for percent complete field, default percent_complete
  :param dependencies_label: Label for dependencies field, default dependencies
  '''
  def __init__(self,igf_session_class,project_igf_id,seqrun_work_day=2,
               analysis_work_day=1,sequencing_resource_name='Sequencing',
               demultiplexing_resource_name='Demultiplexing',
               analysis_resource_name='Primary Analysis',
               task_id_label='task_id',task_name_label='task_name',
               resource_label='resource',dependencies_label='dependencies',
               start_date_label='start_date',end_date_label='end_date',
               duration_label='duration',percent_complete_label='percent_complete'):
    self.project_igf_id=project_igf_id
    self.base_adaptor=BaseAdaptor(**{'session_class':igf_session_class})
    self.seqrun_work_day=seqrun_work_day
    self.analysis_work_day=analysis_work_day
    self.sequencing_resource_name=sequencing_resource_name
    self.demultiplexing_resource_name=demultiplexing_resource_name
    self.analysis_resource_name=analysis_resource_name
    self.task_id_label=task_id_label
    self.task_name_label=task_name_label
    self.resource_label=resource_label
    self.start_date_label=start_date_label
    self.end_date_label=end_date_label
    self.duration_label=duration_label
    self.percent_complete_label=percent_complete_label
    self.dependencies_label=dependencies_label

  def generate_gviz_json_file(self,output_file,demultiplexing_pipeline,
                              analysis_pipeline,active_seqrun_igf_id=None):
    '''
    A wrapper method for writing a gviz json file with project status information

    :param output_file: A filepath for writing project status
    :param demultiplexing_pipeline: Name of the demultiplexing pipeline
    :param analysis_pipeline: Name of the analysis pipeline
    :param active_seqrun_igf_id: Igf id of the active seqrun, default None
    :returns: None
    '''
    data=list()
    description=self.get_status_description()                                   # gviz column description
    column_order=self.get_status_column_order()                                 # gviz column order
    seqrun_data=self.get_seqrun_info(\
      active_seqrun_igf_id=active_seqrun_igf_id,
      demultiplexing_pipeline=demultiplexing_pipeline)                          # sequencing / demultiplexing rows
    if len(seqrun_data)>0:
      data.extend(seqrun_data)
    analysis_data=self.get_analysis_info(\
      analysis_pipeline=analysis_pipeline)                                      # analysis status rows
    if len(analysis_data)>0:
      data.extend(analysis_data)
    if len(data)>0:
      convert_to_gviz_json_for_display(\
        data=data,
        description=description,
        columns_order=column_order,
        output_file=output_file)                                                # write gviz json file
    else:
      with open(output_file,'w') as fp:
        fp.write('')                                                            # no status rows: write an empty file

  @staticmethod
  def get_status_description():
    '''
    A method for getting the gviz column description for the status json data

    :returns: A dictionary mapping column id to a (type, label) tuple
    '''
    # Bug fix: the 'duration' and 'percent_complete' labels were swapped
    # ('duration' was labelled 'Percent Complete' and vice versa).
    description={\
      'task_id':('string', 'Task ID'),
      'task_name':('string', 'Task Name'),
      'resource':('string', 'Resource'),
      'start_date':('date', 'Start Date'),
      'end_date':('date', 'End Date'),
      'duration':('number', 'Duration'),
      'percent_complete':('number', 'Percent Complete'),
      'dependencies':('string', 'Dependencies'),
    }
    return description

  @staticmethod
  def get_status_column_order():
    '''
    A method for fetching the column order for the status json data

    :returns: A list containing the column names in display order
    '''
    columns_order=[\
      'task_id',
      'task_name',
      'resource',
      'start_date',
      'end_date',
      'duration',
      'percent_complete',
      'dependencies'
    ]
    return columns_order

  def _add_seqrun_info(self,flowcell_id,seqrun_igf_id,run_found):
    '''
    An internal method for building the sequencing-run row of the status page

    :param flowcell_id: A flowcell id for sequencing info
    :param seqrun_igf_id: A seqrun igf id
    :param run_found: A timestamp when the run was first seen
    :returns: A dictionary with seqrun data for the Gantt plot
    '''
    start_date=parse(get_seqrun_date_from_igf_id(seqrun_igf_id))                # seqrun date encoded in the igf id
    end_date=start_date+timedelta(days=self.seqrun_work_day)                    # expected sequencing finish date
    if end_date > run_found:
      end_date=run_found                                                        # run appeared earlier than expected
    duration=int((end_date-start_date).total_seconds()*1000)                    # duration in milliseconds
    percent_complete=100                                                        # a found run has finished sequencing
    new_data=dict()
    new_data.update(\
      {self.task_id_label:'Run {0}'.format(flowcell_id),
       self.task_name_label:'Run {0}'.format(flowcell_id),
       self.resource_label:self.sequencing_resource_name,
       self.start_date_label:start_date,
       self.end_date_label:end_date,
       self.duration_label:duration,
       self.percent_complete_label:percent_complete,
       self.dependencies_label:None,
      })
    return new_data

  def _reformat_seqrun_data(self,data,active_seqrun_igf_id=None):
    '''
    An internal method for reformatting a seqrun data series

    :param data: A pandas data series containing seqrun entries for a project
    :param active_seqrun_igf_id: Igf id of the active seqrun, default None
    :returns: A list of two dictionaries (sequencing row, demultiplexing row)
    '''
    if 'date_created' not in data:
      raise ValueError('Missing seqrun creation date')

    start_date=data['date_created']
    if 'status' in data and \
       data['status']=='FINISHED':
      end_date=data['date_stamp']
      percent_complete=100
    else:
      end_date=data['date_created']+timedelta(days=self.seqrun_work_day)        # projected demultiplexing end
      percent_complete=0

    if 'status' in data and \
       data['status']!='FINISHED':
      if active_seqrun_igf_id is not None and \
         active_seqrun_igf_id==data['seqrun_igf_id']:
        percent_complete=100                                                    # currently processed run counts as done
        end_date=data['date_stamp']
      else:
        percent_complete=0

    duration=int((end_date-start_date).total_seconds()*1000)
    if duration < 86400000:
      end_date=None                                                             # minimum duration is 1 day for the plot

    new_data=dict()
    new_data.update(\
      {self.task_id_label:data['flowcell_id'],
       self.task_name_label:'Flowcell {0}'.format(data['flowcell_id']),
       self.resource_label:self.demultiplexing_resource_name,
       self.start_date_label:data['date_created'],
       self.end_date_label:end_date,
       self.duration_label:duration,
       self.percent_complete_label:percent_complete,
       self.dependencies_label:'Run {0}'.format(data['flowcell_id']),
      })
    seqrun_data=self._add_seqrun_info(\
      flowcell_id=data['flowcell_id'],
      seqrun_igf_id=data['seqrun_igf_id'],
      run_found=start_date)                                                     # matching sequencing row
    new_data_list=[seqrun_data,new_data]
    return new_data_list

  def get_analysis_info(self,analysis_pipeline):
    '''
    A method for fetching all active experiments and their run status for a project

    :param analysis_pipeline: Name of the analysis pipeline
    :returns: A list of dictionaries containing the analysis information
    '''
    base=self.base_adaptor
    base.start_session()
    query=base.session.\
          query(Experiment.experiment_igf_id,
                Pipeline_seed.status,
                Pipeline_seed.date_stamp,
                Seqrun.flowcell_id).\
          join(Run,Experiment.experiment_id==Run.experiment_id).\
          join(Sample,Sample.sample_id==Experiment.sample_id).\
          join(Project,Project.project_id==Sample.project_id).\
          join(Pipeline_seed,Experiment.experiment_id==Pipeline_seed.seed_id).\
          join(Pipeline,Pipeline.pipeline_id==Pipeline_seed.pipeline_id).\
          join(Seqrun,Seqrun.seqrun_id==Run.seqrun_id).\
          filter(Run.experiment_id==Experiment.experiment_id).\
          filter(Seqrun.seqrun_id==Run.seqrun_id).\
          filter(Experiment.sample_id==Sample.sample_id).\
          filter(Sample.project_id==Project.project_id).\
          filter(Pipeline_seed.seed_table=='experiment').\
          filter(Sample.status=='ACTIVE').\
          filter(Experiment.status=='ACTIVE').\
          filter(Run.status=='ACTIVE').\
          filter(Seqrun.reject_run=='N').\
          filter(Pipeline.pipeline_id==Pipeline_seed.pipeline_id).\
          filter(Pipeline.pipeline_name==analysis_pipeline).\
          filter(Project.project_igf_id==self.project_igf_id)
    results=base.fetch_records(query=query,
                               output_mode='dataframe')
    base.close_session()
    new_data=list()
    if len(results.index)>0:
      flowcell_ids=list(set(results['flowcell_id'].values))                     # analysis depends on these flowcells
      results=results.drop(['flowcell_id'],axis=1).drop_duplicates()
      status_data=[ {grp:len(g_data.index),'total':len(results.index)}
                     for grp, g_data in results.groupby('status')]              # per-status experiment counts
      pct_complete=0
      incomplete_exp=0
      for status in status_data:
        if 'FINISHED' in status:
          pct_complete=int(status['FINISHED']/status['total']*100)              # percent of finished experiments
          incomplete_exp=int(status['total']-status['FINISHED'])
      first_update=results['date_stamp'].min()
      first_update_status=results[results['date_stamp']==first_update]['status'].values[0]
      if first_update_status=='SEEDED':
        start_date=first_update
      else:
        start_date=first_update-timedelta(days=self.analysis_work_day)          # back-date the analysis start
      last_update=results['date_stamp'].max()
      if incomplete_exp>0:
        end_date=last_update+incomplete_exp*timedelta(days=self.analysis_work_day)  # projected end date
      else:
        end_date=last_update                                                    # all experiments finished
      duration=int((end_date-start_date).total_seconds()*1000)                  # analysis duration in ms
      new_data.append(\
        {self.task_id_label:'Primary Analysis',
         self.task_name_label:'Primary Analysis',
         self.resource_label:self.analysis_resource_name,
         self.start_date_label:start_date,
         self.end_date_label:end_date,
         self.duration_label:duration,
         self.percent_complete_label:pct_complete,
         self.dependencies_label:','.join(flowcell_ids),
        })
    return new_data

  def get_seqrun_info(self,active_seqrun_igf_id=None,
                      demultiplexing_pipeline=None):
    '''
    A method for fetching all active sequencing runs for a project

    :param active_seqrun_igf_id: Seqrun igf id for the current run, default None
    :param demultiplexing_pipeline: Name of the demultiplexing pipeline, default None
    :returns: A list of dictionaries containing seqrun information
    '''
    base=self.base_adaptor
    base.start_session()
    # Build only the query that is actually used (previously the plain query
    # was always built and then discarded when a pipeline name was given).
    if demultiplexing_pipeline is not None:
      query=base.session.\
            query(Seqrun.seqrun_igf_id,
                  Seqrun.flowcell_id,
                  Seqrun.date_created,
                  Pipeline_seed.status,
                  Pipeline_seed.date_stamp).\
            join(Run,Seqrun.seqrun_id==Run.seqrun_id).\
            join(Experiment,Experiment.experiment_id==Run.experiment_id).\
            join(Sample,Sample.sample_id==Experiment.sample_id).\
            join(Project,Project.project_id==Sample.project_id).\
            join(Pipeline_seed,Seqrun.seqrun_id==Pipeline_seed.seed_id).\
            join(Pipeline,Pipeline.pipeline_id==Pipeline_seed.pipeline_id).\
            filter(Seqrun.seqrun_id==Run.seqrun_id).\
            filter(Experiment.experiment_id==Run.experiment_id).\
            filter(Sample.sample_id==Experiment.sample_id).\
            filter(Project.project_id==Sample.project_id).\
            filter(Project.project_igf_id==self.project_igf_id).\
            filter(Pipeline_seed.seed_table=='seqrun').\
            filter(Pipeline.pipeline_name==demultiplexing_pipeline)
    else:
      query=base.session.\
            query(Seqrun.seqrun_igf_id,
                  Seqrun.flowcell_id,
                  Seqrun.date_created).\
            join(Run,Seqrun.seqrun_id==Run.seqrun_id).\
            join(Experiment,Experiment.experiment_id==Run.experiment_id).\
            join(Sample,Sample.sample_id==Experiment.sample_id).\
            join(Project,Project.project_id==Sample.project_id).\
            filter(Seqrun.seqrun_id==Run.seqrun_id).\
            filter(Seqrun.reject_run=='N').\
            filter(Experiment.experiment_id==Run.experiment_id).\
            filter(Sample.sample_id==Experiment.sample_id).\
            filter(Project.project_id==Sample.project_id).\
            filter(Project.project_igf_id==self.project_igf_id)
    results=base.fetch_records(query=query,
                               output_mode='dataframe')
    base.close_session()
    results.drop_duplicates(inplace=True)
    new_data=list()
    if len(results.index)>0:
      new_data.extend(\
        results.\
        apply(lambda data: self._reformat_seqrun_data(\
                             data,
                             active_seqrun_igf_id=active_seqrun_igf_id),
              axis=1))                                                          # one (seqrun, demux) pair per row
      new_data=[entry for data in new_data
                      for entry in data]                                        # flatten the pairs
    return new_data
if __name__=='__main__':
  # Manual smoke test: build a throw-away sqlite database, load minimal
  # platform/project/sample/seqrun/experiment/run/pipeline fixtures, then
  # exercise Project_status (calls left commented out below).
  import os, sqlalchemy
  from igf_data.utils.dbutils import read_dbconf_json
  from igf_data.igfdb.projectadaptor import ProjectAdaptor
  from igf_data.igfdb.sampleadaptor import SampleAdaptor
  from igf_data.igfdb.platformadaptor import PlatformAdaptor
  from igf_data.igfdb.seqrunadaptor import SeqrunAdaptor
  from igf_data.igfdb.experimentadaptor import ExperimentAdaptor
  from igf_data.igfdb.runadaptor import RunAdaptor
  from igf_data.igfdb.pipelineadaptor import PipelineAdaptor
  dbconfig = 'data/dbconfig.json'
  dbparam=read_dbconf_json(dbconfig)
  base = BaseAdaptor(**dbparam)
  engine = base.engine
  dbname=dbparam['dbname']
  # Start from a clean schema (drops any leftover tables and db file).
  Base.metadata.drop_all(engine)
  if os.path.exists(dbname):
    os.remove(dbname)
  Base.metadata.create_all(engine)
  platform_data=[{ "platform_igf_id" : "M001",
                   "model_name" : "MISEQ" ,
                   "vendor_name" : "ILLUMINA" ,
                   "software_name" : "RTA",
                   "software_version" : "RTA1.18.54"}]                          # platform data
  flowcell_rule_data=[{"platform_igf_id":"M001",
                       "flowcell_type":"MISEQ",
                       "index_1":"NO_CHANGE",
                       "index_2":"NO_CHANGE"}]                                  # flowcell data
  project_data=[{'project_igf_id':'ProjectA'}]                                  # project data
  sample_data=[{'sample_igf_id':'SampleA',
                'project_igf_id':'ProjectA'}]                                   # sample data
  seqrun_data=[{'seqrun_igf_id':'180810_K00345_0063_AHWL7CBBXX',
                'flowcell_id':'000000000-D0YLK',
                'platform_igf_id':'M001',
                'flowcell':'MISEQ'},
               {'seqrun_igf_id':'180610_K00345_0063_AHWL7CBBXX',
                'flowcell_id':'000000000-D0YLJ',
                'platform_igf_id':'M001',
                'flowcell':'MISEQ'},
               {'seqrun_igf_id':'180410_K00345_0063_AHWL7CBBXX',
                'flowcell_id':'000000000-D0YLI',
                'platform_igf_id':'M001',
                'flowcell':'MISEQ'}
              ]                                                                 # seqrun data
  experiment_data=[{'experiment_igf_id':'ExperimentA',
                    'sample_igf_id':'SampleA',
                    'library_name':'SampleA',
                    'platform_name':'MISEQ',
                    'project_igf_id':'ProjectA'}]
  run_data=[{'run_igf_id':'RunA',
             'experiment_igf_id':'ExperimentA',
             'seqrun_igf_id':'180810_K00345_0063_AHWL7CBBXX',
             'lane_number':'1'},
            {'run_igf_id':'RunB',
             'experiment_igf_id':'ExperimentA',
             'seqrun_igf_id':'180610_K00345_0063_AHWL7CBBXX',
             'lane_number':'1'},
            {'run_igf_id':'RunC',
             'experiment_igf_id':'ExperimentA',
             'seqrun_igf_id':'180410_K00345_0063_AHWL7CBBXX',
             'lane_number':'1'}
           ]                                                                    # run data
  base.start_session()
  pl=PlatformAdaptor(**{'session':base.session})
  pl.store_platform_data(data=platform_data)                                    # loading platform data
  pl.store_flowcell_barcode_rule(data=flowcell_rule_data)                       # loading flowcell rules data
  pa=ProjectAdaptor(**{'session':base.session})
  pa.store_project_and_attribute_data(data=project_data)                        # load project data
  sa=SampleAdaptor(**{'session':base.session})
  sa.store_sample_and_attribute_data(data=sample_data)                          # store sample data
  sra=SeqrunAdaptor(**{'session':base.session})
  sra.store_seqrun_and_attribute_data(data=seqrun_data)                         # load seqrun data
  ea=ExperimentAdaptor(**{'session':base.session})
  # NOTE(review): method name says "project" but it is fed experiment_data —
  # presumably ExperimentAdaptor exposes/inherits this name; confirm it is
  # not meant to be store_project_and_attribute_data's experiment equivalent.
  ea.store_project_and_attribute_data(data=experiment_data)                     # load experiment data
  ra=RunAdaptor(**{'session':base.session})
  ra.store_run_and_attribute_data(data=run_data)                                # load run data
  pipeline_data=[{ "pipeline_name" : "DemultiplexIlluminaFastq",
                   "pipeline_db" : "sqlite:////bcl2fastq.db",
                 }]
  pipeline_seed_data=[{'pipeline_name':'DemultiplexIlluminaFastq',
                       'seed_id':1, 'seed_table':'seqrun'},
                      {'pipeline_name':'DemultiplexIlluminaFastq',
                       'seed_id':2, 'seed_table':'seqrun'},
                      {'pipeline_name':'DemultiplexIlluminaFastq',
                       'seed_id':3, 'seed_table':'seqrun'},
                     ]
  pla=PipelineAdaptor(**{'session':base.session})
  pla.store_pipeline_data(data=pipeline_data)
  pla.create_pipeline_seed(data=pipeline_seed_data)
  pipeline_data=[{ "pipeline_name" : "PrimaryAnalysis",
                   "pipeline_db" : "sqlite:////analysis.db",
                 }]
  pipeline_seed_data=[{'pipeline_name':'PrimaryAnalysis',
                       'seed_id':1, 'seed_table':'experiment'},
                      {'pipeline_name':'PrimaryAnalysis',
                       'seed_id':2, 'seed_table':'experiment'},
                      {'pipeline_name':'PrimaryAnalysis',
                       'seed_id':3, 'seed_table':'experiment'}
                     ]
  pla.store_pipeline_data(data=pipeline_data)
  pla.create_pipeline_seed(data=pipeline_seed_data)
  base.commit_session()
  base.close_session()
  ps=Project_status(igf_session_class=base.get_session_class(),
                    project_igf_id='ProjectA')
  # Uncomment individual calls below to inspect the fetched status data.
  #print(ps.get_seqrun_info(demultiplexing_pipeline='DemultiplexIlluminaFastq'))
  #print(ps.get_seqrun_info(active_seqrun_igf_id='SeqrunA'))
  #print(ps.get_seqrun_info(demultiplexing_pipeline='DemultiplexIlluminaFastq',
  #                         active_seqrun_igf_id='180410_K00345_0063_AHWL7CBBXX'))
  #print(ps.get_status_description())
  #print(ps.get_status_column_order())
  #print(ps.get_analysis_info(analysis_pipeline='PrimaryAnalysis'))
  #ps.generate_gviz_json_file(output_file='a',
  #                           demultiplexing_pipeline='DemultiplexIlluminaFastq',
  #                           analysis_pipeline='PrimaryAnalysis',
  #                           active_seqrun_igf_id='180410_K00345_0063_AHWL7CBBXX')
  Base.metadata.drop_all(engine)
  os.remove(dbname)
|
|
from csv import DictReader
from datetime import datetime, timedelta
from collections import defaultdict
import cPickle as pickle
from math import exp, log, sqrt
import random, gc
from util import read_dump, write_dump, cache, read_tsv, convert_ts, data, next_row, get_category
import argparse, ast, re, json
def filter_row(row, data_type, sr):
    """Decide whether a stream row is kept.

    Only context ads (ObjectType == 3) are kept; for training data
    (data_type == 0), non-click rows are randomly downsampled at rate sr.
    """
    if int(row["ObjectType"]) != 3:
        return False
    clicked = int(row.get("IsClick", 0))
    # Drop a (1 - sr) fraction of training negatives to rebalance the data.
    return not (data_type == 0 and clicked == 0 and random.random() > sr)
def calc_ctr(x, y):
    """Smoothed click-through rate in percent.

    Applies additive smoothing with 10 pseudo-shows at the global average
    CTR, so low-traffic ads shrink toward the prior.
    """
    prior_ctr = 0.0060281
    smoothed = (x + prior_ctr * 10) / (y + 10)
    return int(round(smoothed * 100.0))
def log_trans(x):
    """Optionally compress a count to an integer log scale.

    Controlled by the module-level `args.log` flag (argparse result).
    """
    if args.log <= 0:
        return x
    return int(round(log(x + 1)))
def get_user_info():
    """Load data/UserInfo.tsv into a {UserID: row} dict.

    Every column is coerced to int; the UserID column is removed from each
    stored row and used as the key.
    """
    info_by_uid = {}
    for _, record in read_tsv("data/UserInfo.tsv"):
        for field in record:
            record[field] = int(record[field])
        key = record.pop("UserID")
        info_by_uid[key] = record
    return info_by_uid
def trans_ad_info(ad_info):
    """Normalize one AdsInfo row in place for context ads.

    Returns None for non-context ads (IsContext == 0); otherwise returns the
    mutated row with Price as float ('' kept for missing), CategoryID as int
    (-1 for missing), Params as a tuple of hashed (key, value) pairs, and
    Title as unicode.
    """
    if int(ad_info["IsContext"]) == 0:
        return None
    trans_keys = [
        "CategoryID",
        "Price",
        "Params",
        "Title",
    ]
    # Drop identifiers and columns not used as features.
    del_keys = ["AdID", "IsContext", "_id", "LocationID",]
    for k in del_keys:
        if k in ad_info:
            del ad_info[k]
    for key in trans_keys:
        val = ad_info[key]
        if key == "Price":
            if val == "":
                pass  # missing price stays as empty string
            else:
                ad_info[key] = float(ad_info[key])
        elif key == "Params":
            params = ad_info[key]
            # Params column is a Python-literal dict; empty string -> {}
            params = ast.literal_eval(params) if params else {}
            for par_key in params:
                params[par_key] = unicode(params[par_key], "utf-8")
            # Hash each (key, value) pair; (-1,) is the "no params" sentinel.
            val = tuple([hash_val(0, (k, v)) for k, v in params.items()])
            if len(val) == 0:
                val = (-1,)
            ad_info[key] = val
        elif key == "Title":
            if not isinstance(ad_info[key], unicode):
                ad_info[key] = unicode(ad_info[key], "utf-8")
        else:
            if val == "":
                val = -1  # sentinel for missing categorical value
            ad_info[key] = int(val)
    return ad_info
# Lazily-built cache of normalized AdsInfo rows. AdID is treated as the
# 1-based position of the row in data/AdsInfo.tsv — TODO confirm the file
# is sorted by AdID with no gaps.
ad_info_list = []
ad_info_iter = read_tsv("data/AdsInfo.tsv")
def get_ad_info(aid):
    # Advance the shared iterator until row `aid` is cached.
    while aid - 1 >= len(ad_info_list):
        t, row = next(ad_info_iter, (None, None))
        if row is None:
            break  # file exhausted; the lookup below may raise IndexError
        ad_info_list.append(trans_ad_info(row))
    return ad_info_list[aid - 1]
se_params_iter = read_tsv("data/search_params.csv", delimiter=",")
# One-element buffer holding the most recently read search-params row.
# Callers must request SearchIDs in non-decreasing order (single forward pass).
se_param_list = [None]
def get_se_param(sid):
    while se_param_list[0] is None or se_param_list[0]["SearchID"] < sid:
        t, se_param = next(se_params_iter, (None, None))
        # NOTE(review): if the file is exhausted, se_param is None and the
        # next line raises TypeError — presumably every sid has a row at or
        # beyond it; confirm against the data.
        se_param["SearchID"] = int(se_param["SearchID"])
        params = json.loads(se_param["SearchParams"])
        se_param["SearchParams"] = [hash_val(0, (int(k), v)) for (k, v) in params.items()]
        se_param_list[0] = se_param
    # [-1] is the sentinel for "this search has no params row".
    params = [-1,] if se_param_list[0]["SearchID"] != sid else se_param_list[0]["SearchParams"]
    return params
# Lazily-built cache of ad prices, indexed by AdID - 1 (same 1-based
# positional convention as get_ad_info).
ad_price_list = []
ad_price_iter = read_tsv("data/ad_price.tsv", delimiter=" ")
def get_ad_price(aid):
    """Return the price of ad `aid` as float, or '' when missing."""
    while aid - 1 >= len(ad_price_list):
        t, row = next(ad_price_iter, (None, None))
        if row is None:
            break  # file exhausted; lookup below may raise IndexError
        price = row["Price"]
        price = float(price) if price else ""  # '' marks a missing price
        ad_price_list.append(price)
    return ad_price_list[aid - 1]
def get_features(sinfo, rows, test=False):
    """Assemble grouped feature dicts for one search event.

    sinfo: the SearchInfo row; rows: the ad-impression rows of that search.
    Relies on module-level streams/dicts (user_cnt_iter, user_aid_cnt_iter,
    uid_cnt_dict, ipid_cnt_dict, adid_cnt_dict, user_info_map) defined
    elsewhere in this file and aligned with the search stream.
    Mutates sinfo and the row dicts in place; returns a defaultdict of
    feature-group name -> list of records.
    """
    feature_map = defaultdict(list)
    sid = sinfo["SearchID"]
    sinfo["SearchParams"] = get_se_param(sid)
    # Advance the per-search counter streams until they match this SearchID.
    user_cnt_row = next(user_cnt_iter, (None, None))[1]
    while int(user_cnt_row["SearchID"]) != sid:
        user_cnt_row = next(user_cnt_iter, (None, None))[1]
    user_aid_cnt_rows = next(user_aid_cnt_iter, (None, None))[1]
    while int(user_aid_cnt_rows[0]["SearchID"]) != sid:
        user_aid_cnt_rows = next(user_aid_cnt_iter, (None, None))[1]
    user_aid_cnt_dict = {}
    for row in user_aid_cnt_rows:
        aid = int(row["AdID"])
        user_aid_cnt_dict[aid] = row
    ad_infos = []
    for row in rows:
        aid = int(row["AdID"])
        row.update(user_aid_cnt_dict[aid])  # merge per-(user, ad) counters
        ad_infos.append(get_ad_info(aid))
    uid = int(sinfo["UserID"])
    user_info = user_info_map.get(uid, {"UserAgentID": "",
                                        "UserAgentOSID": "",
                                        "UserDeviceID": "",
                                        "UserAgentFamilyID": ""})
    ipid = int(sinfo["IPID"])
    # Replace rare ids (seen <= 30 times) with -1 to limit long-tail overfit.
    uid_cnt = uid_cnt_dict[uid]
    if uid_cnt <= 30:
        sinfo["UserID"] = -1
    ipid_cnt = ipid_cnt_dict[ipid]
    if ipid_cnt <= 30:
        sinfo["IPID"] = -1
    for row in rows:
        aid = int(row["AdID"])
        adid_cnt = adid_cnt_dict[aid]
        row["adid_cnt"] = adid_cnt
        if adid_cnt <= 30:
            row["AdID"] = -1
    feature_map["user_cnt"] = [user_cnt_row]
    feature_map["user_info"] = [user_info]
    feature_map["ad_info"] = ad_infos
    feature_map["stream_info"] = rows
    feature_map["sinfo"] = [sinfo]
    return feature_map
def extract_slot_feas(rows, sinfo):
    """Add slot/position features to each row and page-level counts to sinfo.

    Mutates the dicts in `rows` and `sinfo` in place; returns None.
    Python 2 only: relies on map() returning a list and on comparing
    '' with floats in sort().
    """
    data = map(lambda x: (int(x["Position"]), int(x["ObjectType"]), x), rows)
    data.sort()  # order slots by on-page position
    price_data = []
    ot_cnt = defaultdict(int)
    all_pos = []
    all_ot = []
    for i in range(len(data)):
        all_pos.append(data[i][0])
        all_ot.append(data[i][1])
        aid = int(data[i][2]["AdID"])
        price_data.append((get_ad_price(aid), i))
        i_obt = data[i][1]
        ot_cnt[i_obt] += 1
        # Count highlighted slots (ObjectType == 2) below/above this slot.
        ucnt, lcnt = 0, 0
        for j in range(len(data)):
            if i == j:
                continue
            j_obt = data[j][1]
            if j_obt == 2:
                if i < j:
                    lcnt += 1
                else:
                    ucnt += 1
        data[i][2]["hl_lcnt"] = lcnt
        data[i][2]["hl_ucnt"] = ucnt
    for k in range(1, 4):
        v = ot_cnt[k]
        sinfo["ot%s_cnt"%k] = v
    sinfo["record_cnt"] = len(rows)
    # Hash the full position / object-type layout of the page.
    sinfo["pos_type"] = hash_val(0, tuple(all_pos))
    sinfo["pos_ot_type"] = hash_val(0, tuple(all_ot))
    price_data.sort()
    avg_price, avg_cnt = 0, 0
    for p, i in price_data:
        if p != "":
            avg_price += p
            avg_cnt += 1
            data[i][2]["price_pos"] = i  # rank of this ad by price
        else:
            data[i][2]["price_pos"] = -1  # missing price
    if avg_cnt == 0 or avg_price <= 0:
        pass  # no usable average; ratios fall back to sentinels below
    else:
        avg_price /= avg_cnt
    for p, i in price_data:
        if not p:
            ratio = -1  # price unknown
        elif avg_price <= 0:
            ratio = -2  # no usable page-average price
        else:
            ratio = int(round((p / avg_price) * 100))
        data[i][2]["price_ratio"] = ratio
def stream_info_func(vs, name=False):
    """Yield per-ad feature dicts from the grouped stream rows in vs[0].

    With name=True, yields the feature-name list once per record instead.
    """
    keys = ["AdID",
            "Position",
            "HistCTR",
            "hl_lcnt",
            "hl_ucnt",
            "adid_cnt",
            "clk_cnt",
            "show_cnt",
            "t_show_cnt",
            "price_pos",
            "price_ratio",
            ]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                val = v[k]
                if k == "HistCTR":
                    if val != "":
                        # store historical CTR as an integer in thousandths
                        val = int(round(float(val) * 1000))
                elif k in ("pos_show_cnt", "adid_cnt"):
                    # NOTE(review): "pos_show_cnt" is not in `keys`, so only
                    # "adid_cnt" can reach this branch — confirm intent.
                    val = log_trans(int(val))
                x[k] = val
            # per-(user, ad) smoothed CTR from the merged counter columns
            x["u_aid_ctr"] = calc_ctr(int(x["clk_cnt"]), int(x["show_cnt"]))
            # x["u_pos_ctr"] = calc_ctr(int(x["pos_clk_cnt"]), int(x["pos_show_cnt"]))
            yield x
def sinfo_func(vs, name=False):
    """Yield one search-level feature dict per sinfo record in vs[0].

    With name=True, yields the feature-name list once per record instead.
    Python 2 only: relies on unicode() and list-returning map()/filter().
    """
    keys = [
        "IPID",
        "UserID",
        "IsUserLoggedOn",
        "SearchQuery",
        "SearchParams",
        "ot1_cnt",
        "ot2_cnt",
        "ot3_cnt",
        "record_cnt",
        "pos_type",
        "pos_ot_type",
        "s_LocationID",
        "s_CategoryID",
    ]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                val = v[k]
                if k == "SearchQuery":
                    # hash query tokens and keep only frequent ones (>= 9)
                    query = unicode(v["SearchQuery"], "utf-8")
                    val = map(lambda x : hash_val(0, x), query.split())
                    val = filter(lambda x : query_cnt_dict[x] >= 9, val)
                    if len(val) == 0:
                        val = [-1,]  # sentinel: no frequent query tokens
                elif k == "SearchParams":
                    val = filter(lambda x : query_param_cnt_dict[x] >= 9, val)
                    if len(val) == 0:
                        val = [-1,]  # sentinel: no frequent params
                x[k] = val
            # date_str = v["SearchDate"]
            # d = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S.0")
            # x["hour"] = d.hour
            # x["weekday"] = d.weekday()
            yield x
def user_info_func(vs, name=False):
    """Pass user-profile feature dicts through unchanged.

    When *name* is True the key collection of each record is yielded;
    otherwise a shallow copy of each record dict is yielded.
    """
    for record in vs[0]:
        yield record.keys() if name else dict(record)
def ad_info_func(vs, name):
    """Yield per-ad static features (category, log-price, filtered params).

    Price is log-compressed and rounded to an int (empty string = missing,
    passed through).  Params keeps only values frequent enough in
    ad_param_cnt_dict; an empty result becomes the sentinel [-1].
    When *name* is truthy the feature-name list is yielded per record.
    """
    keys = ["CategoryID",
            "Price",
            "Params",
            ]
    for record in vs[0]:
        if name:
            yield keys
            continue
        out = {}
        for key in keys:
            value = record[key]
            if key == "Price" and value != "":
                # log scale + round: collapses the long price tail
                value = int(round(log(value + 1)))
            elif key == "Params":
                value = [p for p in value if ad_param_cnt_dict[p] >= 9]
                if not value:
                    value = [-1, ]
            out[key] = value
        yield out
def ngram(query_word):
    """Return adjacent-pair bigrams of *query_word* as space-joined strings.

    A sequence shorter than two elements yields an empty list.  Works on any
    indexable sequence (list of tokens, or a string -> character bigrams).
    """
    return [u"%s %s" % pair for pair in zip(query_word, query_word[1:])]
def calc_sim(qw, tw):
    """Overlap statistics between query tokens *qw* and target tokens *tw*.

    Returns a dict with the match count, the smallest index in *tw* of any
    matched token (10000 if none matched), the match ratio as an integer
    percentage of len(qw), and the matched tokens in qw order.
    Raises ZeroDivisionError for empty *qw* — callers guard against that.
    """
    matched = [w for w in qw if w in tw]
    best_pos = 10000
    for w in matched:
        pos = tw.index(w)
        if pos < best_pos:
            best_pos = pos
    return {"cnt": len(matched),
            "pos": best_pos,
            "ratio": int(round((len(matched) * 1.0 / len(qw)) * 100)),
            "match_w": matched}
def query_feas(query_word, ad_info, name=False):
    # Query/ad-title similarity features for one ad.
    # query_word: list of query tokens; ad_info: dict with at least "Title".
    # With name=True, returns the advertised feature-name list (note: it omits
    # title_len/title/match_w/match_ng, which the value path does produce).
    if name:
        return ["qe_w_cnt", "qe_w_ratio", "qe_w_pos",
                "qe_ng_cnt", "qe_ng_ratio", "qe_ng_min_pos", "t_match"]
    x = {}
    title = ad_info["Title"].split()
    x["title_len"] = len(title)
    # t_match: 1 if the whole query appears verbatim inside the raw title
    if " ".join(query_word) in ad_info["Title"]:
        x["t_match"] = 1
    else:
        x["t_match"] = 0
    # NOTE(review): title is re-bound here to the RAW STRING, so calc_sim and
    # ngram below operate on characters, not tokens.  Looks unintentional
    # (title was just split above) — but trained features depend on it, so
    # confirm before changing.
    title = ad_info["Title"]
    # hashed title tokens, rare ones (count < 9) dropped; sentinel [-1] if none
    title_val = map(lambda x : hash_val(0, x), title.split())
    title_val = filter(lambda x : title_cnt_dict[x] >= 9, title_val)
    x["title"] = title_val if title_val else [-1,]
    if len(query_word) == 0:
        # empty query: word-level similarity features are "missing"
        x["qe_w_cnt"] = -1
        x["qe_w_ratio"] = -1
        x["qe_w_pos"] = -1
    else:
        # word-level similarity (against the raw title string — see NOTE above)
        sim = calc_sim(query_word, title)
        x["qe_w_cnt"] = sim["cnt"]
        x["qe_w_ratio"] = sim["ratio"]
        x["qe_w_pos"] = sim["pos"]
        x["match_w"] = sim["match_w"]
        # bigram-level similarity; needs at least 2 query tokens
        qw_ngram = ngram(query_word)
        if len(qw_ngram) == 0:
            x["qe_ng_cnt"] = -1
            x["qe_ng_ratio"] = -1
            x["qe_ng_min_pos"] = -1
        else:
            title_ngram = ngram(title)
            sim = calc_sim(qw_ngram, title_ngram)
            x["qe_ng_cnt"] = sim["cnt"]
            x["qe_ng_ratio"] = sim["ratio"]
            x["qe_ng_min_pos"] = sim["pos"]
            x["match_ng"] = sim["match_w"]
    return x
# Debug aid for param_feas: remembers (type, type) pairs of mismatched
# search/ad parameter values already printed, so each pair is logged once.
unmatch_set = set()
def param_feas(se_params, ad_info, name=False):
    # Match/mismatch/miss counts between search params and ad params.
    # NOTE(review): inconsistent return types — empty se_params returns a
    # LIST [-1, -1, -1] while the normal path returns a dict.  Currently
    # harmless because the only call site (in match_info_func) is commented
    # out; fix before re-enabling.
    if name:
        return ["par_match", "par_nmatch", "par_miss"]
    if len(se_params) == 0:
        return [-1, -1, -1]
    ad_params = ad_info["Params"]
    x = {}
    par_match = 0
    par_miss = 0
    par_nmatch = 0
    for par_key, par_v in se_params.items():
        ad_v = ad_params.get(par_key)
        if par_v == ad_v:
            par_match += 1
        elif ad_v is not None:
            par_nmatch += 1
            # one-time debug print per unseen (type, type) combination;
            # mutates module-level unmatch_set (Python 2 print statements)
            key = (type(par_v), type(ad_v))
            if key not in unmatch_set:
                unmatch_set.add(key)
                print key
                print par_v.encode("utf-8"), u"----", ad_v.encode("utf-8")
        else:
            # param absent from the ad entirely
            par_miss += 1
    x["par_match"] = par_match
    x["par_nmatch"] = par_nmatch
    x["par_miss"] = par_miss
    return x
def match_info_func(vs, name):
    # Cross features between each ad (vs[0]) and the search session (vs[1][0]):
    # category match, parent-category match, and query/title similarity.
    ad_infos = vs[0]
    sinfo = vs[1][0]
    s_ca_id = int(sinfo.get("CategoryID"))
    # category_map maps a category id to its parent id (loaded at startup)
    s_ca_pid = category_map[s_ca_id]
    # se_params = sinfo["Params"]
    query = unicode(sinfo["SearchQuery"], "utf-8")
    query = query.split() if query else []
    keys = [
        "ca_match",
        "ca_pid_match"
        ] + query_feas(None, None, True)
    #  + param_feas(None, None, True)
    for ad_info in ad_infos:
        if name:
            yield keys
        else:
            x = {}
            """ ca_match """
            # match features carry the matching id itself, -1 on mismatch
            ca_id = int(ad_info.get("CategoryID", -1))
            if ca_id == s_ca_id:
                x["ca_match"] = ca_id
            else:
                x["ca_match"] = -1
            ca_pid = category_map.get(ca_id, -1)
            if ca_pid == s_ca_pid:
                x["ca_pid_match"] = ca_pid
            else:
                x["ca_pid_match"] = -1
            x.update(query_feas(query, ad_info))
            # x.update(param_feas(se_params, ad_info))
            yield x
# Count features that user_cnt_func log-compresses before emitting.
log_cnt_keys = {"t_cnt", "bf_cnt", "af_cnt"}
def user_cnt_func(vs, name):
    """Yield per-user activity-count features.

    Keys missing from a record are skipped (bag2 etc. are optional).
    Counts named in log_cnt_keys are log-compressed via log_trans; a
    derived before-click CTR is always added from bf_clk_cnt/bf_cnt.
    When *name* is truthy the feature-name list is yielded per record.
    """
    keys = ["t_cnt", "bf_cnt", "af_cnt",
            "bf_3h_cnt", "af_3h_cnt",
            "bf_clk_cnt",
            "bag2",
            ]
    for record in vs[0]:
        if name:
            yield keys
            continue
        out = {}
        for key in keys:
            if key in record:
                value = record[key]
                if key in log_cnt_keys:
                    value = log_trans(int(value))
                out[key] = value
        # NOTE: assumes bf_clk_cnt/bf_cnt are always present in the record
        out["bf_ctr"] = calc_ctr(int(record["bf_clk_cnt"]), int(record["bf_cnt"]))
        yield out
# Registry driving extract(): each entry is (generator_function, list of
# feature_map keys whose value lists are passed to it as vs).  Functions
# over single-row inputs (sinfo, user_info, user_cnt) broadcast to every
# instance; per-ad functions align 1:1 with the instances.
extract_func = [
    (stream_info_func, ["stream_info"]),
    (sinfo_func, ["sinfo"]),
    (user_info_func, ["user_info"]),
    (ad_info_func, ["ad_info"]),
    (user_cnt_func, ["user_cnt"]),
    (match_info_func, ["ad_info", "sinfo"]),
]
def extract(feature_map, name=False):
    """Run every registered feature function and merge results per instance.

    The number of instances is the longest value list in feature_map (one
    instance per ad shown).  Functions whose inputs all have length 1
    produce a single dict that is broadcast into every instance; otherwise
    results are merged positionally.  Returns the list of instance dicts.
    """
    ins_size = 0
    for feas_list in feature_map.values():
        if len(feas_list) > ins_size:
            ins_size = len(feas_list)
    instances = [{} for _ in xrange(ins_size)]
    for func, in_keys in extract_func:
        vls = [feature_map[key] for key in in_keys]
        msize = 0
        for vl in vls:
            msize = max(msize, len(vl))
        if msize == 1:
            # single-row input: take the first yielded dict and broadcast it
            for feas in func(vls, name):
                for ins in instances:
                    ins.update(feas)
                break
        else:
            # per-ad input: merge positionally, one yield per instance
            for idx, feas in enumerate(func(vls, name)):
                instances[idx].update(feas)
    return instances
# Global id assigner used by hash_val's "xgb2" mode: maps a hashed feature
# value to a dense sequential index (insertion order).  The defaultdict
# default is never triggered — hash_val guards with an `in` check first.
one_hot_encoder = defaultdict(int)
def hash_val(t, v, dtype=None, D=22):
    # Encode feature (slot t, value v) for output.
    #   dtype="xgb":  plain "slot:value" string (dense numeric features).
    #   dtype="xgb2": hash v into D bits, prefix with slot id, then map to a
    #                 dense sequential index (offset by 60) via the module
    #                 global one_hot_encoder (mutated here).
    #   default:      the raw (t << D) | hash integer (hashing-trick id).
    # NOTE: relies on Python 2's stable str hash; under Python 3 hash
    # randomization this would not be reproducible across runs.
    if dtype == "xgb":
        return u"%s:%s"%(t, v)
    elif dtype == "xgb2":
        v = (t << D) | (hash(unicode(v)) & ((1 << D) - 1))
        if v in one_hot_encoder:
            return u"%s:%s"%(one_hot_encoder[v] + 60, 1)
        else:
            one_hot_encoder[v] = len(one_hot_encoder)
            return u"%s:%s"%(one_hot_encoder[v] + 60, 1)
    else:
        return (t << D) | (hash(unicode(v)) & ((1 << D) - 1))
def main():
    # Driver: stream search sessions, extract features, and write one
    # libsvm-style line per (session, ad) into train/cv/test files.
    # Python 2 script (print statements, unicode, py2 filter).
    random.seed(args.seed)
    # dense numeric features kept when emitting xgb-format output
    xgb_set =set([
        "price_pos", "ot1_cnt", "bf_cnt", "bf_clk_cnt",
        "u_aid_ctr", "record_cnt", "show_cnt", "clk_cnt",
        "t_cnt", "qe_w_pos", "HistCTR", "qe_ng_min_pos", "t_show_cnt",
        "bf_ctr", "ot2_cnt", "Price",
        "qe_ng_cnt", "title_len", "hl_ucnt",
        "price_ratio", "hl_lcnt", "t_match", "qe_w_ratio",
        "qe_ng_ratio", "Position",
        "bf_3h_cnt", "qe_w_cnt",
        "af_cnt", "ot3_cnt",
        "af_3h_cnt", "adid_cnt", "IsUserLoggedOn",
        ])
    # high-cardinality categorical features: one-hot encoded via hash_val "xgb2"
    xgb_sparse_set = set([
        "pos_ot_type", "pos_type",
        "ca_match", "ca_pid_match",
        "CategoryID", "s_LocationID", "s_CategoryID",
        "UserAgentFamilyID", "UserAgentOSID",
        "UserDeviceID", "UserAgentID",
        "UserID", "IPID", "AdID",
        "SearchParams", "Params", "Title", "SearchQuery"
        ])
    # output handles indexed by data_type: 0=train, 1=cv, 2=test
    if args.test:
        fh_list = [ open("data/tr_%s.%s"%(args.test, args.type), "w"),
                open("data/cv_%s.%s"%(args.test, args.type), "w"),
                open("data/te_%s.%s"%(args.test, args.type), "w")]
    else:
        fh_list = [open("data/tr.%s"%(args.type), "w"),
                open("data/cv.%s"%(args.type), "w"),
                open("data/te.%s"%(args.type), "w")]
    data_iter = data(args.test, maxlines=args.maxl)
    print "sr: %s"%args.sr
    # avg_ctr[data_type] = [click_sum, row_count] for a sanity report at the end
    avg_ctr = defaultdict(lambda : [0, 0])
    for line_cnt, (data_type, rows, sinfo) in enumerate(data_iter):
        sinfo["s_LocationID"] = int(sinfo["LocationID"])
        sinfo["s_CategoryID"] = int(sinfo["CategoryID"])
        extract_slot_feas(rows, sinfo)
        # negative downsampling / row filtering with rate args.sr
        rows = filter(lambda x: filter_row(x, data_type, sr=args.sr), rows)
        if not rows:
            continue
        feature_map = get_features(sinfo, rows, data_type > 0)
        instances = extract(feature_map)
        if line_cnt == 0:
            # first session: dump the feature schema for inspection and
            # persist the sorted feature-name list used for slot numbering
            for k, feas in feature_map.items():
                print "-" * 80
                print k
                print feas[0].keys()
            feas_name = sorted(instances[0].keys())
            print len(feas_name), feas_name
            if args.sz is not None:
                write_dump("feas_name.dump", feas_name)
            elif args.test:
                write_dump("feas_name%s.dump"%args.test, feas_name)
            else:
                write_dump("feas_name.dump", feas_name)
        # date_str = sinfo["SearchDate"]
        # ts = convert_ts(datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S.0"))
        fh = fh_list[data_type]
        for ins_map, row in zip(instances, rows):
            y = int(row.get("IsClick", 0))
            avg_ctr[data_type][0] += y
            avg_ctr[data_type][1] += 1
            ins = []
            for kt, k in enumerate(feas_name):
                # choose encoding per feature: dense ("xgb"), one-hot
                # ("xgb2"), drop (unknown), or hashing trick ("")
                if "xgb" in args.type:
                    if k in xgb_set:
                        hash_type = "xgb"
                    elif k in xgb_sparse_set:
                        hash_type = "xgb2"
                    else:
                        if line_cnt == 0:
                            print "drop %s"%k
                        continue
                else:
                    hash_type = ""
                feas = ins_map[k]
                if line_cnt == 0:
                    print kt, k, type(feas), feas
                # list-valued features (hashed tokens etc.) expand to
                # multiple encoded entries in the same slot
                if isinstance(feas, list) or isinstance(feas, tuple):
                    for f in feas:
                        ins.append(hash_val(kt + 1, f, hash_type))
                else:
                    ins.append(hash_val(kt + 1, feas, hash_type))
            fh.write(unicode(y) + " " + " ".join(map(unicode, ins)) + "\n")
    for key, value in avg_ctr.items():
        print "%s, %s"%(key, value[0] * 1. / value[1])
    for fh in fh_list:
        fh.close()
if __name__ == '__main__':
    # CLI setup + loading of the precomputed count dictionaries that the
    # feature functions above read as module globals, then run main().
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--test', type=int, default=0)
    parser.add_argument('--mongo', type=int, default=0)
    parser.add_argument('--sz', type=int, default=None)
    parser.add_argument('--maxl', type=int, default=1e6)
    parser.add_argument('--type', type=str, default="ins")
    parser.add_argument('--sr', type=float, default=0.1)
    parser.add_argument('--seed', type=int, default=9)
    parser.add_argument('--date', type=int, default=0)
    parser.add_argument('--log', type=int, default=1)
    args = parser.parse_args()
    if args.mongo:
        # optional: serve ad info from a local MongoDB with an LRU cache
        # (functools32 backports lru_cache to Python 2)
        from pymongo import MongoClient
        import functools32 as functools
        client = MongoClient('localhost', 27017)
        db = client.test
        @functools.lru_cache(maxsize=1000000)
        def get_ad_info(aid):
            ad_info = db.ad_info.find_one({"AdID": aid})
            return trans_ad_info(ad_info)
    # frequency dictionaries used to drop rare hashed tokens/params
    uid_cnt_dict = read_dump("data/uid_cnt.dump")
    adid_cnt_dict = read_dump("data/adid_cnt.dump")
    ipid_cnt_dict = read_dump("data/ipid_cnt.dump")
    query_cnt_dict = read_dump("data/query_cnt.dump")
    title_cnt_dict = read_dump("data/title_cnt.dump")
    query_param_cnt_dict = read_dump("data/query_param_cnt.dump")
    ad_param_cnt_dict = read_dump("data/ad_param_cnt.dump")
    user_info_map = get_user_info()
    category_map = get_category()
    user_cnt_iter = read_tsv("data/user_cnt.csv", delimiter=",")
    user_aid_cnt_iter = next_row(read_tsv("data/user_aid_cnt.csv", delimiter=","))
    main()
|
|
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import sys
import os
import json
import click
import hashlib
import cProfile
import StringIO
import pstats
import frappe
import frappe.utils
from frappe.utils import cint
from distutils.spawn import find_executable
from functools import wraps
click.disable_unicode_literals_warning = True
# Decorator for click commands: passes the shared context (as frappe._dict)
# and, when --profile was given, wraps the command in cProfile and prints
# the stats sorted by cumulative time.  (Python 2: StringIO, print stmt.)
def pass_context(f):
	@wraps(f)
	def _func(ctx, *args, **kwargs):
		profile = ctx.obj['profile']
		if profile:
			pr = cProfile.Profile()
			pr.enable()
		ret = f(frappe._dict(ctx.obj), *args, **kwargs)
		if profile:
			pr.disable()
			s = StringIO.StringIO()
			ps = pstats.Stats(pr, stream=s)\
				.sort_stats('cumtime', 'tottime', 'ncalls')
			ps.print_stats()
			print s.getvalue()
		return ret
	return click.pass_context(_func)
# Return the single selected site, or exit(1) if zero or several are active.
def get_single_site(context):
	if not len(context.sites) == 1:
		print 'please select a site'
		sys.exit(1)
	site = context.sites[0]
	return site
# Invoke another click command, forwarding the current parameters/context.
def call_command(cmd, context):
	return click.Context(cmd, obj=context).forward(cmd)
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
# NOTE(review): the install_apps parameter is dead — click populates
# install_app (singular, multiple=True); install_apps is never passed.
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, db_name=None):
	"Install a new site"
	if not db_name:
		# derive a short db name from the site name (py2: sha1 accepts str)
		db_name = hashlib.sha1(site).hexdigest()[:10]
	frappe.init(site=site)
	_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)
	# first site created becomes the default site
	if len(frappe.utils.get_sites()) == 1:
		use(site)
# Core site-creation routine shared by new-site, restore and reinstall:
# creates the database, site dirs, installs frappe plus requested apps,
# and restores the scheduler state captured before installation.
def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None,force=False, reinstall=False):
	"Install a new Frappe site"
	from frappe.installer import install_db, make_site_dirs
	from frappe.installer import install_app as _install_app
	import frappe.utils.scheduler
	frappe.init(site=site)
	try:
		# enable scheduler post install?
		enable_scheduler = _is_scheduler_enabled()
	except:
		# no existing site/db to read from — default to disabled
		enable_scheduler = False
	install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name, admin_password=admin_password, verbose=verbose, source_sql=source_sql,force=force, reinstall=reinstall)
	make_site_dirs()
	_install_app("frappe", verbose=verbose, set_as_patched=not source_sql)
	# apps pinned in site config are installed before CLI-requested ones
	if frappe.conf.get("install_apps"):
		for app in frappe.conf.install_apps:
			_install_app(app, verbose=verbose, set_as_patched=not source_sql)
	if install_apps:
		for app in install_apps:
			_install_app(app, verbose=verbose, set_as_patched=not source_sql)
	frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
	scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
	print "*** Scheduler is", scheduler_status, "***"
	frappe.destroy()
# Read the scheduler flag from System Settings; False on any failure.
# NOTE(review): bare except + unconditional frappe.db.close() in finally —
# if connect() itself failed, close() may raise; preserved as-is.
def _is_scheduler_enabled():
	enable_scheduler = False
	try:
		frappe.connect()
		enable_scheduler = cint(frappe.db.get_single_value("System Settings", "enable_scheduler")) and True or False
	except:
		pass
	finally:
		frappe.db.close()
	return enable_scheduler
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None):
	"Restore site database from an sql file"
	site = get_single_site(context)
	frappe.init(site=site)
	db_name = db_name or frappe.conf.db_name or hashlib.sha1(site).hexdigest()[:10]
	_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path, force=context.force)
@click.command('reinstall')
@pass_context
def reinstall(context):
	"Reinstall site ie. wipe all data and start over"
	site = get_single_site(context)
	try:
		# remember which apps were installed so they can be reinstalled
		frappe.init(site=site)
		frappe.connect()
		frappe.clear_cache()
		installed = frappe.get_installed_apps()
		frappe.clear_cache()
	except Exception:
		installed = []
	finally:
		if frappe.db:
			frappe.db.close()
		frappe.destroy()
	frappe.init(site=site)
	_new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True, reinstall=True, install_apps=installed)
@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
	"Install a new app to site"
	from frappe.installer import install_app as _install_app
	for site in context.sites:
		frappe.init(site=site)
		frappe.connect()
		try:
			_install_app(app, verbose=context.verbose)
		finally:
			frappe.destroy()
@click.command('list-apps')
@pass_context
def list_apps(context):
	# docstring fixed: was a copy-paste of reinstall's help text
	"List apps installed on site"
	site = get_single_site(context)
	frappe.init(site=site)
	frappe.connect()
	print "\n".join(frappe.get_installed_apps())
	frappe.destroy()
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@pass_context
def add_system_manager(context, email, first_name, last_name):
	"Add a new system manager to a site"
	import frappe.utils.user
	for site in context.sites:
		frappe.connect(site=site)
		try:
			frappe.utils.user.add_system_manager(email, first_name, last_name)
			frappe.db.commit()
		finally:
			frappe.destroy()
@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
	"Run patches, sync schema and rebuild files/translations"
	import frappe.modules.patch_handler
	import frappe.model.sync
	from frappe.utils.fixtures import sync_fixtures
	import frappe.translate
	from frappe.desk.notifications import clear_notifications
	for site in context.sites:
		print 'Migrating', site
		frappe.init(site=site)
		frappe.connect()
		try:
			prepare_for_update()
			# run patches
			frappe.modules.patch_handler.run_all()
			# sync
			frappe.model.sync.sync_all(verbose=context.verbose)
			frappe.translate.clear_cache()
			sync_fixtures()
			clear_notifications()
		finally:
			frappe.destroy()
	# after all sites are migrated, rebuild or re-sync the website pages
	if rebuild_website:
		call_command(build_website, context)
	else:
		call_command(sync_www, context)
# Pre-migration hook: drop the shared (cross-site) cache entries.
def prepare_for_update():
	from frappe.sessions import clear_global_cache
	clear_global_cache()
@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
	"Run a particular patch"
	import frappe.modules.patch_handler
	for site in context.sites:
		frappe.init(site=site)
		try:
			frappe.connect()
			frappe.modules.patch_handler.run_single(module, force=context.force)
		finally:
			frappe.destroy()
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
	"Reload schema for a DocType"
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.reload_doc(module, doctype, docname, force=context.force)
			frappe.db.commit()
		finally:
			frappe.destroy()
@click.command('build')
@click.option('--make-copy', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
def build(make_copy=False, verbose=False):
	"Minify + concatenate JS and CSS files, build translations"
	import frappe.build
	import frappe
	frappe.init('')
	frappe.build.bundle(False, make_copy=make_copy, verbose=verbose)
@click.command('watch')
def watch():
	"Watch and concatenate JS and CSS files as and when they change"
	import frappe.build
	frappe.init('')
	frappe.build.watch(True)
@click.command('clear-cache')
@pass_context
def clear_cache(context):
	"Clear cache, doctype cache and defaults"
	import frappe.sessions
	import frappe.website.render
	from frappe.desk.notifications import clear_notifications
	for site in context.sites:
		try:
			# NOTE: uses frappe.connect(site) positionally, without a
			# preceding frappe.init — unlike the other per-site commands
			frappe.connect(site)
			frappe.clear_cache()
			clear_notifications()
			frappe.website.render.clear_cache()
		finally:
			frappe.destroy()
@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
	"Clear website cache"
	import frappe.website.render
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.website.render.clear_cache()
		finally:
			frappe.destroy()
@click.command('destroy-all-sessions')
@pass_context
def destroy_all_sessions(context):
	"Clear sessions of all users (logs them out)"
	import frappe.sessions
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.sessions.clear_all_sessions()
			frappe.db.commit()
		finally:
			frappe.destroy()
@click.command('sync-www')
@click.option('--force', help='Rebuild all pages', is_flag=True, default=False)
@pass_context
def sync_www(context, force=False):
	"Sync files from static pages from www directory to Web Pages"
	from frappe.website import statics
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			statics.sync_statics(rebuild=force)
			frappe.db.commit()
		finally:
			frappe.destroy()
@click.command('build-website')
@pass_context
def build_website(context):
	"Sync statics and clear cache"
	from frappe.website import render, statics
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			render.clear_cache()
			statics.sync(verbose=context.verbose).start(True)
			frappe.db.commit()
		finally:
			frappe.destroy()
@click.command('make-docs')
@pass_context
@click.argument('app')
@click.argument('docs_version')
def make_docs(context, app, docs_version):
	"Setup docs in target folder of target app"
	from frappe.utils.setup_docs import setup_docs
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			make = setup_docs(app)
			make.build(docs_version)
		finally:
			frappe.destroy()
@click.command('sync-docs')
@pass_context
@click.argument('app')
def sync_docs(context, app):
	"Sync docs from /docs folder into the database (Web Page)"
	from frappe.utils.setup_docs import setup_docs
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			make = setup_docs(app)
			make.sync_docs()
		finally:
			frappe.destroy()
@click.command('write-docs')
@pass_context
@click.argument('app')
@click.argument('target')
@click.option('--local', default=False, is_flag=True, help='Run app locally')
def write_docs(context, app, target, local=False):
	"Setup docs in target folder of target app"
	from frappe.utils.setup_docs import setup_docs
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			make = setup_docs(app)
			make.make_docs(target, local)
		finally:
			frappe.destroy()
@click.command('build-docs')
@pass_context
@click.argument('app')
@click.argument('docs_version')
@click.argument('target')
@click.option('--local', default=False, is_flag=True, help='Run app locally')
@click.option('--watch', default=False, is_flag=True, help='Watch for changes and rewrite')
def build_docs(context, app, docs_version, target, local=False, watch=False):
	"Setup docs in target folder of target app"
	from frappe.utils import watch as start_watch
	for site in context.sites:
		_build_docs_once(site, app, docs_version, target, local)
	if watch:
		# NOTE(review): trigger_make closes over the loop variable `site`
		# late-bound — it always rebuilds for the LAST site iterated above.
		# Presumably intended for single-site use; confirm before relying
		# on it with multiple sites.
		def trigger_make(source_path, event_type):
			if "/templates/autodoc/" in source_path:
				_build_docs_once(site, app, docs_version, target, local)
			elif ("/docs.css" in source_path
				or "/docs/" in source_path
				or "docs.py" in source_path):
				_build_docs_once(site, app, docs_version, target, local, only_content_updated=True)
		apps_path = frappe.get_app_path("frappe", "..", "..")
		start_watch(apps_path, handler=trigger_make)
# One full (or content-only) docs build cycle for a single site.
def _build_docs_once(site, app, docs_version, target, local, only_content_updated=False):
	from frappe.utils.setup_docs import setup_docs
	try:
		frappe.init(site=site)
		frappe.connect()
		make = setup_docs(app)
		if not only_content_updated:
			make.build(docs_version)
			make.sync_docs()
		make.make_docs(target, local)
	finally:
		frappe.destroy()
@click.command('reset-perms')
@pass_context
def reset_perms(context):
	"Reset permissions for all doctypes"
	from frappe.permissions import reset_perms
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			# only real (non-child-table, non-custom) doctypes
			for d in frappe.db.sql_list("""select name from `tabDocType`
				where istable=0 and custom=0"""):
				frappe.clear_cache(doctype=d)
				reset_perms(d)
		finally:
			frappe.destroy()
@click.command('execute')
@click.argument('method')
@click.option('--args')
@click.option('--kwargs')
@pass_context
def execute(context, method, args=None, kwargs=None):
"execute a function"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
if args:
args = eval(args)
else:
args = ()
if kwargs:
kwargs = eval(args)
else:
kwargs = {}
ret = frappe.get_attr(method)(*args, **kwargs)
if frappe.db:
frappe.db.commit()
finally:
frappe.destroy()
if ret:
print json.dumps(ret)
@click.command('celery')
@click.argument('args')
def celery(args):
	"Run a celery command"
	# replaces the current process with the celery app (no return)
	python = sys.executable
	os.execv(python, [python, "-m", "frappe.celery_app"] + args.split())
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
	"Trigger a scheduler event"
	import frappe.utils.scheduler
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.utils.scheduler.trigger(site, event, now=context.force)
		finally:
			frappe.destroy()
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
	"Enable scheduler"
	import frappe.utils.scheduler
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.utils.scheduler.enable_scheduler()
			frappe.db.commit()
			print "Enabled for", site
		finally:
			frappe.destroy()
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
	"Disable scheduler"
	import frappe.utils.scheduler
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.utils.scheduler.disable_scheduler()
			frappe.db.commit()
			print "Disabled for", site
		finally:
			frappe.destroy()
@click.command('export-doc')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def export_doc(context, doctype, docname):
	"Export a single document to csv"
	import frappe.modules
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.modules.export_doc(doctype, docname)
		finally:
			frappe.destroy()
@click.command('export-json')
@click.argument('doctype')
@click.argument('name')
@click.argument('path')
@pass_context
def export_json(context, doctype, name, path):
	"Export doclist as json to the given path, use '-' as name for Singles."
	from frappe.core.page.data_import_tool import data_import_tool
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			data_import_tool.export_json(doctype, path, name=name)
		finally:
			frappe.destroy()
@click.command('export-csv')
@click.argument('doctype')
@click.argument('path')
@pass_context
def export_csv(context, doctype, path):
	"Dump DocType as csv"
	from frappe.core.page.data_import_tool import data_import_tool
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			data_import_tool.export_csv(doctype, path)
		finally:
			frappe.destroy()
@click.command('export-fixtures')
@pass_context
def export_fixtures(context):
	"export fixtures"
	from frappe.utils.fixtures import export_fixtures
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			export_fixtures()
		finally:
			frappe.destroy()
@click.command('import-doc')
@click.argument('path')
@pass_context
# NOTE(review): the `force` parameter is dead — no click option feeds it
# and the body reads context.force instead.
def import_doc(context, path, force=False):
	"Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported"
	from frappe.core.page.data_import_tool import data_import_tool
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			data_import_tool.import_doc(path, overwrite=context.force)
		finally:
			frappe.destroy()
@click.command('import-csv')
@click.argument('path')
@click.option('--only-insert', default=False, is_flag=True, help='Do not overwrite existing records')
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--ignore-encoding-errors', default=False, is_flag=True, help='Ignore encoding errors while coverting to unicode')
@pass_context
def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False):
	"Import CSV using data import tool"
	from frappe.core.page.data_import_tool import importer
	from frappe.utils.csvutils import read_csv_content
	site = get_single_site(context)
	with open(path, 'r') as csvfile:
		content = read_csv_content(csvfile.read())
	frappe.init(site=site)
	frappe.connect()
	try:
		importer.upload(content, submit_after_import=submit_after_import,
			ignore_encoding_errors=ignore_encoding_errors, overwrite=not only_insert,
			via_console=True)
		frappe.db.commit()
	except Exception:
		# print the traceback but exit cleanly (console tool)
		print frappe.get_traceback()
	frappe.destroy()
@click.command('bulk-rename')
@click.argument('doctype')
@click.argument('path')
@pass_context
def _bulk_rename(context, doctype, path):
	"Rename multiple records via CSV file"
	from frappe.model.rename_doc import bulk_rename
	from frappe.utils.csvutils import read_csv_content
	site = get_single_site(context)
	with open(path, 'r') as csvfile:
		rows = read_csv_content(csvfile.read())
	frappe.init(site=site)
	frappe.connect()
	bulk_rename(doctype, rows, via_console = True)
	frappe.destroy()
# translation
@click.command('build-message-files')
@pass_context
def build_message_files(context):
	"Build message files for translation"
	import frappe.translate
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.translate.rebuild_all_translation_files()
		finally:
			frappe.destroy()
@click.command('get-untranslated')
@click.argument('lang')
@click.argument('untranslated_file')
@click.option('--all', default=False, is_flag=True, help='Get all message strings')
@pass_context
def get_untranslated(context, lang, untranslated_file, all=None):
	"Get untranslated strings for language"
	import frappe.translate
	site = get_single_site(context)
	try:
		frappe.init(site=site)
		frappe.connect()
		frappe.translate.get_untranslated(lang, untranslated_file, get_all=all)
	finally:
		frappe.destroy()
@click.command('update-translations')
@click.argument('lang')
@click.argument('untranslated_file')
@click.argument('translated-file')
@pass_context
def update_translations(context, lang, untranslated_file, translated_file):
	"Update translated strings"
	import frappe.translate
	site = get_single_site(context)
	try:
		frappe.init(site=site)
		frappe.connect()
		frappe.translate.update_translations(lang, untranslated_file, translated_file)
	finally:
		frappe.destroy()
@click.command('set-admin-password')
@click.argument('admin-password')
@pass_context
def set_admin_password(context, admin_password):
	"Set Administrator password for a site"
	import getpass
	for site in context.sites:
		try:
			frappe.init(site=site)
			# prompt interactively when the argument is empty
			while not admin_password:
				admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
			frappe.connect()
			frappe.db.sql("""update __Auth set `password`=password(%s)
				where user='Administrator'""", (admin_password,))
			frappe.db.commit()
			# force a fresh prompt for the next site
			admin_password = None
		finally:
			frappe.destroy()
@click.command('mysql')
@pass_context
def mysql(context):
	"Start Mariadb console for a site"
	# replaces the current process with the mysql client (no return)
	site = get_single_site(context)
	frappe.init(site=site)
	msq = find_executable('mysql')
	os.execv(msq, [msq, '-u', frappe.conf.db_name, '-p'+frappe.conf.db_password, frappe.conf.db_name, '-h', frappe.conf.db_host or "localhost", "-A"])
@click.command('console')
@pass_context
def console(context):
	"Start ipython console for a site"
	site = get_single_site(context)
	frappe.init(site=site)
	frappe.connect()
	frappe.local.lang = frappe.db.get_default("lang")
	import IPython
	IPython.embed()
@click.command('run-tests')
@click.option('--app')
@click.option('--doctype')
@click.option('--test', multiple=True)
@click.option('--driver')
@click.option('--module')
@pass_context
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None):
"Run tests"
import frappe.test_runner
from frappe.utils import sel
tests = test
site = get_single_site(context)
frappe.init(site=site)
if frappe.conf.run_selenium_tests and False:
sel.start(context.verbose, driver)
try:
ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests, force=context.force)
if len(ret.failures) == 0 and len(ret.errors) == 0:
ret = 0
finally:
pass
if frappe.conf.run_selenium_tests:
sel.close()
sys.exit(ret)
@click.command('serve')
@click.option('--port', default=8000)
@click.option('--profile', is_flag=True, default=False)
@pass_context
def serve(context, port=None, profile=False, sites_path='.', site=None):
"Start development web server"
if not context.sites:
site = None
else:
site = context.sites[0]
import frappe.app
frappe.app.serve(port=port, profile=profile, site=site, sites_path='.')
@click.command('request')
@click.argument('args')
@pass_context
def request(context, args):
"Run a request as an admin"
import frappe.handler
import frappe.api
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
if "?" in args:
frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")])
else:
frappe.local.form_dict = frappe._dict()
if args.startswith("/api/method"):
frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1]
frappe.handler.execute_cmd(frappe.form_dict.cmd)
print frappe.response
finally:
frappe.destroy()
@click.command('doctor')
def doctor():
"Get diagnostic info about background workers"
from frappe.utils.doctor import doctor as _doctor
frappe.init('')
return _doctor()
@click.command('celery-doctor')
@click.option('--site', help='site name')
def celery_doctor(site=None):
"Get diagnostic info about background workers"
from frappe.utils.doctor import celery_doctor as _celery_doctor
frappe.init('')
return _celery_doctor(site=site)
@click.command('purge-all-tasks')
def purge_all_tasks():
"Purge any pending periodic tasks of 'all' event. Doesn't purge hourly, daily and weekly"
frappe.init('')
from frappe.utils.doctor import purge_pending_tasks
count = purge_pending_tasks()
print "Purged {} tasks".format(count)
@click.command('dump-queue-status')
def dump_queue_status():
	"Dump detailed diagnostic information for task queues in JSON format"
	# (Also fixes the "infomation" typo in the CLI help text above.)
	frappe.init('')
	from frappe.utils.doctor import dump_queue_status as _dump_queue_status
	# Single-argument print() works identically on Python 2 and 3.
	print(json.dumps(_dump_queue_status(), indent=1))
@click.command('make-app')
@click.argument('destination')
@click.argument('app_name')
def make_app(destination, app_name):
	# Scaffold a new frappe app skeleton named `app_name` under `destination`.
	from frappe.utils.boilerplate import make_boilerplate
	make_boilerplate(destination, app_name)
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
	# Thin CLI wrapper around use(): records `site` as the bench default.
	use(site, sites_path=sites_path)
def use(site, sites_path='.'):
	# Persist `site` as the bench's default site by writing its name into
	# the currentsite.txt marker file inside `sites_path`.
	marker = os.path.join(sites_path, "currentsite.txt")
	with open(marker, "w") as handle:
		handle.write(site)
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@pass_context
def backup(context, with_files=False, backup_path_db=None, backup_path_files=None, quiet=False):
	"Backup"
	# Take a scheduled-style backup of every site in the bench context;
	# --with-files additionally archives the site's file attachments.
	from frappe.utils.backups import scheduled_backup
	verbose = context.verbose
	for site in context.sites:
		frappe.init(site=site)
		frappe.connect()
		odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, force=True)
		if verbose:
			from frappe.utils import now
			# format() + single-argument print() produces the same output as
			# the old Python-2-only multi-argument print statements.
			print("database backup taken - {0} - on {1}".format(odb.backup_path_db, now()))
			if with_files:
				print("files backup taken - {0} - on {1}".format(odb.backup_path_files, now()))
		frappe.destroy()
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
	# Remove `app` from each site's installed-apps list without touching
	# the app's code or schema; frappe.destroy() always runs via finally.
	from frappe.installer import remove_from_installed_apps
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			remove_from_installed_apps(app)
		finally:
			frappe.destroy()
@click.command('uninstall-app')
@click.argument('app')
@click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False)
@pass_context
def uninstall(context, app, dry_run=False):
	# Fully remove `app` (doctypes included) from each site; with --dry-run
	# the installer only reports what would be deleted.
	from frappe.installer import remove_app
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			remove_app(app, dry_run)
		finally:
			frappe.destroy()
def move(dest_dir, site):
	"""Move a site's directory into `dest_dir` and return the final path.

	If a dump with the same name already exists in `dest_dir`, a numeric
	suffix is appended (site, site1, site2, ...) until a free name is found.
	Raises Exception when `dest_dir` is not an existing directory.
	"""
	import os
	if not os.path.isdir(dest_dir):
		# Parenthesized raise form is valid on both Python 2 and 3; the
		# original `raise Exception, "..."` comma syntax is Python-2-only.
		raise Exception("destination is not a directory or does not exist")
	frappe.init(site)
	old_path = frappe.utils.get_site_path()
	new_path = os.path.join(dest_dir, site)
	# Probe new_path, new_path1, new_path2, ... until an unused name is found
	# (same sequence as the original flag-driven loop).
	count = 0
	final_new_path = new_path
	while os.path.exists(final_new_path):
		count += 1
		final_new_path = new_path + str(count)
	os.rename(old_path, final_new_path)
	frappe.destroy()
	return final_new_path
@click.command('set-config')
@click.argument('key')
@click.argument('value')
@pass_context
def set_config(context, key, value):
	# Write key=value into each site's site_config.json.
	from frappe.installer import update_site_config
	for site in context.sites:
		frappe.init(site=site)
		update_site_config(key, value)
		frappe.destroy()
@click.command('drop-site')
@click.argument('site')
@click.option('--root-login', default='root')
@click.option('--root-password')
def drop_site(site, root_login='root', root_password=None):
	# Back up the site (including files), drop its database and DB user via
	# a root MySQL connection, then move the site folder to ./archived_sites.
	from frappe.installer import get_current_host, make_connection
	from frappe.model.db_schema import DbManager
	from frappe.utils.backups import scheduled_backup
	frappe.init(site=site)
	frappe.connect()
	# Safety net: take a full backup before anything destructive happens.
	scheduled_backup(ignore_files=False, force=True)
	db_name = frappe.local.conf.db_name
	# Switch to a root connection so we are allowed to drop the database.
	frappe.local.db = make_connection(root_login, root_password)
	dbman = DbManager(frappe.local.db)
	dbman.delete_user(db_name, get_current_host())
	dbman.drop_database(db_name)
	# archived_sites lives three levels above the frappe app dir (bench root).
	archived_sites_dir = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites')
	if not os.path.exists(archived_sites_dir):
		os.mkdir(archived_sites_dir)
	move(archived_sites_dir, site)
@click.command('version')
@pass_context
def get_version(context):
	# Print "<module> <version>" for every installed app module that
	# declares __version__, in sorted module order.
	frappe.init(site=context.sites[0])
	for m in sorted(frappe.local.app_modules.keys()):
		module = frappe.get_module(m)
		if hasattr(module, "__version__"):
			# Single-argument print() is identical on Python 2 and 3;
			# the bare `print "..."` statement form was Python-2-only.
			print("{0} {1}".format(m, module.__version__))
# commands = [
# new_site,
# restore,
# install_app,
# run_patch,
# migrate,
# add_system_manager,
# celery
# ]
# Registry of every click command exported by this module; the bench CLI
# consumes this list when assembling its command group.
commands = [
	new_site,
	restore,
	reinstall,
	install_app,
	list_apps,
	add_system_manager,
	migrate,
	run_patch,
	reload_doc,
	build,
	watch,
	clear_cache,
	clear_website_cache,
	destroy_all_sessions,
	sync_www,
	build_website,
	make_docs,
	sync_docs,
	write_docs,
	build_docs,
	reset_perms,
	execute,
	celery,
	trigger_scheduler_event,
	enable_scheduler,
	disable_scheduler,
	export_doc,
	export_json,
	export_csv,
	export_fixtures,
	import_doc,
	import_csv,
	_bulk_rename,
	build_message_files,
	get_untranslated,
	update_translations,
	set_admin_password,
	mysql,
	run_tests,
	serve,
	request,
	doctor,
	celery_doctor,
	purge_all_tasks,
	dump_queue_status,
	console,
	make_app,
	_use,
	backup,
	remove_from_installed_apps,
	uninstall,
	drop_site,
	set_config,
	get_version,
	]
|
|
"""
Make a "broken" horizontal bar plot, i.e. one with gaps, of run times.
(c) 2015 Massachusetts Institute of Technology
"""
import numpy
import pprint
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
label_fontsize = 11  # font size for the per-segment annotations on the plot
# One label per timing segment, in the same order as the (start, end) tuples
# below; commented-out entries correspond to segments excluded from the data.
labels = ['Disk Reset',
# 'Power On',
'OS Boot',
'OS Stabilize',
'Key Presses',
'Mem. (Clean)',
'Compress (Clean)',
# 'Start Capture',
'Buttons (Clean)',
'Run Binary',
'Mem. (Interim)',
'Screenshot (Interim)',
'Buttons (Click)',
'Extra Sleep',
'Mem. (Dirty)',
'Screenshot (Final)',
'Compress (Dirty)',
'Shutdown',
'Store Results',
# NOTE(review): 'wtf' entries look like padding placeholders so the label
# list is at least as long as the tuple lists — confirm before relying on them.
'wtf',
'wtf',
'wtf',
'wtf']
# Index of the segment annotated as "Binary Executed".
mal_executed_index = 7
# Segment indices whose label is drawn to the LEFT of the bar instead of right.
flip_text = [6]
def normalize_tuples(tuples):
    """Convert absolute (start, end) timestamp pairs into relative
    (offset_from_first_start, duration) pairs.

    The first tuple's start time becomes the origin (offset 0).
    """
    origin = tuples[0][0]
    # Tuple unpacking + a comprehension replaces the manual append loop and
    # avoids shadowing the builtin `tuple` as the original loop variable did.
    return [(start - origin, end - start) for start, end in tuples]
# Get our virtual data
# Raw (start, end) epoch timestamps for each segment of the virtual-machine run.
virtual_tuples = [(1439831168.838292, 1439831169.377921),
(1439831169.377968, 1439831190.231804),
(1439831190.231869, 1439831250.232374),
(1439831250.236563, 1439831287.317039),
(1439831287.317097, 1439831309.774347),
(1439831310.612543, 1439831402.00221), # Updated manually
(1439831311.01773, 1439831319.4211), (1439831319.929066, 1439831379.980296), (1439831379.98037, 1439831403.835613), (1439831403.835616, 1439831404.160049), (1439831404.160206, 1439831412.36363), (1439831412.367247, 1439831499.982088), (1439831499.98215, 1439831522.069226), (1439831522.069228, 1439831522.378756), (1439831522.378882, 1439831622.912597), (1439831622.912614, 1439831628.073827), (1439831628.108982, 1439831661.06076)]
# Rebase to (offset, duration) pairs relative to the first segment's start.
virtual_tuples = normalize_tuples(virtual_tuples)
pprint.pprint(virtual_tuples)
# Get our physical data
# Same segments, measured on the physical machine.
physical_tuples = [(1439830680.396736, 1439831070.997367),
(1439831070.997433, 1439831114.95812),
(1439831114.958218, 1439831175.002975),
(1439831175.007641, 1439831216.18251),
(1439831216.182679, 1439831305.710553),
(1439831306.717004, 1439831454.234577), # Updated manually
(1439831307.13812, 1439831317.108357),
(1439831318.016684, 1439831378.074144),
(1439831378.074319, 1439831455.997746),
(1439831455.997755, 1439831460.148652),
(1439831460.148693, 1439831475.965947),
(1439831475.972392, 1439831498.053105),
(1439831498.053454, 1439831589.70029),
(1439831589.700302, 1439831594.548414),
(1439831594.548729, 1439831744.423983),
(1439831744.424003, 1439831772.876672),
(1439831773.100489, 1439831795.210495)]
physical_tuples = normalize_tuples(physical_tuples)
pprint.pprint(physical_tuples)
# physical_tuples = virtual_tuples
# Two stacked subplots: ax1 = physical run, ax2 = virtual run. Bars are drawn
# top-down, so y_val starts at the segment count and decreases each iteration.
fig, (ax1, ax2) = plt.subplots(2)
y_val = len(virtual_tuples)
for idx in range(len(virtual_tuples)):
    ax1.broken_barh([ physical_tuples[idx] ] , (y_val, 1), facecolors='grey')
    ax2.broken_barh([ virtual_tuples[idx] ] , (y_val, 1), facecolors='grey')
    print virtual_tuples[idx]
    if idx == mal_executed_index:
        # Highlight the segment where the binary under analysis executed.
        ax1.annotate('Binary Executed', (physical_tuples[idx][0], y_val+.5),
                xytext=(physical_tuples[idx][0]-500, y_val-2),
                arrowprops=dict(facecolor='black',
                                arrowstyle="->"),
                fontsize=label_fontsize)
        ax2.annotate('Binary Executed', (virtual_tuples[idx][0], y_val+.5),
                xytext=(virtual_tuples[idx][0]-125, y_val-2),
                arrowprops=dict(facecolor='black',
                                arrowstyle="->"),
                fontsize=label_fontsize)
    elif idx in flip_text:
        # These labels are placed to the LEFT of the bar's start.
        annotate_x = physical_tuples[idx][0]
        ax1.annotate(labels[idx], (annotate_x, y_val+.5),
                xytext=(annotate_x-300, y_val+.5),
                arrowprops=dict(facecolor='black',
                                arrowstyle="->"),
                fontsize=label_fontsize)
        annotate_x = virtual_tuples[idx][0]
        ax2.annotate(labels[idx], (annotate_x, y_val+.5),
                xytext=(annotate_x-125, y_val+1),
                arrowprops=dict(facecolor='black',
                                arrowstyle="->"),
                fontsize=label_fontsize)
    else:
        # Default: label to the RIGHT of the bar's end.
        annotate_x = physical_tuples[idx][1] + physical_tuples[idx][0]
        ax1.annotate(labels[idx], (annotate_x, y_val+.5),
                xytext=(annotate_x+20, y_val+1),
                arrowprops=dict(facecolor='black',
                                arrowstyle="->"),
                fontsize=label_fontsize)
        annotate_x = virtual_tuples[idx][1] + virtual_tuples[idx][0]
        ax2.annotate(labels[idx], (annotate_x, y_val+.5),
                xytext=(annotate_x+20, y_val+1),
                arrowprops=dict(facecolor='black',
                                arrowstyle="->"),
                fontsize=label_fontsize)
    y_val -= 1
    # NOTE(review): this increment is a no-op — the for statement rebinds idx
    # at the top of every iteration.
    idx += 1
# ax1.set_xlabel('Seconds Elapsed')
ax1.set_ylabel('Physical Analysis', fontsize=20)
ax2.set_ylabel('Virtual Analysis', fontsize=20)
# Rightmost extent of each run = last segment's offset + duration.
max_x_virt = virtual_tuples[-1][0]+virtual_tuples[-1][1]
max_x_phy = physical_tuples[-1][0]+physical_tuples[-1][1]
ax1.set_ylim([1,len(virtual_tuples)+1])
ax2.set_ylim([1,len(virtual_tuples)+1])
ax2.set_xlabel('Time Elapsed (Minutes)', fontsize=20)
yticks = numpy.arange(len(physical_tuples))+1.5
# Remove top and right border
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
# ax1.set_yticks(yticks)
# ax1.set_yticklabels(['']*len(virtual_tuples))
#
#
# ax2.set_yticks(yticks)
# ax2.set_yticklabels(['']*len(virtual_tuples))
ax1.set_yticks([])
ax2.set_yticks([])
# labels_reversed = []
# for x in reversed(labels):
#	labels_reversed.append(x)
# ax2.set_yticklabels([''] + labels_reversed)
ax2.grid(True)
ax1.grid(True)
# x data is in seconds; ticks every 120s (physical) / 60s (virtual) are
# relabelled as minutes below.
ax1.set_xticks(range(0,int(max_x_phy*1.2),120))
ax1.set_xticklabels(range(0,200,2))
ax2.set_xticks(range(0,int(max_x_virt*1.2),60))
ax2.set_xticklabels(range(0,200,1))
ax1.set_xlim([0,max_x_phy*1.2])
ax2.set_xlim([0,max_x_virt*1.2])
for tick in ax1.xaxis.get_major_ticks():
    tick.label.set_fontsize(15)
for tick in ax1.yaxis.get_major_ticks():
    tick.label.set_fontsize(15)
for tick in ax2.xaxis.get_major_ticks():
    tick.label.set_fontsize(15)
for tick in ax2.yaxis.get_major_ticks():
    tick.label.set_fontsize(15)
# ax.annotate('race interrupted', (61, 25),
#             xytext=(0.8, 0.9), textcoords='axes fraction',
#             arrowprops=dict(facecolor='black', shrink=0.05),
#             fontsize=16,
#             horizontalalignment='right', verticalalignment='top')
plt.tight_layout()
plt.savefig("runtime.eps", format='eps', dpi=1000)
# plt.show()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from proton.handlers import MessagingHandler
from proton.reactor import Container
from proton import Message, Endpoint
from system_test import main_module, TIMEOUT, TestTimeout
from system_test import unittest
class DrainMessagesHandler(MessagingHandler):
    # Sends 10 messages to org.apache.dev while the receiver takes them one
    # credit at a time for the first 4, then issues drain(20). Success is
    # declared once all 10 arrive and the drained link's credit is back to 0.
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.received_count = 0
        self.address = address
        # Remains set unless the success path below clears it.
        self.error = "Unexpected Exit"
    def timeout(self):
        self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)
    def on_link_flow(self, event):
        # Only treat the link as sendable once it is active at both ends.
        if event.link.is_sender and event.link.credit \
                and event.link.state & Endpoint.LOCAL_ACTIVE \
                and event.link.state & Endpoint.REMOTE_ACTIVE :
            self.on_sendable(event)
        # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
        # messages. That along with 10 messages received indicates that the drain worked and we can
        # declare that the test is successful
        if self.received_count == 10 and event.link.credit == 0:
            self.error = None
            self.timer.cancel()
            self.receiver.close()
            self.sender.close()
            self.conn.close()
    def on_sendable(self, event):
        if self.sent_count < 10:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1
                if self.received_count < 4:
                    event.receiver.flow(1)
                elif self.received_count == 4:
                    # We are issuing a drain of 20. This means that we will receive all the 10 messages
                    # that the sender is sending. The router will also send back a response flow frame with
                    # drain=True but I don't have any way of making sure that the response frame reached the
                    # receiver
                    event.receiver.drain(20)
    def run(self):
        Container(self).run()
class DrainOneMessageHandler(DrainMessagesHandler):
    # Variant of DrainMessagesHandler that drains only ONE extra message:
    # 4 messages received normally + drain(1) = 5 total expected.
    def __init__(self, address):
        super(DrainOneMessageHandler, self).__init__(address)
    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1
                if self.received_count < 4:
                    event.receiver.flow(1)
                elif self.received_count == 4:
                    # We are issuing a drain of 1 after we receive the 4th message.
                    # This means that going forward, we will receive only one more message.
                    event.receiver.drain(1)
                # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
                # messages. That along with 5 messages received (4 earlier messages and 1 extra message for drain=1)
                # indicates that the drain worked and we can declare that the test is successful
                if self.received_count == 5 and event.link.credit == 0:
                    self.error = None
                    self.timer.cancel()
                    self.receiver.close()
                    self.sender.close()
                    self.conn.close()
class DrainNoMessagesHandler(MessagingHandler):
    # Drains when NO messages are pending: success is simply the receiver's
    # credit returning to 0 after drain(1), proving the drain flow completes.
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.error = "Unexpected Exit"
    def timeout(self):
        self.error = "Timeout Expired"
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)
    def on_sendable(self, event):
        # Nothing is ever sent; the sendable event just triggers the drain.
        self.receiver.drain(1)
    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()
    def run(self):
        Container(self).run()
class DrainNoMoreMessagesHandler(MessagingHandler):
    # Sends exactly one message, receives it, then drains after settlement;
    # success is the receiver's credit returning to 0.
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMoreMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.sent = 0
        self.rcvd = 0
        self.error = "Unexpected Exit"
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d" % (self.sent, self.rcvd)
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)
    def on_sendable(self, event):
        # Guard on self.sent so only a single message is ever sent.
        if self.sent == 0:
            msg = Message(body="Hello World")
            event.sender.send(msg)
            self.sent += 1
    def on_message(self, event):
        self.rcvd += 1
    def on_settled(self, event):
        # Drain only after the one delivery has settled.
        self.receiver.drain(1)
    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()
    def run(self):
        Container(self).run()
class DrainMessagesMoreHandler(MessagingHandler):
    """
    Make sure the clients can send/receive after going through a drain cycle.
    Send phase
    1. Sender sending first 10 messages
    2. Sender paused waiting for drain to finish
    3. Sender is sending second 10 messages
    4. Sender is done.
    Receive phase
    1. Receiver receiving first four messages; At #4 receiver issues drain 4,20
    2. Reciever receives messages 5..10.
       When 10 messages have been received and link credit =0 the drain is done
       Receiver issues 10 credits
    3. Receiver recieves messages 11..20.
    4. Receiver is done
    At issue in DISPATCH-1055 is that the 10 credits issued in Receive step 2
    are never propagated across a link route to the 'broker'.
    This code is instantiated with and without the link route to demonstrate that
    it works properly when the 'test-router' is handling the drain by itself
    and that it fails only on the link route.
    """
    def __init__(self, address, route_name):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainMessagesMoreHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.received_count = 0
        self.address = address
        self.error = "Unexpected Exit"
        # Phase counters mirror the state machines described in the docstring.
        self.send_phase = 1
        self.recv_phase = 1
        self.route_name = route_name
        self.verbose_printing = False
    def show_state(self):
        # One-line snapshot of both state machines, for debug prints.
        return str("send_phase:" + str(self.send_phase)
                   + ", sent_count:" + str(self.sent_count)
                   + ", recv_phase:" + str(self.recv_phase)
                   + ", receive_count:" + str(self.received_count)
                   + ", receiver_credit:" + str(self.receiver.credit)
                   + ", sender_credit:" + str(self.sender.credit))
    def printme(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; harmless
        # here (show_state() pre-stringifies) but worth renaming eventually.
        if (self.verbose_printing):
            print(str + " " + self.show_state())
    def timeout(self):
        self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, source=self.route_name)
        self.sender = event.container.create_sender(self.conn, target=self.route_name)
        self.receiver.flow(1)
    def on_link_flow(self, event):
        if event.link.is_sender and event.link.credit \
                and event.link.state & Endpoint.LOCAL_ACTIVE \
                and event.link.state & Endpoint.REMOTE_ACTIVE :
            self.on_sendable(event)
        # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
        # messages. That along with 10 messages received indicates that the drain worked.
        if self.send_phase == 2 and self.received_count == 10 and event.link.credit == 0:
            self.printme("sender transitions to phase 3 - drain completed, send new flow now")
            self.receiver.flow(10)
            self.send_phase = 3
            if event.link.is_sender and event.link.credit \
                    and event.link.state & Endpoint.LOCAL_ACTIVE \
                    and event.link.state & Endpoint.REMOTE_ACTIVE :
                self.on_sendable(event)
        self.printme(("sender " if event.link.is_sender else "receiver ") + "exit on_link_flow:")
    def on_sendable(self, event):
        if event.link.is_sender and self.send_phase == 1 and self.sent_count < 10:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
            if self.sent_count == 10:
                self.printme("sender transitions to phase 2 - wait for drain to finish")
                self.send_phase = 2
        elif event.link.is_sender and self.send_phase == 3 and self.sent_count < 20:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
            if self.sent_count == 20:
                self.printme("sender transitions to phase 4 - done sending")
                self.send_phase = 4
        self.printme(("sender " if event.link.is_sender else "receiver ") + "exit on_sendable:")
    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1
                if self.recv_phase == 1 and self.received_count < 4:
                    event.receiver.flow(1)
                elif self.recv_phase == 1 and self.received_count == 4:
                    # We are issuing a drain of 20. This means that we will receive all the 10 messages
                    # that the sender is sending. The router will also send back a response flow frame with
                    # drain=True but I don't have any way of making sure that the response frame reached the
                    # receiver
                    self.printme("receiver transitions to phase 2 - sending drain now")
                    event.receiver.drain(20)
                    self.recv_phase = 2
                elif self.recv_phase == 2 and self.received_count == 10:
                    self.printme("receiver transitions to phase 3")
                    self.recv_phase = 3
                    msg = Message(body="Hello World", properties={'seq': self.sent_count})
                    dlv = self.sender.send(msg)
                    dlv.settle()
                    self.sent_count += 1
                elif self.recv_phase == 3 and self.received_count == 20:
                    self.printme("receiver transitions to phase 4 - test is completed successfully")
                    self.recv_phase = 4
                    self.error = None
                    self.timer.cancel()
                    self.receiver.close()
                    self.sender.close()
                    self.conn.close()
        self.printme("exit on_message:")
    def run(self):
        Container(self).run()
# Standard system_test entry point: run this module's tests under unittest.
if __name__ == '__main__':
    unittest.main(main_module())
|
|
from bugle.models import Blast
from bugle.search import query_to_q_object
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.paginator import Paginator, EmptyPage
from django.core.urlresolvers import reverse
from django.core.serializers import json
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.utils import simplejson
from django.utils.decorators import method_decorator
from django.utils.html import escape
from twitter_api.models import TwitterProfile
import urllib
def datetime_to_twitter(dt):
    """Format a datetime in Twitter's created_at style,
    e.g. 'Thu Jan 02 03:04:05 +0000 2020'.

    Day/month names are spelled out explicitly because strftime's %a/%b
    are locale-dependent and Twitter clients expect English names.
    The '+0000' offset is hard coded; dt is presumed to already be UTC
    (the original comment said "Hard coded DST, ha").
    """
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return '%s %s %s +0000 %s' % (
        days[dt.weekday()], months[dt.month - 1],
        dt.strftime('%d %H:%M:%S'), dt.year)
def dict_to_xml(dictionary, recursion=False):
    """
    Tweetdeck chokes on ElementTree's XML.
    """
    # Hand-rolled dict -> XML serialiser. Keys become element names (and are
    # NOT escaped — assumed to be trusted identifiers); values recurse for
    # dicts/lists, render empty for None, and are HTML-escaped otherwise.
    # NOTE(review): `basestring` makes this Python-2-only.
    s = ''
    if not recursion:
        # Only the outermost call emits the XML declaration.
        s += '<?xml version="1.0" encoding="UTF-8"?>'
    for key, value in dictionary.items():
        if isinstance(value, dict):
            formatted_value = dict_to_xml(value, recursion=True)
        elif isinstance(value, list):
            formatted_value = ''.join(dict_to_xml(d, recursion=True) for d in value)
        elif value is None:
            formatted_value = ''
        elif isinstance(value, basestring):
            formatted_value = escape(value)
        else:
            # Non-string scalars (ints, bools) are JSON-encoded then escaped.
            formatted_value = escape(simplejson.dumps(value))
        s += '<%s>%s</%s>' % (key, formatted_value, key)
    return s
class View(object):
    """Base class for Twitter-API-compatible endpoints.

    Subclasses set `resource_name` and implement get_resource(); __call__
    dispatches to render_<format> (json or xml) based on the URLconf's
    'format' kwarg.
    """
    resource_name = None
    login_required = False
    # Evaluated once at import time, on purpose:
    current_site = Site.objects.get_current() # this isn't going to change
    def __call__(self, request, *args, **kwargs):
        method = getattr(self, 'render_%s' % kwargs['format'], None)
        del kwargs['format']
        if method is None:
            raise Http404
        if self.login_required:
            # Wrap per-request so anonymous users get the login redirect.
            method = login_required(method)
        return method(request, *args, **kwargs)
    def render_json(self, request, *args, **kwargs):
        content = simplejson.dumps(
            self.get_resource(request, *args, **kwargs),
            indent=2, cls=json.DjangoJSONEncoder, ensure_ascii=False)
        if 'callback' in request.GET:
            # JSONP support.
            content = '%s(%s)' % (request.GET['callback'], content)
        return HttpResponse(content, content_type='application/json')
    def render_xml(self, request, *args, **kwargs):
        return HttpResponse(dict_to_xml({
            self.resource_name: self.get_resource(request, *args, **kwargs)
        }), content_type='text/xml')
    def get_resource(self, request, *args, **kwargs):
        raise NotImplementedError
    def get_user(self, request, *args, **kwargs):
        # Resolve the target user from user_id / screen_name / ambiguous id
        # GET params. NOTE(review): implicitly returns None when none of the
        # parameters are present, and User.DoesNotExist propagates on a miss.
        if 'user_id' in request.GET:
            return User.objects.get(id=request.GET['user_id'])
        elif 'screen_name' in request.GET:
            return User.objects.get(username=request.GET['screen_name'])
        elif 'id' in request.GET:
            # 'id' may be numeric (pk) or a username string.
            try:
                user_id = int(request.GET['id'])
            except ValueError:
                return User.objects.get(username=request.GET['id'])
            else:
                return User.objects.get(id=user_id)
    def tweeterise_timeline(self, request, blasts):
        """
        Converts an iterable of blasts into tweets.
        """
        tweets = []
        for blast in blasts:
            tweets.append(self.tweeterise_blast(request, blast))
        return tweets
    def get_text(self, blast):
        # Tweet text = message, plus permalink when extended, plus a bracketed
        # attachment URL when present.
        text = [blast.message]
        if blast.extended:
            text.append('http://%s/blast/%s/' % (
                self.current_site.domain,
                blast.id
            ))
        if blast.attachment:
            text.append('[ http://%s%s ]' % (
                self.current_site.domain,
                blast.attachment.url
            ))
        return ' '.join(text)
    def tweeterise_blast(self, request, blast):
        # Build a Twitter "status" dict from a Blast.
        # NOTE(review): 'contributors' is listed twice in this literal; the
        # duplicate is harmless (same value) but should be removed.
        d = {
            'contributors': None,
            'geo': None,
            'in_reply_to_status_id': None,
            'in_reply_to_user_id': None,
            'favorited': request.user.is_authenticated() and blast in request.user.favourites.all(),
            'source': 'Fort',
            'created_at': datetime_to_twitter(blast.created),
            'coordinates': None,
            'user': self.tweeterise_user(request, blast.user),
            'place': None,
            'id': blast.id,
            'contributors': None,
            'in_reply_to_screen_name': None,
            'truncated': False,
            'text': self.get_text(blast),
        }
        if blast.in_reply_to:
            d['in_reply_to_status_id'] = blast.in_reply_to.pk
        return d
    def get_profile_image(self, user):
        # The 'subversion' bot user deliberately gets no avatar.
        profile_image = ''
        if user.username != 'subversion':
            try:
                profile_image = 'http://' + self.current_site.domain + user.twitter_profile.profile_image.url
            except TwitterProfile.DoesNotExist:
                pass
        return profile_image
    def tweeterise_user(self, request, user):
        # Build a Twitter "user" dict; everyone follows everyone on Fort, so
        # follower/friend counts are just the total user count.
        user_count = User.objects.count()
        return {
            'profile_sidebar_fill_color': 'ffffff',
            'description': '',
            'location': 'Fort.',
            'notifications': False,
            'profile_background_tile': False,
            'profile_image_url': self.get_profile_image(user),
            'statuses_count': user.blasts.count(),
            'profile_sidebar_border_color': 'eeeeee',
            'profile_use_background_image': True,
            'followers_count': user_count,
            'screen_name': user.username,
            'contributors_enabled': False,
            'lang': 'en',
            'created_at': datetime_to_twitter(user.date_joined),
            'friends_count': user_count,
            'geo_enabled': False,
            'profile_background_color': 'B2DFDA',
            'favourites_count': user.favourites.count(),
            'following': True,
            'verified': True,
            'profile_text_color': '333333',
            'protected': False,
            'time_zone': 'London',
            'name': user.get_full_name(),
            'profile_link_color': '93A644',
            'url': 'http://%s/%s/' % (self.current_site.domain, user.username),
            'id': user.id,
            'profile_background_image_url': '',
            'utc_offset': 0,
        }
class TimelineView(View):
    # Base view for paginated blast timelines; subclasses narrow get_blasts().
    def get_blasts(self, request, *args, **kwargs):
        # Exclude the 'subversion' bot; honour Twitter's since_id/max_id
        # range parameters.
        blasts = Blast.objects.exclude(user__username='subversion')
        if 'since_id' in request.GET:
            blasts = blasts.filter(id__gt=request.GET['since_id'])
        if 'max_id' in request.GET:
            blasts = blasts.filter(id__lte=request.GET['max_id'])
        return blasts
    def get_page(self, request, *args, **kwargs):
        # Page size defaults to 20; NOTE(review): a valid 'rpp' param silently
        # overrides 'count' because it is parsed second.
        count = 20
        try:
            count = int(request.GET['count'])
        except (ValueError, KeyError):
            pass
        try:
            count = int(request.GET['rpp'])
        except (ValueError, KeyError):
            pass
        page = 1
        try:
            page = int(request.GET['page'])
        except (ValueError, KeyError):
            pass
        try:
            return Paginator(self.get_blasts(request, *args, **kwargs).order_by('-created'), count).page(page)
        except EmptyPage:
            raise Http404
    def get_resource(self, request, *args, **kwargs):
        return self.tweeterise_timeline(request, self.get_page(request, *args, **kwargs).object_list)
    def render_xml(self, request, *args, **kwargs):
        # Timelines need a <statuses><status>...</status></statuses> wrapper,
        # unlike the flat single-resource XML of the base class.
        d = {'statuses': []}
        for tweet in self.get_resource(request, *args, **kwargs):
            d['statuses'].append({'status': tweet})
        return HttpResponse(dict_to_xml({
            self.resource_name: d
        }), content_type='text/xml')
class LoginRequiredTimelineView(TimelineView):
    # Same timeline, but View.__call__ wraps the render method in
    # django's login_required decorator.
    login_required = True
class UserTimelineView(TimelineView):
    # Timeline restricted to a single user identified by GET params.
    def get_blasts(self, request):
        user = self.get_user(request)
        if not user:
            raise Http404
        return super(UserTimelineView, self).get_blasts(request).filter(
            user=user)
class MentionsView(TimelineView):
    # Timeline of blasts that mention the authenticated user.
    login_required = True
    def get_blasts(self, request):
        return super(MentionsView, self).get_blasts(request).filter(
            mentioned_users=request.user)
class FavoritesView(TimelineView):
    # Timeline of blasts favourited by a user: the URL `id` (numeric pk or
    # username) when given, otherwise the authenticated user.
    login_required = True
    def get_blasts(self, request, id=None):
        if id:
            # `id` may be a pk or a username; try numeric first.
            try:
                user_id = int(id)
            except ValueError:
                user = User.objects.get(username=id)
            else:
                user = User.objects.get(id=user_id)
        else:
            user = request.user
        return super(FavoritesView, self).get_blasts(request).filter(
            favourited_by=user)
class SearchView(TimelineView):
    # Twitter search API emulation: returns a search-result envelope rather
    # than a bare status list.
    def get_blasts(self, request):
        blasts = super(SearchView, self).get_blasts(request)
        if 'q' in request.GET and request.GET['q']:
            blasts = blasts.filter(query_to_q_object(request.GET['q'], 'message'))
        return blasts
    def get_resource(self, request):
        # NOTE(review): urllib.quote is the Python 2 spelling (urllib.parse
        # on Python 3).
        output = {
            'results': [],
            'since_id': None,
            'max_id': None, # not supported
            'refresh_url': '?' + request.GET.urlencode(),
            'results_per_page': None,
            'next_page': None,
            'completed_in': 0,
            'page': int(request.GET.get('page', 1)),
            'query': urllib.quote(request.GET.get('q', '')),
        }
        page = None
        try:
            page = self.get_page(request)
        except Http404:
            pass
        # If the page doesn't exist or no query has been supplied, return an
        # empty result set instead of 404ing
        if not page or not request.GET.get('q', None):
            return output
        for blast in page.object_list:
            # Search results use a flatter tweet shape than tweeterise_blast.
            tweet = {
                'text': self.get_text(blast),
                'to_user_id': None,
                'to_user': None,
                'from_user': blast.user.username,
                'metadata': {},
                'id': blast.id,
                'from_user_id': blast.user.id,
                'iso_language_code': 'en',
                'source': 'Fort',
                'profile_image_url': self.get_profile_image(blast.user),
                'created_at': datetime_to_twitter(blast.created),
            }
            if blast.in_reply_to:
                tweet['to_user_id'] = blast.in_reply_to.user.id
                tweet['to_user'] = blast.in_reply_to.user.username
            output['results'].append(tweet)
        next_page_dict = request.GET.copy()
        next_page_dict['page'] = page.number + 1
        output['next_page'] = '?' + next_page_dict.urlencode()
        # Echo back since_id / rpp when they parse as integers.
        try:
            output['since_id'] = int(request.GET['since_id'])
        except (KeyError, ValueError):
            pass
        try:
            output['results_per_page'] = int(request.GET['rpp'])
        except (KeyError, ValueError):
            pass
        return output
class StatusUpdateView(View):
    # POST-only endpoint that creates a new blast from the 'status' field
    # and returns it in tweet form.
    login_required = True
    resource_name = 'status'
    def get_resource(self, request):
        if request.method != 'POST':
            raise Http404
        return self.tweeterise_blast(request, Blast.objects.create(
            user=request.user,
            message=request.POST['status'].strip(),
        ))
class FavoritesCreateView(View):
    # Mark blast `id` as a favourite of the authenticated user.
    login_required = True
    resource_name = 'status'
    def get_resource(self, request, id):
        blast = get_object_or_404(Blast, id=id)
        blast.favourited_by.add(request.user)
        return self.tweeterise_blast(request, blast)
class FavoritesDestroyView(View):
    # Remove blast `id` from the authenticated user's favourites.
    login_required = True
    resource_name = 'status'
    def get_resource(self, request, id):
        blast = get_object_or_404(Blast, id=id)
        blast.favourited_by.remove(request.user)
        return self.tweeterise_blast(request, blast)
class UsersShowView(View):
    # users/show equivalent: return the profile of the user named in GET.
    resource_name = 'user'
    def get_resource(self, request):
        user = self.get_user(request)
        if not user:
            raise Http404
        return self.tweeterise_user(request, user)
class VerifyCredentialsView(UsersShowView):
    # account/verify_credentials: falls back to the authenticated user when
    # no user is named in the query string.
    login_required = True
    def get_resource(self, request):
        user = self.get_user(request)
        if not user:
            user = request.user
        return self.tweeterise_user(request, user)
def oauth_access_token(request):
    """Fake xAuth token exchange: both token and secret are just the user id."""
    username = request.POST['x_auth_username']
    account = get_object_or_404(User, username=username)
    payload = 'oauth_token=%(user_id)s&oauth_token_secret=%(user_id)s&user_id=%(user_id)s&screen_name=%(username)s&x_auth_expires=0' % {
        'user_id': account.id,
        'username': account.username,
    }
    return HttpResponse(payload)
class RateLimitStatusView(View):
    """Report an effectively unlimited rate limit."""
    resource_name = 'hash'
    def get_resource(self, request):
        return {'remaining-hits': 9999999}
class CurrentTrendsView(View):
    """Serve a fixed set of fake trending topics."""
    def get_resource(self, request):
        # Each trend uses its own name as the search query.
        topics = ['Bacon', 'Space', 'Bunnies', 'Wookie']
        return {
            'trends': {
                '2010-06-22 17:20:00': [
                    {'name': topic, 'query': topic} for topic in topics
                ],
            },
            'as_of': 1277227101,
        }
|
|
import base64
import os
import re
import subprocess
from itertools import takewhile
from django.utils.encoding import smart_str
try:
from staticfiles import finders
except ImportError:
from django.contrib.staticfiles import finders # noqa
from pipeline.conf import settings
from pipeline.utils import to_class, relpath
from pipeline.storage import default_storage
MAX_IMAGE_SIZE = 32700
EMBEDDABLE = r'[/]?embed/'
URL_DETECTOR = r'url\([\'"]?([^\s)]+\.[a-z]+[\?\#\d\w]*)[\'"]?\)'
URL_REPLACER = r'url\(__EMBED__(.+?)(\?\d+)?\)'
DEFAULT_TEMPLATE_FUNC = "template"
TEMPLATE_FUNC = r"""var template = function(str){var fn = new Function('obj', 'var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push(\''+str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/<%=([\s\S]+?)%>/g,function(match,code){return "',"+code.replace(/\\'/g, "'")+",'";}).replace(/<%([\s\S]+?)%>/g,function(match,code){return "');"+code.replace(/\\'/g, "'").replace(/[\r\n\t]/g,' ')+"__p.push('";}).replace(/\r/g,'\\r').replace(/\n/g,'\\n').replace(/\t/g,'\\t')+"');}return __p.join('');");return fn;};"""
MIME_TYPES = {
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.ttf': 'font/truetype',
'.otf': 'font/opentype',
'.woff': 'font/woff'
}
EMBED_EXTS = MIME_TYPES.keys()
FONT_EXTS = ['.ttf', '.otf', '.woff']
class Compressor(object):
    """Concatenates, compiles and compresses JS/CSS asset bundles."""
    # Process-wide cache of base64-encoded asset contents, keyed by path.
    # Intentionally a class attribute so every instance shares it.
    asset_contents = {}
    def __init__(self, storage=default_storage, verbose=False):
        self.storage = storage
        self.verbose = verbose
    def js_compressor(self):
        # Resolved lazily from settings so overrides take effect per access.
        return to_class(settings.PIPELINE_JS_COMPRESSOR)
    js_compressor = property(js_compressor)
    def css_compressor(self):
        return to_class(settings.PIPELINE_CSS_COMPRESSOR)
    css_compressor = property(css_compressor)
    def compress_js(self, paths, templates=None, **kwargs):
        """Concatenate and compress JS files"""
        js = self.concatenate(paths)
        if templates:
            js = js + self.compile_templates(templates)
        if not settings.PIPELINE_DISABLE_WRAPPER:
            # Wrap in an IIFE so the bundle does not leak globals.
            js = "(function() { %s }).call(this);" % js
        compressor = self.js_compressor
        if compressor:
            js = getattr(compressor(verbose=self.verbose), 'compress_js')(js)
        return js
    def compress_css(self, paths, output_filename, variant=None, **kwargs):
        """Concatenate and compress CSS files"""
        css = self.concatenate_and_rewrite(paths, output_filename, variant)
        compressor = self.css_compressor
        if compressor:
            css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
        if not variant:
            return css
        elif variant == "datauri":
            # Inline embeddable assets as base64 data URIs.
            return self.with_data_uri(css)
        else:
            raise CompressorError("\"%s\" is not a valid variant" % variant)
    def compile_templates(self, paths):
        # Compile JS templates into "namespace['name'] = func('source');"
        # assignments, prefixed by the template function itself when the
        # default function name is configured.
        compiled = ""
        if not paths:
            return compiled
        namespace = settings.PIPELINE_TEMPLATE_NAMESPACE
        base_path = self.base_path(paths)
        for path in paths:
            contents = self.read_file(path)
            # Escape newlines and single quotes so the source survives
            # embedding in a single-quoted JS string literal.
            contents = re.sub(r"\r?\n", "\\\\n", contents)
            contents = re.sub(r"'", "\\'", contents)
            name = self.template_name(path, base_path)
            compiled += "%s['%s'] = %s('%s');\n" % (
                namespace,
                name,
                settings.PIPELINE_TEMPLATE_FUNC,
                contents
            )
        compiler = TEMPLATE_FUNC if settings.PIPELINE_TEMPLATE_FUNC == DEFAULT_TEMPLATE_FUNC else ""
        return "\n".join([
            "%(namespace)s = %(namespace)s || {};" % {'namespace': namespace},
            compiler,
            compiled
        ])
    def base_path(self, paths):
        # Longest common directory prefix shared by all template paths.
        def names_equal(name):
            return all(n == name[0] for n in name[1:])
        directory_levels = zip(*[p.split(os.sep) for p in paths])
        return os.sep.join(x[0] for x in takewhile(names_equal, directory_levels))
    def template_name(self, path, base):
        """Find out the name of a JS template"""
        if not base:
            path = os.path.basename(path)
        if path == base:
            base = os.path.dirname(path)
        # Strip the common base and the template extension, then flatten
        # remaining path separators into underscores.
        name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
            re.escape(base), re.escape(settings.PIPELINE_TEMPLATE_EXT)
        ), r"\1", path)
        return re.sub(r"[\/\\]", "_", name)
    def concatenate_and_rewrite(self, paths, output_filename, variant=None):
        """Concatenate together files and rewrite urls"""
        stylesheets = []
        for path in paths:
            def reconstruct(match):
                asset_path = match.group(1)
                # Absolute and protocol-relative URLs are left untouched.
                if asset_path.startswith("http") or asset_path.startswith("//"):
                    return "url(%s)" % asset_path
                asset_url = self.construct_asset_path(asset_path, path,
                    output_filename, variant)
                return "url(%s)" % asset_url
            content = self.read_file(path)
            content = re.sub(URL_DETECTOR, reconstruct, smart_str(content))
            stylesheets.append(content)
        return '\n'.join(stylesheets)
    def concatenate(self, paths):
        """Concatenate together a list of files"""
        return '\n'.join([self.read_file(path) for path in paths])
    def construct_asset_path(self, asset_path, css_path, output_filename, variant=None):
        """Return a rewritten asset URL for a stylesheet"""
        public_path = self.absolute_path(asset_path, os.path.dirname(css_path))
        if self.embeddable(public_path, variant):
            # __EMBED__ markers are replaced later by with_data_uri().
            return "__EMBED__%s" % public_path
        if not os.path.isabs(asset_path):
            asset_path = self.relative_path(public_path, output_filename)
        return asset_path
    def embeddable(self, path, variant):
        """Is the asset embeddable ?"""
        name, ext = os.path.splitext(path)
        font = ext in FONT_EXTS
        if not variant:
            return False
        # Only assets under an "embed/" path segment that actually exist
        # in storage are candidates.
        if not (re.search(EMBEDDABLE, path) and self.storage.exists(path)):
            return False
        if not ext in EMBED_EXTS:
            return False
        # Fonts embed regardless of size; other assets must fit under
        # MAX_IMAGE_SIZE once base64-encoded.
        if not (font or len(self.encoded_content(path)) < MAX_IMAGE_SIZE):
            return False
        return True
    def with_data_uri(self, css):
        def datauri(match):
            path = match.group(1)
            mime_type = self.mime_type(path)
            data = self.encoded_content(path)
            return "url(\"data:%s;charset=utf-8;base64,%s\")" % (mime_type, data)
        # Replace every __EMBED__<path> marker with an inline data URI.
        return re.sub(URL_REPLACER, datauri, css)
    def encoded_content(self, path):
        """Return the base64 encoded contents"""
        # Cached on the class so repeated builds don't re-read assets.
        if path in self.__class__.asset_contents:
            return self.__class__.asset_contents[path]
        data = self.read_file(path)
        self.__class__.asset_contents[path] = base64.b64encode(data)
        return self.__class__.asset_contents[path]
    def mime_type(self, path):
        """Get mime-type from filename"""
        name, ext = os.path.splitext(path)
        return MIME_TYPES[ext]
    def absolute_path(self, path, start):
        """
        Return the absolute public path for an asset,
        given the path of the stylesheet that contains it.
        """
        if os.path.isabs(path):
            path = os.path.join(default_storage.location, path)
        else:
            path = os.path.join(start, path)
        return os.path.normpath(path)
    def relative_path(self, absolute_path, output_filename):
        """Rewrite paths relative to the output stylesheet path"""
        absolute_path = os.path.join(settings.PIPELINE_ROOT, absolute_path)
        output_path = os.path.join(settings.PIPELINE_ROOT, os.path.dirname(output_filename))
        return relpath(absolute_path, output_path)
    def read_file(self, path):
        """Read file content in binary mode"""
        # NOTE(review): reads via the module-level default_storage rather
        # than self.storage - confirm whether custom storages should be
        # honored here.
        file = default_storage.open(path, 'rb')
        content = file.read()
        file.close()
        return content
class CompressorBase(object):
    """Interface for compressor backends; subclasses supply the filters."""
    def __init__(self, verbose):
        self.verbose = verbose
    def filter_css(self, css):
        """Compress a CSS string; must be overridden by subclasses."""
        raise NotImplementedError
    def filter_js(self, js):
        """Compress a JS string; must be overridden by subclasses."""
        raise NotImplementedError
class CompressorError(Exception):
    """Raised when a compression filter fails to run."""
    pass
class SubProcessCompressor(CompressorBase):
    """Base for compressors that shell out to an external binary."""
    def execute_command(self, command, content):
        """Feed ``content`` to ``command`` on stdin and return its stdout.

        Raises CompressorError if the command exits non-zero, using the
        command's stderr as the error message when available.
        """
        pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
            stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() reads stdout/stderr concurrently with writing
        # stdin, avoiding the pipe-buffer deadlock that sequential
        # write/read can hit on large inputs (see subprocess docs); it
        # also waits for the process, setting returncode.
        compressed_content, error = pipe.communicate(smart_str(content))
        if pipe.returncode != 0:
            if not error:
                error = "Unable to apply %s compressor" % self.__class__.__name__
            raise CompressorError(error)
        if self.verbose:
            # Single-argument print() is valid on both Python 2 and 3.
            print(error)
        return compressed_content
|
|
"""
Provides the ApiConnection class
"""
import copy
import functools
import io
import json
import os
import random
import threading
import time
import urllib3
import uuid
import requests
from .dat_jank import post_to_get
from .exceptions import ApiError
from .exceptions import ApiAuthError, ApiConnectionError, ApiTimeoutError
from .exceptions import ApiInternalError, ApiNotFoundError
from .exceptions import ApiInvalidRequestError, ApiConflictError
from .exceptions import Api503RetryError, ApiValidationFailedError
from .constants import REST_PORT, REST_PORT_HTTPS
from .constants import VERSION, RETRY_TIMEOUT
from .dlogging import get_log
from .schema.reader import get_reader
__copyright__ = "Copyright 2017, Datera, Inc."
LOG = get_log(__name__)
# TODO(_alastor_): Add certificate verification
urllib3.disable_warnings()
def _version_to_int(ver):
# Using a factor of 100 per digit so up to 100 versions are supported
# per major/minor/patch/subpatch digit in this calculation
# Example:
# In [2]: _version_to_int("3.3.0.0")
# Out[2]: 303000000
# In [3]: _version_to_int("2.2.7.1")
# Out[3]: 202070100
VERSION_DIGITS = 4
factor = pow(10, VERSION_DIGITS * 2)
div = pow(10, 2)
val = 0
for c in ver.split("."):
val += int(int(c) * factor)
factor /= div
return val
def dat_version_gte(version_a, version_b):
    """Return True when ``version_a`` is at least ``version_b``."""
    left = _version_to_int(version_a)
    right = _version_to_int(version_b)
    return left >= right
def _with_authentication(method):
    """
    Decorator to wrap Api method calls so that we login again if our key
    expires.
    """
    @functools.wraps(method)
    def wrapper_method(self, *args, **kwargs):
        """ Call the original method with a re-login if needed """
        # if we haven't logged in yet, log in and then try the method:
        if not self._logged_in:
            LOG.debug("Log in to API...")
            self.login()
            return method(self, *copy.deepcopy(args), **copy.deepcopy(kwargs))
        # already logged in, but retry if needed in case the key expires:
        # Copies are taken up front because the first attempt may mutate
        # its arguments before failing with an auth error.
        args_copy = copy.deepcopy(args)
        kwargs_copy = copy.deepcopy(kwargs)
        try:
            return method(self, *args, **kwargs)
        except ApiAuthError as e:
            # NOTE(review): ``e.message`` only exists on Python 2-style
            # exceptions - confirm behavior under Python 3.
            if e.message == ('The key provided with the request does not '
                             'correspond to a valid session.'):
                LOG.debug("API auth error, so try to log in again...")
            else:
                LOG.warn("API auth error, so try to log in again...")
            self.login()
            return method(self, *args_copy, **kwargs_copy)
    return wrapper_method
def _with_retry(method):
    """
    Decorator to wrap Api method calls so we retry on 503 errors
    """
    @functools.wraps(method)
    def _wrapper_retry(self, *args, **kwargs):
        """ Call the original method with backoff """
        tstart = time.time()
        backoff = 1
        err = None
        # True when the last failure happened before any response arrived;
        # used to decide POST -> GET recovery on a later conflict.
        no_response = False
        while time.time() - tstart < RETRY_TIMEOUT:
            try:
                # if len(args) > 0 and args[0].lower() == "post":
                #     method(self, *copy.deepcopy(args),
                #            **copy.deepcopy(kwargs))
                #     if not no_response:
                #         raise ApiConnectionError("BadStatusLineWhatever")
                #     if no_response:
                #         raise ApiConflictError("Ruh-Roh")
                # Deep-copy arguments so a failed attempt cannot poison the
                # next retry with mutated inputs.
                return method(
                    self, *copy.deepcopy(args), **copy.deepcopy(kwargs))
            except Api503RetryError as e:
                no_response = False
                # Sleep strategy is chosen by the context: random jitter,
                # linear backoff, or no retry at all.
                if self._context.retry_503_type == "random":
                    err = e
                    slp = round(random.random() * 10, 2)
                    LOG.warn("Hit 503 Retry. "
                             "Adding random sleep and trying again in "
                             "{}s".format(slp))
                    time.sleep(slp)
                elif self._context.retry_503_type == "backoff":
                    err = e
                    LOG.warn("Hit 503 Retry. "
                             "Backing off and trying again in {}s".format(backoff))
                    time.sleep(backoff)
                    backoff += 1
                else:
                    raise
            except ApiConnectionError as e:
                no_response = True
                if self._context.retry_connection_type == "random":
                    err = e
                    slp = round(random.random() * 10, 2)
                    LOG.warn("Hit Connection Error: {} Backing off and trying "
                             "again in {}s".format(e, slp))
                    time.sleep(slp)
                elif self._context.retry_connection_type == "backoff":
                    err = e
                    LOG.warn("Hit Connection Error: {} Backing off and trying "
                             "again in {}s".format(e, backoff))
                    time.sleep(backoff)
                    backoff += 1
                else:
                    raise
            except ApiConflictError as e:
                # ConflictError can only happen with POST requests
                # If the connection dropped before a response, the POST may
                # have succeeded server-side; recover by reading it back.
                if no_response:
                    return post_to_get(
                        self, method, args[1], kwargs['data'], e)
                raise
        raise ApiTimeoutError("Request never succeeded before timeout period "
                              "expired: {}".format(err))
    return _wrapper_retry
def _make_unicode_string(inval):
"""
Converts a string or bytes into a UTF-8 unicode string
"""
try:
return unicode(inval, 'utf-8') # Python2
except NameError:
return str(inval, 'utf-8') # Python3
class ApiConnection(object):
"""
This class wraps the HTTP connection, translates to/from JSON, and
handles authentication.
Its methods raise ApiError (or its subclasses) when things go wrong
"""
HEADER_DATA = {'Datera-Driver': 'Python-SDK-{}'.format(VERSION)}
    def __init__(self, context):
        """
        Initialize a connection from a context object, which defines
        the hostname, username, password, etc.
        """
        self._context = context
        # Endpoint coordinates and credentials, all sourced from context.
        self._hostname = context.hostname
        self._username = context.username
        self._password = context.password
        self._tenant = context.tenant
        self._version = context.version
        # TLS / transport options.
        self._cert = context.cert
        self._cert_key = context.cert_key
        self._secure = context.secure
        self._timeout = context.timeout
        self._ldap_server = context.ldap_server
        self._extra_headers = context.extra_headers
        self._verify = context.verify
        # Guards the login/logout key handoff across threads.
        self._lock = threading.Lock()
        # Session state; populated by login().
        self._key = None
        self.reader = None
        self._logged_in = False
        self._product_version = None
@classmethod
def from_context(cls, context):
return cls(context)
def _get_request_attrs(self, urlpath):
if self._secure:
protocol = 'https'
port = REST_PORT_HTTPS
cert_data = ((self._cert, self._cert_key)
if self._cert_key else self._cert)
else:
protocol = 'http'
port = REST_PORT
cert_data = None
api_version = self._version
host = self._hostname
connection_string = '{}://{}:{}/v{}/{}'.format(
protocol, host, port, api_version.strip('v'),
urlpath.strip('/'))
return protocol, port, cert_data, api_version, host, connection_string
def _http_connect_request(self, method, urlpath, headers=None, params=None,
body=None, files=None, sensitive=False):
protocol, port, cert_data, api_version, host, connection_string = \
self._get_request_attrs(urlpath)
headers.update(**self._extra_headers)
if headers['Datera-Driver'] != self.HEADER_DATA['Datera-Driver']:
headers['Datera-Driver'] = "|".join((
headers['Datera-Driver'], self.HEADER_DATA['Datera-Driver']))
request_id = uuid.uuid4()
if sensitive:
dbody = "********"
dheaders = "********"
else:
dbody = body
dheaders = headers
LOG.debug("\nDatera Trace ID: %(tid)s\n"
"Datera Request ID: %(rid)s\n"
"Datera Request URL: %(url)s\n"
"Datera Request Method: %(method)s\n"
"Datera Request Payload: %(payload)s\n"
"Datera Request Headers: %(header)s\n",
{'tid': getattr(
self._context.thread_local, 'trace_id', None),
'rid': request_id,
'url': connection_string,
'method': method,
'payload': dbody,
'header': dheaders})
t1 = time.time()
try:
resp = getattr(requests, method.lower())(
connection_string, headers=headers, params=params,
data=body, verify=self._verify, files=files,
cert=cert_data)
if sensitive or '/api' in resp.url:
payload = "*********"
else:
payload = resp.content
# Python 2/3 compatibility
try:
payload = payload.decode('utf-8').replace('\n', '')
except AttributeError:
payload = str(payload).replace('\n', '')
t2 = time.time()
timedelta = round(t2 - t1, 3)
LOG.debug("\nDatera Trace ID: %(tid)s\n"
"Datera Response ID: %(rid)s\n"
"Datera Response TimeDelta: %(delta)ss\n"
"Datera Response URL: %(url)s\n"
"Datera Response Code: %(rcode)s\n"
"Datera Response Payload: %(payload)s\n"
"Datera Response Object: %(obj)s\n",
{'tid': getattr(
self._context.thread_local, 'trace_id', None),
'rid': request_id,
'delta': timedelta,
'url': resp.url,
'rcode': resp.status_code,
'payload': payload,
'obj': None})
except requests.ConnectionError as e:
raise ApiConnectionError(e, '')
except requests.Timeout as e:
raise ApiTimeoutError(e, '')
if files:
resp_data = {}
else:
resp_data = resp.json()
resp_status = resp.status_code
resp_reason = resp.reason
resp_headers = resp.headers
self._assert_response_successful(
method, urlpath, body, resp_data, resp_status, resp_reason)
return (resp_data, resp_status, resp_reason, resp_headers)
    def _get_schema(self, endpoint):
        """
        Tries to access cached schema, if not available, pulls new schema
        from the remote box.
        """
        data = None
        if os.path.exists(self._context.schema_loc):
            # Cache file exists; reuse the entry for this API version
            # when present, otherwise fetch and merge it in.
            with io.open(self._context.schema_loc, 'rb') as f:
                fdata = f.read()
                data = {}
                if fdata:
                    try:
                        # Python 2.7
                        data = json.loads(fdata)
                    except TypeError:
                        # Python 3+
                        data = json.loads(fdata.decode('utf-8'))
                if self._version in data:
                    return data[self._version]
                # Making it sensitive so it doesn't clog the logs
                data[self._version] = self.read_endpoint(endpoint,
                                                         sensitive=True)
        else:
            # Making it sensitive so it doesn't clog the logs
            data = {self._version: self.read_endpoint(endpoint,
                                                      sensitive=True)}
        # Rewrite the whole cache file with the fetched schema included.
        with io.open(self._context.schema_loc, 'wb+') as f:
            jdata = json.dumps(data)
            try:
                # Python 2.7
                f.write(jdata)
            except TypeError:
                # Python 3+
                f.write(jdata.encode('utf-8'))
        return data[self._version]
    @_with_retry
    def login(self, **params):
        """ Login to the API, store the key, get schema """
        # Credentials may be supplied per-call; otherwise fall back to the
        # context this connection was constructed with.
        if params:
            if params.get("ldap_server"):
                send_data = {"name": params.get("name"),
                             "password": params.get("password"),
                             "remote_server": params.get("ldap_server")}
            else:
                send_data = {"name": params.get("name"),
                             "password": params.get("password")}
        else:
            send_data = {"name": self._username, "password": self._password}
            if self._ldap_server:
                send_data.update({'remote_server': self._ldap_server})
        body = json.dumps(send_data)
        headers = {}
        headers["content-type"] = "application/json; charset=utf-8"
        urlpath = "/login"
        method = "PUT"
        # sensitive=True keeps the password out of the debug logs.
        with self._lock:
            resp_dict, resp_status, resp_reason, resp_hdrs = \
                self._http_connect_request(method, urlpath, body=body,
                                           headers=headers, sensitive=True)
            if 'key' not in resp_dict or not resp_dict['key']:
                raise ApiAuthError("No auth key returned", resp_dict)
            key = str(resp_dict['key'])
            self._key = key
            self._logged_in = True
        # Load the version-specific schema reader and remember the product
        # version (used for pagination workarounds in _do_request).
        Reader = get_reader(self._version)
        reader = Reader(self._get_schema(Reader._endpoint))
        self.reader = reader
        system = self.read_entity("/system")
        self._product_version = system["sw_version"]
def logout(self):
""" Perform logout operation with the key"""
with self._lock:
key = self._key
self._key = None
self._logged_in = False
headers = dict()
headers["content-type"] = "application/json; charset=utf-8"
headers["auth-token"] = key
urlpath = "/logout"
method = "PUT"
self._http_connect_request(method, urlpath, headers=headers)
    @property
    def auth_key(self):
        # Session key returned by /login; None when logged out.
        return self._key
    @auth_key.setter
    def auth_key(self, new_key):
        # Allow callers to inject an externally obtained session key.
        self._key = new_key
def _assert_response_successful(self, method, urlpath, body,
resp_data, resp_status, resp_reason):
"""
Raises an exception if the response was an error
resp_data (str)
resp_status (str)
resp_reason (str)
"""
if resp_status >= 200 and resp_status <= 299:
return
msg = "[REQUEST]: " + method + " " + urlpath + "\n"
if body is not None:
msg += str(body) + "\n"
if resp_data:
msg += '[RESPONSE]:\n'
msg += str(resp_data) + "\n"
msg += str(resp_status) + " " + str(resp_reason)
if resp_status == 401:
raise ApiAuthError(msg, resp_data)
elif resp_status == 404:
raise ApiNotFoundError(msg, resp_data)
elif resp_status == 400 or resp_status == 405 or \
resp_status == 403:
raise ApiInvalidRequestError(msg, resp_data)
elif resp_status == 422:
raise ApiValidationFailedError(msg, resp_data)
elif resp_status == 409:
raise ApiConflictError(msg, resp_data)
elif resp_status == 500:
if 'REST server is still initializing' in resp_data.get(
'message', ''):
raise Api503RetryError(msg, resp_data)
raise ApiInternalError(msg, resp_data)
elif resp_status == 503:
raise Api503RetryError(msg, resp_data)
else:
raise ApiError(msg, resp_data)
    def _do_request(self, method, urlpath, files=None, data=None, params=None,
                    **kwargs):
        """
        Handle the aggregation of different pages for the request.
        Issues the first request, then (for GET) keeps paging with an
        increasing offset until all results are merged into one payload.
        """
        resp_meta, resp_data = \
            self._do_auth_request(method, urlpath, data=data, params=params,
                                  files=files, **kwargs)
        # No metadata means no pagination
        if "metadata" not in resp_meta:
            return resp_meta, resp_data
        if "offset" in resp_meta["metadata"]:
            offset = resp_meta["metadata"]["offset"]
        else:
            offset = 0
        if params is None:
            params = {}
        # hotfix for issue with filter not being applied before
        # calculating total_count. We're assuming that if
        # 'filter' is applied, we'll never have more than
        # 100 results which should account for the vast majority
        # of cases anyways.
        # This is fixed in the latest 3.3 patch
        filter_skip = False
        if "filter" in params and not dat_version_gte(
                self._product_version, "3.3.0.0"):
            filter_skip = True
        # Should we handle POST or PUT?
        # Limiting the method to GET for now
        if (method == "GET" and "total_count" in resp_meta["metadata"] and
                "request_count" in resp_meta["metadata"] and not filter_skip):
            # Keep firing until we get everything
            while True:
                # ccount: results received so far; tcount: total available.
                ccount = offset + resp_meta["metadata"]["request_count"]
                tcount = resp_meta["metadata"]["total_count"]
                limit = params.get("limit", tcount) if params else tcount
                # If we've hit the limit or there isn't a limit parameter in
                # the returned metadata, we're done
                if limit <= ccount or "limit" not in resp_meta["metadata"]:
                    break
                offset += resp_meta["metadata"]["limit"]
                next_offset = {"offset": offset}
                params.update(next_offset)
                resp_meta, resp_data_container = \
                    self._do_auth_request(
                        method, urlpath, data, params=params)
                # Expanding the result
                if isinstance(resp_data, dict):
                    resp_data.update(resp_data_container)
                else:
                    resp_data.extend(resp_data_container)
        return resp_meta, resp_data
    def _build_body_headers(self, data=None, files=None, params=None,
                            **kwargs):
        """Build the (body, headers) pair for a request.

        Promotes any 'tenant' key found in data/params into a header
        (mutating the passed-in dict), attaches the auth token, and
        JSON-encodes dict bodies unless files are being uploaded.
        """
        headers = {}
        # tenant header
        if self._version == 'v2':
            pass  # v2 did not support multi-tenancy
        else:
            if self._tenant:
                headers["tenant"] = self._tenant
            else:
                headers["tenant"] = '/root'
            # A per-request tenant (possibly nested under 'data')
            # overrides the connection default.
            if isinstance(data, dict):
                if 'tenant' in data:
                    headers["tenant"] = data['tenant']
                    data.pop('tenant')
                elif 'data' in data:
                    if 'tenant' in data['data']:
                        headers["tenant"] = data['data']['tenant']
                        data['data'].pop('tenant')
            elif isinstance(params, dict):
                if 'tenant' in params:
                    headers["tenant"] = params['tenant']
                    params.pop('tenant')
        # Auth-Token header
        if self._key:
            headers["Auth-Token"] = self._key
        if not files:
            # content-type header
            headers["content-type"] = "application/json; charset=utf-8"
        if data is None:
            body = None
        elif isinstance(data, str):
            body = data
        elif files is not None:
            # Requests can't push a string and a file at the same time, so we
            # just pass the data as a normal python dict
            body = data
        else:
            body = json.dumps(data)
        return body, headers
    @_with_retry
    @_with_authentication
    def _do_auth_request(self, method, urlpath, files=None, data=None,
                         params=None, **kwargs):
        """
        Translates to/from JSON as needed, calls _http_connect_request()
        Bubbles up ApiError on error.
        Returns a (metadata, data) tuple.
        """
        sensitive = kwargs.get('sensitive')
        body, headers = self._build_body_headers(
            data=data, params=params, files=files)
        parsed_data, resp_status, resp_reason, _resp_headers = \
            self._http_connect_request(method, urlpath, params=params,
                                       body=body, headers=headers, files=files,
                                       sensitive=sensitive)
        ret_metadata = {}
        ret_data = parsed_data
        if self._version == 'v2':
            # v2 had no metadata
            ret_data = parsed_data
            ret_metadata = {}
        else:
            # v2.1+ wraps the payload: {'data': ..., <metadata keys>};
            # split it so callers get them separately.
            if isinstance(parsed_data, dict) and 'data' in parsed_data:
                ret_data = parsed_data.pop('data')
                ret_metadata = parsed_data
        return ret_metadata, ret_data
    @_with_retry
    @_with_authentication
    def _do_stream_request(self, urlpath, data=None, params=None, **kwargs):
        # Generator that polls ``urlpath`` forever, yielding a
        # (metadata, data) pair per response until the consumer stops
        # iterating or the user hits Ctrl-C.
        body, headers = self._build_body_headers(data=data, params=params)
        try:
            while True:
                parsed_data, resp_status, resp_reason, resp_headers = \
                    self._http_connect_request(
                        "GET", urlpath, params=params, body=body,
                        headers=headers, **kwargs)
                ret_data = parsed_data.pop('data')
                ret_metadata = parsed_data
                yield ret_metadata, ret_data
        except KeyboardInterrupt:
            LOG.debug("Ctrl-C recieved, ending stream")
########################################
def create_entity(self, path, data, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on error
Parameters:
path (str) - Endpoint path, e.g. "/app_templates"
data (dict) - e.g. {"name": "myapptemplate"}
"""
_metadata, data = self._do_request("POST", path, data=data,
sensitive=sensitive)
return data
def read_endpoint(self, path, params=None, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on error
Parameters:
path (str) - Endpoint path, e.g. "/app_templates"
params (dict) - Querry Params, e.g. "/app_templates?key=value"
"""
_metadata, data = self._do_request("GET", path, params=params,
sensitive=sensitive)
if 'complete' in _metadata:
if not _metadata['complete']:
return None
return data
def read_entity(self, path, params=None, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on error
Parameters:
path (str) - Entity path, e.g. "/app_templates/myapptemplate"
params (dict) - Querry Params, e.g. "/app_templates?key=value"
"""
_metadata, data = self._do_request("GET", path, params=params,
sensitive=sensitive)
return data
def update_endpoint(self, path, data, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on error
Parameters:
path (str) - Endpoint path
data (dict)
"""
_metadata, data = self._do_request("PUT", path, data=data,
sensitive=sensitive)
return data
def update_entity(self, path, data, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on error
Parameters:
path (str) - Entity path, e.g. "/app_templates/myapptemplate"
data (dict)
"""
_metadata, data = self._do_request("PUT", path, data=data,
sensitive=sensitive)
return data
def upload_endpoint(self, path, files, data, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on error
Parameters:
path (str) - Entity path, e.g. "/app_templates/myapptemplate"
data (dict)
files (list)
sensitive (boolean)
"""
_metadata, data = self._do_request("PUT", path, data=data,
files=files, sensitive=sensitive)
return data
    def stream_endpoint(self, path, data, interval, timeout):
        """
        Streams Endpoint Data
        Raises ApiError on error
        Parameters:
          path (str) - Entity path, e.g. "/app_templates/myapptemplate"
          data (dict)
          interval - seconds to sleep between polls
          timeout - total budget in seconds; 0 means stream forever
        """
        if timeout == 0:
            timeout = "inf"
        try:
            for _metadata, data in self._do_stream_request(path, data=data):
                yield _metadata, data
                time.sleep(interval)
                if timeout != "inf":
                    # NOTE(review): only the sleep interval is deducted, so
                    # the effective timeout stretches by each request's
                    # latency - confirm this is intended.
                    timeout -= interval
                    if timeout <= 0:
                        LOG.debug("Timeout reached, ending stream")
                        return
        except KeyboardInterrupt:
            LOG.debug("Ctrl-C recieved, ending stream")
def delete_entity(self, path, data=None, sensitive=False):
"""
Returns the parsed response data
Raises ApiError on HTTP error
Parameters:
path (str) - Entity path, e.g. "/app_templates/myapptemplate"
"""
_metadata, data = self._do_request("DELETE", path, data=data,
sensitive=sensitive)
return data
|
|
# -*- coding: utf-8 -*-
import contextlib
import copy
import datetime
import json
import threading
import elasticsearch
import mock
import pytest
from elasticsearch.exceptions import ElasticsearchException
from elastalert.enhancements import BaseEnhancement
from elastalert.kibana import dashboard_temp
from elastalert.util import dt_to_ts
from elastalert.util import dt_to_unix
from elastalert.util import dt_to_unixms
from elastalert.util import EAException
from elastalert.util import ts_to_dt
from elastalert.util import unix_to_dt
START_TIMESTAMP = '2014-09-26T12:34:45Z'
END_TIMESTAMP = '2014-09-27T12:34:45Z'
START = ts_to_dt(START_TIMESTAMP)
END = ts_to_dt(END_TIMESTAMP)
def _set_hits(ea_inst, hits):
res = {'hits': {'hits': hits}}
ea_inst.client_es.return_value = res
def generate_hits(timestamps, **kwargs):
    """Build a fake ES search response with one hit per timestamp.

    Hits get sequential ids ('id0', 'id1', ...) and every kwarg is
    copied into each hit's _source. Uses enumerate() instead of the
    Python 2-only xrange/.next()/iteritems() of the original, so it
    runs identically on Python 2 and 3.
    """
    hits = []
    for num, ts in enumerate(timestamps):
        data = {'_id': 'id' + str(num), '_source': {'@timestamp': ts}, '_type': 'logs'}
        data['_source'].update(kwargs)
        hits.append(data)
    return {'hits': {'hits': hits}}
def assert_alerts(ea_inst, calls):
    """ Takes a list of lists of timestamps. Asserts that an alert was called for each list, containing those timestamps. """
    # One alert() invocation is expected per entry in ``calls``.
    assert ea_inst.rules[0]['alert'][0].alert.call_count == len(calls)
    for call_num, call_args in enumerate(ea_inst.rules[0]['alert'][0].alert.call_args_list):
        # Each invocation must contain exactly the expected timestamps -
        # no extras (first assert) and none missing (length check).
        assert not any([match['@timestamp'] not in calls[call_num] for match in call_args[0][0]])
        assert len(call_args[0][0]) == len(calls[call_num])
def test_starttime(ea):
    """Malformed timestamps must be rejected by ts_to_dt."""
    bad_inputs = ['2014-13-13',
                  '2014-11-24T30:00:00',
                  'Not A Timestamp']
    for bad in bad_inputs:
        with pytest.raises((TypeError, ValueError)):
            ts_to_dt(bad)
def test_init_rule(ea):
    """init_rule must copy runtime state onto a freshly loaded rule."""
    # Simulate state of a rule just loaded from a file
    ea.rules[0]['minimum_starttime'] = datetime.datetime.now()
    new_rule = copy.copy(ea.rules[0])
    # Explicit loop instead of map(): under Python 3 map() is lazy, so the
    # pops would silently never execute.
    for prop in ['agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']:
        new_rule.pop(prop)
    # Properties are copied from ea.rules[0]
    ea.rules[0]['starttime'] = '2014-01-02T00:11:22'
    ea.rules[0]['processed_hits'] = ['abcdefg']
    new_rule = ea.init_rule(new_rule, False)
    for prop in ['starttime', 'agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']:
        assert new_rule[prop] == ea.rules[0][prop]
    # Properties are fresh
    new_rule = ea.init_rule(new_rule, True)
    new_rule.pop('starttime')
    assert 'starttime' not in new_rule
    assert new_rule['processed_hits'] == {}
def test_query(ea):
    # run_query must issue one range-filtered, ascending-sorted search.
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    ea.current_es.search.assert_called_with(body={'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}, 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True, size=100000)
def test_query_with_fields(ea):
    # With _source disabled, the query must request 'fields' instead of
    # relying on _source_include.
    ea.rules[0]['_source_enabled'] = False
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    ea.current_es.search.assert_called_with(body={'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}, 'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True, size=100000)
def test_query_with_unix(ea):
    # With a unix timestamp_type the range bounds must be epoch seconds.
    ea.rules[0]['timestamp_type'] = 'unix'
    ea.rules[0]['dt_to_ts'] = dt_to_unix
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    start_unix = dt_to_unix(START)
    end_unix = dt_to_unix(END)
    ea.current_es.search.assert_called_with(body={'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}, 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True, size=100000)
def test_query_with_unixms(ea):
    # Same as test_query_with_unix but with millisecond-precision epochs.
    ea.rules[0]['timestamp_type'] = 'unixms'
    ea.rules[0]['dt_to_ts'] = dt_to_unixms
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    start_unix = dt_to_unixms(START)
    end_unix = dt_to_unixms(END)
    ea.current_es.search.assert_called_with(body={'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}, 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], ignore_unavailable=True, size=100000)
def test_no_hits(ea):
    # An empty result set must not be passed to the rule type at all.
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_data.call_count == 0
def test_no_terms_hits(ea):
    # Same as test_no_hits, but for the terms-query code path.
    ea.rules[0]['use_terms_query'] = True
    ea.rules[0]['query_key'] = 'QWERTY'
    ea.rules[0]['doc_type'] = 'uiop'
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_terms_data.call_count == 0
def test_some_hits(ea):
    # Hits must reach the rule type once, with string timestamps parsed
    # into datetime objects.
    hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
    hits_dt = generate_hits([START, END])
    ea.current_es.search.return_value = hits
    ea.run_query(ea.rules[0], START, END)
    assert ea.rules[0]['type'].add_data.call_count == 1
    ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def test_some_hits_unix(ea):
    """Unix-epoch timestamps in hits are converted back to datetimes before add_data."""
    rule = ea.rules[0]
    rule['timestamp_type'] = 'unix'
    rule['dt_to_ts'] = dt_to_unix
    rule['ts_to_dt'] = unix_to_dt
    raw_hits = generate_hits([dt_to_unix(START), dt_to_unix(END)])
    converted_hits = generate_hits([START, END])
    # deep copy: run_query converts the hit timestamps in place
    ea.current_es.search.return_value = copy.deepcopy(raw_hits)
    ea.run_query(rule, START, END)
    add_data = rule['type'].add_data
    assert add_data.call_count == 1
    add_data.assert_called_with([hit['_source'] for hit in converted_hits['hits']['hits']])
def _duplicate_hits_generator(timestamps, **kwargs):
    """Generator repeatedly returns identical hits dictionaries.

    Used as a mock side_effect so every search call yields a fresh (but
    equal-valued) hits response for the given timestamps.
    """
    while True:
        yield generate_hits(timestamps, **kwargs)
def test_duplicate_timestamps(ea):
    """Events sharing one timestamp are all processed once, then deduplicated on
    a later query that overlaps the same instant."""
    ea.current_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP] * 3, blah='duplicate')
    ea.run_query(ea.rules[0], START, ts_to_dt('2014-01-01T00:00:00Z'))
    # All three identical-timestamp events reach add_data on the first pass
    assert len(ea.rules[0]['type'].add_data.call_args_list[0][0][0]) == 3
    assert ea.rules[0]['type'].add_data.call_count == 1
    # Run the query again, duplicates will be removed and not added
    ea.run_query(ea.rules[0], ts_to_dt('2014-01-01T00:00:00Z'), END)
    assert ea.rules[0]['type'].add_data.call_count == 1
def test_match(ea):
    """A rule that produces a match fires its alerter exactly once."""
    hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
    ea.current_es.search.return_value = hits
    ea.rules[0]['type'].matches = [{'@timestamp': END}]
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
    # NOTE(review): `.called_with(...)` is NOT a mock assertion -- it merely
    # creates a child mock and always "passes". This was probably meant to be
    # assert_called_with, but the expected arguments need verifying first.
    ea.rules[0]['alert'][0].alert.called_with({'@timestamp': END_TIMESTAMP})
    assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_run_rule_calls_garbage_collect(ea):
    """garbage_collect must be invoked once per run_every segment over the run span."""
    start_time = '2014-09-26T00:00:00Z'
    end_time = '2014-09-26T12:00:00Z'
    ea.buffer_time = datetime.timedelta(hours=1)
    ea.run_every = datetime.timedelta(hours=1)
    # contextlib.nested is Python 2 only (removed in Python 3)
    with contextlib.nested(mock.patch.object(ea.rules[0]['type'], 'garbage_collect'),
                           mock.patch.object(ea, 'run_query')) as (mock_gc, mock_get_hits):
        ea.run_rule(ea.rules[0], ts_to_dt(end_time), ts_to_dt(start_time))
    # Running elastalert every hour for 12 hours, we should see self.garbage_collect called 12 times.
    assert mock_gc.call_count == 12
    # The calls should be spaced 1 hour apart
    expected_calls = [ts_to_dt(start_time) + datetime.timedelta(hours=i) for i in range(1, 13)]
    for e in expected_calls:
        mock_gc.assert_any_call(e)
def run_rule_query_exception(ea, mock_es):
    """Helper: run a rule whose ES client raises, then verify nothing was
    processed and the rule's starttime was left untouched."""
    with mock.patch('elastalert.elastalert.Elasticsearch') as es_factory:
        es_factory.return_value = mock_es
        ea.run_rule(ea.rules[0], END, START)
    rule = ea.rules[0]
    # Assert neither add_data nor garbage_collect were called
    # and that starttime did not change
    assert rule.get('starttime') == START
    for handler_name in ('add_data', 'garbage_collect', 'add_count_data'):
        assert getattr(rule['type'], handler_name).call_count == 0
def test_query_exception(ea):
    """A search raising ElasticsearchException is handled without processing data."""
    failing_es = mock.Mock()
    failing_es.search.side_effect = ElasticsearchException
    run_rule_query_exception(ea, failing_es)
def test_query_exception_count_query(ea):
    """A count query raising ElasticsearchException is handled the same way."""
    rule = ea.rules[0]
    rule['use_count_query'] = True
    rule['doc_type'] = 'blahblahblahblah'
    failing_es = mock.Mock()
    failing_es.count.side_effect = ElasticsearchException
    run_rule_query_exception(ea, failing_es)
def test_match_with_module(ea):
    """Match enhancements attached to a rule have process() invoked per match."""
    enhancement = BaseEnhancement(ea.rules[0])
    enhancement.process = mock.Mock()
    ea.rules[0]['match_enhancements'] = [enhancement]
    test_match(ea)
    enhancement.process.assert_called_with({'@timestamp': END})
def test_agg(ea):
    """Aggregated matches are written to the writeback index, chained by
    aggregate_id, and later sent together by send_pending_alerts()."""
    ea.max_aggregation = 1337
    hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
    alerttime1 = dt_to_ts(ts_to_dt(hits_timestamps[0]) + datetime.timedelta(minutes=10))
    hits = generate_hits(hits_timestamps)
    ea.current_es.search.return_value = hits
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        # Aggregate first two, query over full range
        ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
        ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
        ea.run_rule(ea.rules[0], END, START)
    # Assert that the three matches were added to elasticsearch
    call1 = ea.writeback_es.create.call_args_list[0][1]['body']
    call2 = ea.writeback_es.create.call_args_list[1][1]['body']
    call3 = ea.writeback_es.create.call_args_list[2][1]['body']
    # First match opens a new 10-minute aggregation window (no aggregate_id)
    assert call1['match_body'] == {'@timestamp': '2014-09-26T12:34:45'}
    assert not call1['alert_sent']
    assert 'aggregate_id' not in call1
    assert call1['alert_time'] == alerttime1
    # Second match falls inside the window and references the first by id
    assert call2['match_body'] == {'@timestamp': '2014-09-26T12:40:45'}
    assert not call2['alert_sent']
    assert call2['aggregate_id'] == 'ABCD'
    # Third match is past the window and starts a fresh aggregation
    assert call3['match_body'] == {'@timestamp': '2014-09-26T12:47:45'}
    assert not call3['alert_sent']
    assert 'aggregate_id' not in call3
    # First call - Find all pending alerts
    # Second call - Find matches with agg_id == 'ABCD'
    # Third call - Find matches with agg_id == 'CDEF'
    ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_source': call1},
                                                             {'_id': 'BCDE', '_source': call2},
                                                             {'_id': 'CDEF', '_source': call3}]}},
                                          {'hits': {'hits': [{'_id': 'BCDE', '_source': call2}]}},
                                          {'hits': {'hits': []}}]
    with mock.patch('elastalert.elastalert.Elasticsearch') as mock_es:
        ea.send_pending_alerts()
        # Assert that current_es was refreshed from the aggregate rules
        assert mock_es.called_with(host='', port='')
        assert mock_es.call_count == 2
    assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]])
    # Indexes 6-8: the searches issued by send_pending_alerts above
    call1 = ea.writeback_es.search.call_args_list[6][1]['body']
    call2 = ea.writeback_es.search.call_args_list[7][1]['body']
    call3 = ea.writeback_es.search.call_args_list[8][1]['body']
    assert 'alert_time' in call1['filter']['range']
    assert call2['query']['query_string']['query'] == 'aggregate_id:ABCD'
    assert call3['query']['query_string']['query'] == 'aggregate_id:CDEF'
    # max_aggregation caps the size of the aggregate lookup
    assert ea.writeback_es.search.call_args_list[7][1]['size'] == 1337
def test_agg_no_writeback_connectivity(ea):
    """ Tests that if writeback_es throws an exception, the matches will be added to 'agg_matches' and when
    run again, that they will be passed again to add_aggregated_alert """
    timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
    expected_matches = [{'@timestamp': ts} for ts in timestamps]
    ea.current_es.search.return_value = generate_hits(timestamps)
    ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
    ea.rules[0]['type'].matches = [{'@timestamp': ts} for ts in timestamps]
    ea.writeback_es.create.side_effect = elasticsearch.exceptions.ElasticsearchException('Nope')
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
        # Matches that could not be written are kept on the rule
        assert ea.rules[0]['agg_matches'] == expected_matches
    ea.current_es.search.return_value = {'hits': {'hits': []}}
    ea.add_aggregated_alert = mock.Mock()
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
    # The retained matches are re-submitted on the next run
    for match in expected_matches:
        ea.add_aggregated_alert.assert_any_call(match, ea.rules[0])
def test_silence(ea):
    """--silence suppresses alerts for the given period; they resume once ts_now
    is past the silence window."""
    # Silence test rule for 4 hours
    ea.args.rule = 'test_rule.yaml'  # Not a real name, just has to be set
    ea.args.silence = 'hours=4'
    ea.silence()
    # Don't alert even with a match
    match = [{'@timestamp': '2014-11-17T00:00:00'}]
    ea.rules[0]['type'].matches = match
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 0
    # Mock ts_now() to +5 hours, alert on match
    match = [{'@timestamp': '2014-11-17T00:00:00'}]
    ea.rules[0]['type'].matches = match
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        with mock.patch('elastalert.elastalert.Elasticsearch'):
            # Converted twice to add tzinfo
            mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
            ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_compound_query_key(ea):
    """Compound query keys are flattened into one comma-joined field per event."""
    rule = ea.rules[0]
    rule['query_key'] = 'this,that,those'
    rule['compound_query_key'] = ['this', 'that', 'those']
    ea.current_es.search.return_value = generate_hits([START_TIMESTAMP, END_TIMESTAMP], this='abc', that='def', those=4)
    ea.run_query(rule, START, END)
    first_event = rule['type'].add_data.call_args_list[0][0][0][0]
    assert 'this,that,those' in first_event
    assert first_event['this,that,those'] == 'abc, def, 4'
def test_silence_query_key(ea):
    """Silencing also suppresses matches carrying a query_key value; alerts
    resume after the silence window passes."""
    # Silence test rule for 4 hours
    ea.args.rule = 'test_rule.yaml'  # Not a real name, just has to be set
    ea.args.silence = 'hours=4'
    ea.silence()
    # Don't alert even with a match
    match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
    ea.rules[0]['type'].matches = match
    ea.rules[0]['query_key'] = 'username'
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 0
    # Mock ts_now() to +5 hours, alert on match
    match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
    ea.rules[0]['type'].matches = match
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        with mock.patch('elastalert.elastalert.Elasticsearch'):
            # Converted twice to add tzinfo
            mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
            ea.run_rule(ea.rules[0], END, START)
    assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_realert(ea):
    """realert suppresses repeat alerts within the window; once ts_now passes
    the window, the same rule may alert again."""
    hits = ['2014-09-26T12:35:%sZ' % (x) for x in range(60)]
    matches = [{'@timestamp': x} for x in hits]
    ea.current_es.search.return_value = hits
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.rules[0]['realert'] = datetime.timedelta(seconds=50)
        ea.rules[0]['type'].matches = matches
        ea.run_rule(ea.rules[0], END, START)
        # 60 matches, but realert collapses them into a single alert
        assert ea.rules[0]['alert'][0].alert.call_count == 1
    # Doesn't alert again
    matches = [{'@timestamp': x} for x in hits]
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        # BUG FIX: matches must be assigned BEFORE run_rule -- previously it was
        # assigned after, so this phase ran with no matches and the suppression
        # assertion passed vacuously.
        ea.rules[0]['type'].matches = matches
        ea.run_rule(ea.rules[0], END, START)
        assert ea.rules[0]['alert'][0].alert.call_count == 1
    # mock ts_now() to past the realert time
    matches = [{'@timestamp': hits[0]}]
    with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
        with mock.patch('elastalert.elastalert.Elasticsearch'):
            # mock_ts is converted twice to add tzinfo
            mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(minutes=10)))
            ea.rules[0]['type'].matches = matches
            ea.run_rule(ea.rules[0], END, START)
            assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert_with_query_key(ea):
    """realert suppression is scoped per query_key value: the same value is
    silenced, while a different, empty, or missing value alerts again."""
    ea.rules[0]['query_key'] = 'username'
    ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
    scenarios = [
        # (single match, expected cumulative alert count)
        ({'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}, 1),           # first alert silences 'qlo'
        ({'@timestamp': '2014-11-17T00:05:00', 'username': 'qlo'}, 1),           # same key: suppressed
        ({'@timestamp': '2014-11-17T00:05:00', 'username': ''}, 2),              # different value alerts
        ({'@timestamp': '2014-11-17T00:05:00'}, 3),                              # missing key still alerts
        ({'@timestamp': '2014-11-17T00:05:00', 'username': 'ghengis_khan'}, 4),  # new value alerts
    ]
    for match, expected_count in scenarios:
        ea.rules[0]['type'].matches = [match]
        with mock.patch('elastalert.elastalert.Elasticsearch'):
            ea.run_rule(ea.rules[0], END, START)
        assert ea.rules[0]['alert'][0].alert.call_count == expected_count
def test_count(ea):
    """A use_count_query rule issues es.count once per run_every slice of [START, END]."""
    ea.rules[0]['use_count_query'] = True
    ea.rules[0]['doc_type'] = 'doctype'
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
    # Assert that es.count is run against every run_every timeframe between START and END
    start = START
    query = {'query': {'filtered': {'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}}
    while END - start > ea.run_every:
        end = start + ea.run_every
        # The same dict is mutated per segment and checked immediately below;
        # assert_any_call compares against the dict's *current* contents.
        query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['lte'] = dt_to_ts(end)
        query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['gt'] = dt_to_ts(start)
        start = start + ea.run_every
        ea.current_es.count.assert_any_call(body=query, doc_type='doctype', index='idx', ignore_unavailable=True)
def run_and_assert_segmented_queries(ea, start, end, segment_size):
    """Helper: run a rule over [start, end] and assert run_query was called once
    per segment_size slice, and that the final status document spans the full range."""
    with mock.patch.object(ea, 'run_query') as mock_run_query:
        ea.run_rule(ea.rules[0], end, start)
        original_end, original_start = end, start
        for call_args in mock_run_query.call_args_list:
            # Each call must cover [start, min(start + segment_size, original_end)]
            end = min(start + segment_size, original_end)
            assert call_args[0][1:3] == (start, end)
            start += segment_size
    # Assert elastalert_status was created for the entire time range
    assert ea.writeback_es.create.call_args_list[-1][1]['body']['starttime'] == dt_to_ts(original_start)
    assert ea.writeback_es.create.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(original_end)
def test_query_segmenting(ea):
    """Normal queries segment on buffer_time; count and terms queries segment on run_every."""
    # buffer_time segments with normal queries
    ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
    mock_es = mock.Mock()
    mock_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP])
    with mock.patch('elastalert.elastalert.Elasticsearch') as mock_es_init:
        mock_es_init.return_value = mock_es
        run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
    # Assert that num_hits correctly includes the 1 hit per query
    assert ea.num_hits == ea.current_es.search.call_count
    # run_every segments with count queries
    ea.rules[0]['use_count_query'] = True
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        run_and_assert_segmented_queries(ea, START, END, ea.run_every)
    # run_every segments with terms queries
    ea.rules[0].pop('use_count_query')
    ea.rules[0]['use_terms_query'] = True
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        run_and_assert_segmented_queries(ea, START, END, ea.run_every)
def test_get_starttime(ea):
    """get_starttime returns the stored endtime only while it is recent; a stale
    (10-day-old) endtime yields None."""
    endtime = '2015-01-01T00:00:00Z'
    writeback = mock.Mock()
    writeback.search.return_value = {'hits': {'hits': [{'_source': {'endtime': endtime}}]}}
    ea.writeback_es = writeback
    cases = [
        ('2015-01-05T00:00:00Z', ts_to_dt(endtime)),  # 4 days ahead: endtime returned
        ('2015-01-11T00:00:00Z', None),               # 10 days ahead: too old, None
    ]
    for now, expected in cases:
        with mock.patch('elastalert.elastalert.ts_now') as mock_now:
            mock_now.return_value = ts_to_dt(now)
            assert ea.get_starttime(ea.rules[0]) == expected
def test_set_starttime(ea):
    """set_starttime picks the rule's starttime from (in priority order) the last
    recorded run, buffer_time/run_every offsets, and previous_endtime, without
    ever moving earlier than minimum_starttime."""
    # standard query, no starttime, no last run
    end = ts_to_dt('2014-10-10T10:10:10')
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 1
    assert ea.rules[0]['starttime'] == end - ea.buffer_time
    # Standard query, no starttime, rule specific buffer_time
    ea.rules[0].pop('starttime')
    ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=37)
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 1
    assert ea.rules[0]['starttime'] == end - datetime.timedelta(minutes=37)
    ea.rules[0].pop('buffer_time')
    # Standard query, no starttime, last run
    ea.rules[0].pop('starttime')
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = ts_to_dt('2014-10-10T00:00:00')
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 1
    assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-10T00:00:00')
    # Standard query, no starttime, last run, assure buffer_time doesn't go past
    ea.rules[0].pop('starttime')
    ea.rules[0]['buffer_time'] = datetime.timedelta(weeks=1000)
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = ts_to_dt('2014-10-09T00:00:00')
        # First call sets minimum_starttime
        ea.set_starttime(ea.rules[0], end)
        # Second call uses buffer_time, but it goes past minimum
        ea.set_starttime(ea.rules[0], end)
        assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-09T00:00:00')
    # Standard query, starttime
    ea.rules[0].pop('buffer_time')
    ea.rules[0].pop('minimum_starttime')
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        # An existing starttime skips the writeback lookup entirely
        assert mock_gs.call_count == 0
    assert ea.rules[0]['starttime'] == end - ea.buffer_time
    # Count query, starttime, no previous endtime
    ea.rules[0]['use_count_query'] = True
    ea.rules[0]['doc_type'] = 'blah'
    with mock.patch.object(ea, 'get_starttime') as mock_gs:
        mock_gs.return_value = None
        ea.set_starttime(ea.rules[0], end)
        assert mock_gs.call_count == 0
    assert ea.rules[0]['starttime'] == end - ea.run_every
    # Count query, with previous endtime
    with mock.patch('elastalert.elastalert.Elasticsearch'):
        ea.run_rule(ea.rules[0], END, START)
    ea.set_starttime(ea.rules[0], end)
    assert ea.rules[0]['starttime'] == END
    # buffer_time doesn't go past previous endtime
    ea.rules[0].pop('use_count_query')
    ea.rules[0]['previous_endtime'] = end - ea.buffer_time * 2
    ea.set_starttime(ea.rules[0], end)
    assert ea.rules[0]['starttime'] == ea.rules[0]['previous_endtime']
def test_kibana_dashboard(ea):
    """use_kibana_link uploads a temporary copy of the named dashboard (with
    query_key filters applied) and returns a URL pointing at that copy."""
    match = {'@timestamp': '2014-10-11T00:00:00'}
    mock_es = mock.Mock()
    ea.rules[0]['use_kibana_dashboard'] = 'my dashboard'
    with mock.patch('elastalert.elastalert.Elasticsearch') as mock_es_init:
        mock_es_init.return_value = mock_es
        # No dashboard found
        mock_es.search.return_value = {'hits': {'hits': []}}
        with pytest.raises(EAException):
            ea.use_kibana_link(ea.rules[0], match)
        mock_call = mock_es.search.call_args_list[0][1]
        assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}}
        # Dashboard found
        mock_es.create.return_value = {'_id': 'ABCDEFG'}
        mock_es.search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}}
        url = ea.use_kibana_link(ea.rules[0], match)
        assert 'ABCDEFG' in url
        db = json.loads(mock_es.create.call_args_list[0][1]['body']['dashboard'])
        assert 'anytest' in db['title']
        # Query key filtering added
        ea.rules[0]['query_key'] = 'foobar'
        match['foobar'] = 'baz'
        url = ea.use_kibana_link(ea.rules[0], match)
        db = json.loads(mock_es.create.call_args_list[-1][1]['body']['dashboard'])
        assert db['services']['filter']['list']['1']['field'] == 'foobar'
        assert db['services']['filter']['list']['1']['query'] == '"baz"'
        # Compound query key
        ea.rules[0]['query_key'] = 'foo,bar'
        ea.rules[0]['compound_query_key'] = ['foo', 'bar']
        match['foo'] = 'cat'
        match['bar'] = 'dog'
        match['foo,bar'] = 'cat, dog'
        url = ea.use_kibana_link(ea.rules[0], match)
        db = json.loads(mock_es.create.call_args_list[-1][1]['body']['dashboard'])
        found_filters = 0
        # Each member of the compound key should become its own dashboard filter
        for filter_id, filter_dict in db['services']['filter']['list'].items():
            if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \
               (filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'):
                found_filters += 1
                continue
        assert found_filters == 2
def test_rule_changes(ea):
    """load_rule_changes reloads rules whose file hash changed, adds new rules,
    preserves processed_hits, and refuses a new file with a conflicting rule name."""
    ea.rule_hashes = {'rules/rule1.yaml': 'ABC',
                      'rules/rule2.yaml': 'DEF'}
    ea.rules = [ea.init_rule(rule, True) for rule in [{'rule_file': 'rules/rule1.yaml', 'name': 'rule1', 'filter': []},
                                                      {'rule_file': 'rules/rule2.yaml', 'name': 'rule2', 'filter': []}]]
    ea.rules[1]['processed_hits'] = ['save me']
    new_hashes = {'rules/rule1.yaml': 'ABC',
                  'rules/rule3.yaml': 'XXX',
                  'rules/rule2.yaml': '!@#$'}
    with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
        with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
            mock_load.side_effect = [{'filter': [], 'name': 'rule2', 'rule_file': 'rules/rule2.yaml'},
                                     {'filter': [], 'name': 'rule3', 'rule_file': 'rules/rule3.yaml'}]
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    # All 3 rules still exist
    assert ea.rules[0]['name'] == 'rule1'
    assert ea.rules[1]['name'] == 'rule2'
    assert ea.rules[1]['processed_hits'] == ['save me']
    assert ea.rules[2]['name'] == 'rule3'
    # Assert 2 and 3 were reloaded
    assert mock_load.call_count == 2
    mock_load.assert_any_call('rules/rule2.yaml', ea.conf)
    mock_load.assert_any_call('rules/rule3.yaml', ea.conf)
    # A new rule with a conflicting name wont load
    new_hashes = copy.copy(new_hashes)
    new_hashes.update({'rules/rule4.yaml': 'asdf'})
    with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
        with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
            with mock.patch.object(ea, 'send_notification_email') as mock_send:
                # rule4's file re-uses the existing name 'rule3' -> rejected + email
                mock_load.return_value = {'filter': [], 'name': 'rule3', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
                mock_hashes.return_value = new_hashes
                ea.load_rule_changes()
                mock_send.assert_called_once()
    assert len(ea.rules) == 3
    assert not any(['new' in rule for rule in ea.rules])
    # An old rule which didn't load gets reloaded
    new_hashes = copy.copy(new_hashes)
    new_hashes['rules/rule4.yaml'] = 'qwerty'
    with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
        with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
            mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml'}
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    assert len(ea.rules) == 4
def test_strf_index(ea):
    """ Test that the get_index function properly generates indexes spanning days """
    rule = ea.rules[0]
    rule['index'] = 'logstash-%Y.%m.%d'
    rule['use_strftime_index'] = True
    # Test formatting with times
    begin = ts_to_dt('2015-01-02T12:34:45Z')
    assert ea.get_index(rule, begin, ts_to_dt('2015-01-02T16:15:14Z')) == 'logstash-2015.01.02'
    # Crossing midnight expands to a comma-separated index list
    assert ea.get_index(rule, begin, ts_to_dt('2015-01-03T01:02:03Z')) == 'logstash-2015.01.02,logstash-2015.01.03'
    # Without explicit bounds the date pattern collapses to a wildcard
    assert ea.get_index(rule) == 'logstash-*'
    rule['index'] = 'logstash-%Y.%m'
    assert ea.get_index(rule) == 'logstash-*'
    rule['index'] = 'logstash-%Y.%m-stuff'
    assert ea.get_index(rule) == 'logstash-*-stuff'
def test_count_keys(ea):
    """get_top_counts issues one 'count' search per key and maps the aggregation
    buckets into {term: doc_count} dicts keyed 'top_events_<key>'."""
    ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60)
    ea.rules[0]['top_count_keys'] = ['this', 'that']
    # NOTE(review): matches is assigned a single dict here, not a list of
    # matches; get_top_counts does not appear to read it -- confirm intent.
    ea.rules[0]['type'].matches = {'@timestamp': END}
    ea.rules[0]['doc_type'] = 'blah'
    buckets = [{'aggregations': {'filtered': {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}},
               {'aggregations': {'filtered': {'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}]
    ea.current_es.search.side_effect = buckets
    counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that'])
    calls = ea.current_es.search.call_args_list
    assert calls[0][1]['search_type'] == 'count'
    assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5}
    assert counts['top_events_this'] == {'a': 10, 'b': 5}
    assert counts['top_events_that'] == {'d': 10, 'c': 12}
def test_exponential_realert(ea):
    """next_alert_time doubles the realert window per consecutive alert (capped
    by exponential_realert) and decays the exponent as alerts become infrequent."""
    ea.rules[0]['exponential_realert'] = datetime.timedelta(days=1)  # 1 day ~ 10 * 2**13 seconds
    ea.rules[0]['realert'] = datetime.timedelta(seconds=10)
    until = ts_to_dt('2015-03-24T00:00:00')
    ts5s = until + datetime.timedelta(seconds=5)
    ts15s = until + datetime.timedelta(seconds=15)
    ts1m = until + datetime.timedelta(minutes=1)
    ts5m = until + datetime.timedelta(minutes=5)
    ts4h = until + datetime.timedelta(hours=4)
    # Each tuple: (current time, silenced-until, current exponent)
    test_values = [(ts5s, until, 0),  # Exp will increase to 1, 10*2**0 = 10s
                   (ts15s, until, 0),  # Exp will stay at 0, 10*2**0 = 10s
                   (ts15s, until, 1),  # Exp will increase to 2, 10*2**1 = 20s
                   (ts1m, until, 2),  # Exp will decrease to 1, 10*2**2 = 40s
                   (ts1m, until, 3),  # Exp will increase to 4, 10*2**3 = 1m20s
                   (ts5m, until, 1),  # Exp will lower back to 0, 10*2**1 = 20s
                   (ts4h, until, 9),  # Exp will lower back to 0, 10*2**9 = 1h25m
                   (ts4h, until, 10),  # Exp will lower back to 9, 10*2**10 = 2h50m
                   (ts4h, until, 11)]  # Exp will increase to 12, 10*2**11 = 5h
    results = (1, 0, 2, 1, 4, 0, 0, 9, 12)
    next_res = iter(results)
    for args in test_values:
        ea.silence_cache[ea.rules[0]['name']] = (args[1], args[2])
        next_alert, exponent = ea.next_alert_time(ea.rules[0], ea.rules[0]['name'], args[0])
        # .next() is the Python 2 iterator protocol (next(next_res) in Python 3)
        assert exponent == next_res.next()
def test_stop(ea):
    """ The purpose of this test is to make sure that calling ElastAlerter.stop() will break it
    out of a ElastAlerter.start() loop. This method exists to provide a mechanism for running
    ElastAlert with threads and thus must be tested with threads. mock_loop verifies the loop
    is running and will call stop after several iterations. """
    # Exit the thread on the fourth iteration
    def mock_loop():
        for i in range(3):
            assert ea.running
            yield
        # stop() flips ea.running, which should make start() return
        ea.stop()
    with mock.patch.object(ea, 'sleep_for', return_value=None):
        with mock.patch.object(ea, 'run_all_rules') as mock_run:
            mock_run.side_effect = mock_loop()
            start_thread = threading.Thread(target=ea.start)
            # Set as daemon to prevent a failed test from blocking exit
            start_thread.daemon = True
            start_thread.start()
            # Give it a few seconds to run the loop
            start_thread.join(5)
    assert not ea.running
    assert not start_thread.is_alive()
    assert mock_run.call_count == 4
def test_uncaught_exceptions(ea):
    """handle_uncaught_exception disables rules only when configured; editing the
    rule file re-enables it, and a notification email is sent when configured."""
    e = Exception("Errors yo!")
    # With disabling set to false
    ea.disable_rules_on_error = False
    ea.handle_uncaught_exception(e, ea.rules[0])
    assert len(ea.rules) == 1
    assert len(ea.disabled_rules) == 0
    # With disabling set to true
    ea.disable_rules_on_error = True
    ea.handle_uncaught_exception(e, ea.rules[0])
    assert len(ea.rules) == 0
    assert len(ea.disabled_rules) == 1
    # Changing the file should re-enable it
    ea.rule_hashes = {'rule1': 'abc'}
    new_hashes = {'rule1': 'def'}
    with mock.patch('elastalert.elastalert.get_rule_hashes') as mock_hashes:
        with mock.patch('elastalert.elastalert.load_configuration') as mock_load:
            mock_load.side_effect = [ea.disabled_rules[0]]
            mock_hashes.return_value = new_hashes
            ea.load_rule_changes()
    assert len(ea.rules) == 1
    assert len(ea.disabled_rules) == 0
    # Notify email is sent
    ea.notify_email = 'qlo@example.com'
    with mock.patch.object(ea, 'send_notification_email') as mock_email:
        ea.handle_uncaught_exception(e, ea.rules[0])
    assert mock_email.call_args_list[0][1] == {'exception': e, 'rule': ea.disabled_rules[0]}
|
|
#@PydevCodeAnalysisIgnore
__author__ = 'Daan Wierstra, daan@idsia.ch'
from scipy import zeros, tanh
from module import Module
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.tools.functions import sigmoid, sigmoidPrime, tanhPrime
class LSTMRTRLBlock(Module, ParameterContainer):
""" long short-term memory implemented with RTRL; incoming connections
and recurrent connections are included within this block! """
sequential = True
    def __init__(self, indim, outdim, peepholes = False, name = None):
        """Allocate gate activations, cell state and RTRL derivative traces for
        a block of `outdim` LSTM cells fed by `indim` inputs, with full
        recurrent connections. Buffers start with 0 rows and grow per timestep."""
        nrNeurons = outdim
        self.peep = peepholes
        # internal buffers:
        self.ingate = zeros((0,nrNeurons))
        self.outgate = zeros((0,nrNeurons))
        self.forgetgate = zeros((0,nrNeurons))
        # NOTE(review): self.cell is never resized in _growBuffers nor zeroed in
        # reset(); it appears unused -- confirm before relying on it.
        self.cell = zeros((0,nrNeurons))
        self.ingatex = zeros((0,nrNeurons))
        self.outgatex = zeros((0,nrNeurons))
        self.forgetgatex = zeros((0,nrNeurons))
        self.cellx = zeros((0,nrNeurons))
        self.state = zeros((0,nrNeurons))
        self.ingateError = zeros((0,nrNeurons))
        self.outgateError = zeros((0,nrNeurons))
        self.forgetgateError = zeros((0,nrNeurons))
        self.stateError = zeros((0,nrNeurons))
        # RTRL sensitivity traces for the input weights (one row per timestep)
        self.Sin = zeros((0,indim*nrNeurons))
        self.Sforget = zeros((0,indim*nrNeurons))
        self.Scell = zeros((0,indim*nrNeurons))
        # ... and for the recurrent weights
        self.SinRec = zeros((0,nrNeurons*nrNeurons))
        self.SforgetRec = zeros((0,nrNeurons*nrNeurons))
        self.ScellRec = zeros((0,nrNeurons*nrNeurons))
        Module.__init__(self, indim, outdim, name)
        # NOTE(review): _setParameters slices 4 input matrices AND 4 recurrent
        # matrices (4*indim*n + 4*n*n weights), but only 4*indim*n + n*n are
        # allocated here, so the trailing recurrent slices come out short/empty
        # -- verify the intended parameter count.
        if self.peep:
            ParameterContainer.__init__(self, nrNeurons*3 + (4*indim+nrNeurons)*nrNeurons)
            self.Sin_peep = zeros((0,nrNeurons))
            self.Sforget_peep = zeros((0,nrNeurons))
            self.Scell_peep = zeros((0,nrNeurons))
        else:
            ParameterContainer.__init__(self, (4*indim+nrNeurons)*nrNeurons)
        self._setParameters(self.params)
        self._setDerivatives(self.derivs)
        # transfer functions and their derivatives
        self.f = sigmoid
        self.fprime = sigmoidPrime
        self.g = lambda x: 2*tanh(x)
        self.gprime = lambda x: 2*tanhPrime(x)
        self.h = self.g
        self.hprime = self.gprime
def _setParameters(self, p, owner = None):
ParameterContainer._setParameters(self, p, owner)
nrNeurons = self.outdim
first, second = 0, 0
first, second = second, second + indim*nrNeurons
self.ingateConns = self.params[first:second]
first, second = second, second + indim*nrNeurons
self.forgetgateConns = self.params[first:second]
first, second = second, second + indim*nrNeurons
self.cellConns = self.params[first:second]
first, second = second, second + indim*nrNeurons
self.outgateConns = self.params[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.ingateRecConns = self.params[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.forgetgateRecConns = self.params[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.cellRecConns = self.params[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.outgateRecConns = self.params[first:second]
if self.peep:
first, second = second, second + nrNeurons
self.ingatePeepWeights = self.params[first:second]
first, second = second, second + nrNeurons
self.forgetgatePeepWeights = self.params[first:second]
first, second = second, second + nrNeurons
self.outgatePeepWeights = self.params[first:second]
def _setDerivatives(self, d, owner = None):
ParameterContainer._setDerivatives(self, d, owner)
nrNeurons = self.outdim
first, second = 0, 0
first, second = second, second + indim*nrNeurons
self.ingateConnDerivs = self.derivs[first:second]
first, second = second, second + indim*nrNeurons
self.forgetgateConnDerivs = self.derivs[first:second]
first, second = second, second + indim*nrNeurons
self.cellConnDerivs = self.derivs[first:second]
first, second = second, second + indim*nrNeurons
self.outgateConnDerivs = self.derivs[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.ingateRecConnDerivs = self.derivs[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.forgetgateRecConnDerivs = self.derivs[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.cellRecConnDerivs = self.derivs[first:second]
first, second = second, second + nrNeurons*nrNeurons
self.outgateRecConnDerivs = self.derivs[first:second]
if self.peep:
first, second = second, second + nrNeurons
self.ingatePeepDerivs = self.derivs[first:second]
first, second = second, second + nrNeurons
self.forgetgatePeepDerivs = self.derivs[first:second]
first, second = second, second + nrNeurons
self.outgatePeepDerivs = self.derivs[first:second]
    def _growBuffers(self):
        """Resize every per-timestep buffer in step with the module's standard buffers."""
        Module._growBuffers(self)
        self.ingate = self._resizeArray(self.ingate)
        self.outgate = self._resizeArray(self.outgate)
        self.forgetgate = self._resizeArray(self.forgetgate)
        self.ingatex = self._resizeArray(self.ingatex)
        self.outgatex = self._resizeArray(self.outgatex)
        self.forgetgatex = self._resizeArray(self.forgetgatex)
        self.cellx = self._resizeArray(self.cellx)
        # NOTE(review): self.cell (allocated in __init__) is NOT resized here;
        # it appears unused -- confirm.
        self.state = self._resizeArray(self.state)
        self.ingateError = self._resizeArray(self.ingateError)
        self.outgateError = self._resizeArray(self.outgateError)
        self.forgetgateError = self._resizeArray(self.forgetgateError)
        self.stateError = self._resizeArray(self.stateError)
        # RTRL sensitivity traces grow alongside the activation buffers
        self.Sin = self._resizeArray(self.Sin)
        self.Sforget = self._resizeArray(self.Sforget)
        self.Scell = self._resizeArray(self.Scell)
        self.SinRec = self._resizeArray(self.SinRec)
        self.SforgetRec = self._resizeArray(self.SforgetRec)
        self.ScellRec = self._resizeArray(self.ScellRec)
        if self.peep:
            self.Sin_peep = self._resizeArray(self.Sin_peep)
            self.Sforget_peep = self._resizeArray(self.Sforget_peep)
            self.Scell_peep = self._resizeArray(self.Scell_peep)
def reset(self):
    """Zero all gate, state, error and sensitivity buffers in place.

    Mirrors Module.reset and then clears every layer-local buffer with an
    in-place multiply so array identities are preserved.
    """
    Module.reset(self)
    for buffer_name in ('ingate', 'outgate', 'forgetgate',
                        'ingatex', 'cellx', 'outgatex', 'forgetgatex',
                        'state',
                        'ingateError', 'outgateError', 'forgetgateError',
                        'stateError',
                        'Sin', 'Sforget', 'Scell',
                        'SinRec', 'SforgetRec', 'ScellRec'):
        buf = getattr(self, buffer_name)
        buf *= 0
        setattr(self, buffer_name, buf)
    if self.peep:
        for buffer_name in ('Sin_peep', 'Scell_peep', 'Sforget_peep'):
            buf = getattr(self, buffer_name)
            buf *= 0
            setattr(self, buffer_name, buf)
    # todo: set state derivs to 0?
def _forwardImplementation(self, inbuf, outbuf):
    """One LSTM forward step at timestep ``self.time``.

    Computes the in/forget/out gate pre-activations and the cell input
    from the current input ``inbuf`` (plus the previous output for t > 0),
    squashes them with ``self.f``/``self.g``, updates the cell state, and
    writes the gated output into ``outbuf``.

    NOTE(review): ingateConns, forgetgateConns, cellConns, outgateConns,
    their *RecConns counterparts, and outputbuffer are referenced as bare
    names; nothing in view defines them, so they presumably resolve to
    attributes (e.g. self.ingateConns) or module globals -- confirm.
    """
    nrNeurons = self.outdim
    # slicing the input buffer into the 4 parts
    # input gate pre-activation: W_i * x_t (+ R_i * y_{t-1} when t > 0)
    self.ingatex[self.time] = dot(reshape(ingateConns, (self.outdim, self.indim)), inbuf)
    if self.time > 0:
        self.ingatex[self.time] += dot(reshape(ingateRecConns, (self.outdim, nrNeurons)), outputbuffer[self.time - 1])
    # forget gate pre-activation
    self.forgetgatex[self.time] = dot(reshape(forgetgateConns, (self.outdim, self.indim)), inbuf)
    if self.time > 0:
        self.forgetgatex[self.time] += dot(reshape(forgetgateRecConns, (self.outdim, nrNeurons)), outputbuffer[self.time - 1])
    # cell input pre-activation
    self.cellx[self.time] = dot(reshape(cellConns, (self.outdim, self.indim)), inbuf)
    if self.time > 0:
        self.cellx[self.time] += dot(reshape(cellRecConns, (self.outdim, nrNeurons)), outputbuffer[self.time - 1])
    # output gate pre-activation
    self.outgatex[self.time] = dot(reshape(outgateConns, (self.outdim, self.indim)), inbuf)
    if self.time > 0:
        self.outgatex[self.time] += dot(reshape(outgateRecConns, (self.outdim, nrNeurons)), outputbuffer[self.time - 1])
    # peephole treatment: in/forget gates peek at the *previous* cell state
    if self.peep and self.time > 0:
        self.ingatex[self.time] += self.ingatePeepWeights * self.state[self.time-1]
        self.forgetgatex[self.time] += self.forgetgatePeepWeights * self.state[self.time-1]
    self.ingate[self.time] = self.f(self.ingatex[self.time])
    self.forgetgate[self.time] = self.f(self.forgetgatex[self.time])
    # c_t = i_t * g(cellx_t) (+ f_t * c_{t-1} when t > 0)
    self.state[self.time] = self.ingate[self.time] * self.g(self.cellx[self.time])
    if self.time > 0:
        self.state[self.time] += self.forgetgate[self.time] * self.state[self.time-1]
    # the output gate peeks at the *current* cell state
    if self.peep:
        self.outgatex[self.time] += self.outgatePeepWeights * self.state[self.time]
    self.outgate[self.time] = self.f(self.outgatex[self.time])
    # y_t = o_t * h(c_t)
    outbuf[:] = self.outgate[self.time] * self.h(self.state[self.time])
    # WOOOOOOOWAAA difficult
    #self.Scell[self.time][i*nrNeurons:(i+1)*nrNeurons] =
    # running sensitivity of the cell state (stays zero at t == 0)
    if self.time > 0:
        self.Scell[self.time] = self.Scell[self.time - 1]*self.forgetgate[self.time] + \
            self.gprime(self.cellx[self.time]) * self.ingate[self.time]
def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
    """One LSTM backward step at timestep ``self.time``.

    Propagates the output error through the output gate and the cell
    state, derives the per-gate errors, accumulates the peephole weight
    derivatives, and writes the four gate-error slices into ``inerr``.
    """
    nrNeurons = self.outdim
    # the cell-input slice of the forward input buffer
    cellx = inbuf[nrNeurons*2:nrNeurons*3]
    self.outgateError[self.time] = self.fprime(self.outgatex[self.time]) * outerr * self.h(self.state[self.time])
    self.stateError[self.time] = outerr * self.outgate[self.time] * self.hprime(self.state[self.time])
    if not self._isLastTimestep():
        # error flowing back from t+1 through the forget gate ...
        self.stateError[self.time] += self.stateError[self.time+1] * self.forgetgate[self.time+1]
        if self.peep:
            # ... and through t+1's peephole connections
            self.stateError[self.time] += self.ingateError[self.time+1] * self.ingatePeepWeights
            self.stateError[self.time] += self.forgetgateError[self.time+1] * self.forgetgatePeepWeights
    if self.peep:
        # current output gate's peephole also feeds the state error
        self.stateError[self.time] += self.outgateError[self.time] * self.outgatePeepWeights
    cellError = self.ingate[self.time] * self.gprime(cellx) * self.stateError[self.time]
    if self.time > 0:
        # the forget gate error only exists when there is a previous state
        self.forgetgateError[self.time] = self.fprime(self.forgetgatex[self.time]) * self.stateError[self.time] * self.state[self.time-1]
    self.ingateError[self.time] = self.fprime(self.ingatex[self.time]) * self.stateError[self.time] * self.g(cellx)
    # compute derivatives
    if self.peep:
        self.outgatePeepDerivs += self.outgateError[self.time] * self.state[self.time]
        if self.time > 0:
            self.ingatePeepDerivs += self.ingateError[self.time] * self.state[self.time-1]
            self.forgetgatePeepDerivs += self.forgetgateError[self.time] * self.state[self.time-1]
    # pack the per-gate errors back into the input-error buffer
    inerr[:nrNeurons] = self.ingateError[self.time]
    inerr[nrNeurons:nrNeurons*2] = self.forgetgateError[self.time]
    inerr[nrNeurons*2:nrNeurons*3] = cellError
    inerr[nrNeurons*3:] = self.outgateError[self.time]
|
|
"""Assistant for Generating Trees with Attention and GIST
Data Description:
data:
type: dict
content: {surface_form:
[<path>, <path>, ...],
...}
path:
type: dict
content: {'dtree': tree_features,
'attach_points': attach_point_features,
'attach_target': local_point_index,
'tree_target': tree_index,
'attach_type': (type_index, pos_index)}
tree_features:
type: list
content: [tree_feature_index, tree_feature_index, ...]
note: tree_feature_index is an alias for: tfeat_index or tree_feat_index
attach_point_features:
type: list
content: [[attach_feature_index, ...],
[attach_feature_index, ...],
...]
note: attach_feature_index is an alias for: afeat_index or attach_feat_index
local_point_index:
type: int
content: index to the attach_point_features array
note: the only index that differs in that it references an array
X_index:
type: int
content: vocabulary encoding of X;
see X_vocab
X_vocab:
type: baal.utils.Vocabulary
content: the string-to-integer mapping for X domain;
X:
tree_index := encodes the elementary trees
tree_feature := encodes the features of trees
attach_feature := encodes the features of attachment points
type := encodes the tree type (currently: ins for insert and sub for substitution)
pos := encodes the part of speech of the root node (the tree operation node)
"""
from __future__ import division, print_function
import yaml
import json
import pickle
import math
import itertools
import lasagne
import baal
import numpy as np
import theano
import os
from baal.utils.timer import Timer
from baal.utils import Vocabulary
from baal.utils import VocabManager
from .gist_base import GistIgor
try:
import cPickle as pickle
except:
import pickle
try:
input = raw_input
except:
pass
class utils:
    """Namespace of stateless helpers for building padded batch tensors."""
    #log = loggers.duallog("decode")

    @staticmethod
    def unzip(arr, i):
        """Return the i-th element of each item in a list of indexables."""
        assert isinstance(arr, list)
        return [x[i] for x in arr]

    @staticmethod
    def indices(mat):
        """Pair a column of row indices with *mat* for fancy indexing."""
        return np.arange(len(mat)).reshape(len(mat), 1), mat

    @staticmethod
    def make_data_matrices(ref):
        """Allocate zeroed float matrices sized from *ref*'s shape attributes."""
        return (np.zeros(ref.dtree_shape, dtype=theano.config.floatX),
                np.zeros(ref.attach_shape, dtype=theano.config.floatX),
                np.zeros(ref.etree_shape, dtype=theano.config.floatX),
                np.zeros(ref.next_dtree_shape, dtype=theano.config.floatX))

    @staticmethod
    def parse2predicates(input_parse):
        """Convert a bracketed parse string into (predicates, surface form).

        NOTE(review): Entry, tree_enrichment and simple_hlf are not imported
        in this module; presumably supplied by the baal package -- confirm.
        """
        baal.utils.hlf.reset()
        entry = Entry.make(bracketed_string=input_parse)
        tree, surface = entry.tree, entry.get_lexical()
        tree_enrichment.populate_annotations(tree)
        tree_enrichment.recursive_spine_fix(tree)
        _, new_addressbook = tree.clone()
        predicates = simple_hlf.from_addressbook(new_addressbook, preprocess=True)
        return predicates, surface

    @staticmethod
    def pad_to(arr, sh):
        """Zero-pad *arr* on the high side of every axis up to shape *sh*."""
        pad_width = tuple((0, max(m - n, 0)) for n, m in zip(arr.shape, sh))
        return np.pad(arr, pad_width=pad_width,
                      mode='constant', constant_values=0)

    @staticmethod
    def pad_right(arr, sh):
        """Zero-pad every axis but the first up to *sh*; the batch axis is untouched."""
        pad_dims = tuple((0, max(sh_n - arr_n, 0)) for arr_n, sh_n in zip(arr.shape[1:], sh))
        pad_width = ((0, 0),) + pad_dims
        return np.pad(arr, pad_width=pad_width,
                      mode='constant', constant_values=0)

    @staticmethod
    def vstack(arrs, ndim=2, mask=None):
        """Stack arrays along axis 0, right-padding to a common shape.

        Returns (stacked, mask) where mask is 1 on real entries and 0 on
        padding. ``None`` entries in *arrs* are skipped; lists are promoted
        to arrays; arrays below *ndim* get leading singleton axes.
        (a ton of ifs... but a ton of edge cases. should work for a range
        of cases)
        """
        out = None
        for arr in arrs:
            if arr is None:
                continue
            if isinstance(arr, list):
                arr = np.array(arr)
            if arr.ndim < ndim:
                arr = arr.reshape((1,)*(ndim-arr.ndim)+arr.shape)
            if out is None:
                out = arr
                mask = mask if mask is not None else np.ones_like(arr)
            else:
                sh = tuple(max(i,j) for i,j in zip(out.shape[1:],arr.shape[1:]))
                out = np.vstack((utils.pad_right(out,sh),
                                 utils.pad_right(arr,sh)))
                mask = np.vstack((utils.pad_right(mask,sh),
                                  utils.pad_right(np.ones_like(arr),sh)))
        return out, mask

    @staticmethod
    def sub_mask(mask):
        """Index tuple addressing the row most recently appended by vstack.

        Fix: the original returned ``slice(-1)`` (== everything *except*
        the last row), so ``stacked_mask[sub_mask(m)] = m`` broadcast the
        new mask over all earlier rows and never touched the new one.
        ``slice(-1, None)`` selects exactly the last (just-appended) row,
        trimmed to the unpadded extent of *mask* on the remaining axes.
        """
        return [slice(-1, None)] + [slice(None, i) for i in mask.shape]
class Attender(GistIgor):
"""This version will handle the attention-based model
It will generate data that includes attachment points
"""
def initialize(self):
    """Load vocabularies and noise tables, then flatten the datasets.

    GistIgor.initialize() populates self.train_data, self.val_data and
    self.vocab from pickles; this adds everything the attention model
    needs on top of that (subsets tensor, NCE noise, vocab manager,
    flattened node lists and their size statistics).
    """
    super(Attender, self).initialize()
    # subsets shape: (num_type, num_pos, num_elemtrees, 2); the trailing 2
    # holds an elementary tree's only base features: head word + template.
    F = lambda v: os.path.join(self.vocab_dir, v)
    self.subsets = np.load(F(self.attention_subsets_file))
    # self.selected_noise names the config attribute holding the noise file
    noise_file = self.__dict__[self.selected_noise]
    self.nce_noise = np.load(F(noise_file))
    self.vocman = VocabManager()
    self.vocman.add("tree_feats", filename=F(self.tree_feature_vocab))
    self.vocman.add("attach_feats", filename=F(self.attach_feature_vocab))
    self.vocman.add("head_map", filename=F(self.head_map_filename))
    self.tree_feature_size = len(self.vocman.tree_feats)
    self.attachment_feature_size = len(self.vocman.attach_feats)
    self.max_num_elemtrees = self.subsets.shape[2]
    # flatten_data returns (node list, widest attachment list)
    self.train_data, mtrain = self.flatten_data(self.train_data)
    self.num_training_datapoints = len(self.train_data)
    self.val_data, mval = self.flatten_data(self.val_data)
    self.num_val_datapoints = len(self.val_data)
    self.max_num_attachments = max(mtrain, mval)
def flatten_data(self, data):
    """Flatten {surface_form: [path, ...]} into a single list of nodes.

    Upgrades legacy 5-key nodes to the current layout, strips falsy
    feature ids, drops nodes whose target tree was never seen, and
    tracks the widest attachment-point list encountered.

    Returns:
        (nodes, max_attachments)
    """
    nodes = []
    max_attachments = 0  # was a float literal; these are counts
    bad_datums = 0
    for pathset in data.values():  # surface form key itself is unused
        for path in pathset:
            for node in path:
                if len(node) == 5:
                    # legacy layout: rotate the misplaced fields into place
                    node['attach_type'] = node['tree_target']
                    node['tree_target'] = node['attach_head']
                    node['attach_head'] = None
                # drop falsy (padding/unknown) feature ids
                node['attach_points'] = [[v for v in vs if v]
                                         for vs in node['attach_points']]
                if any(len(v) == 0 for v in node['attach_points']):
                    raise Exception("Bad attachment point features; shouldnt be happening")
                node['dtree'] = [v for v in node['dtree'] if v]
                if len(node['dtree']) == 0:
                    raise Exception("Bad tree features. Shouldn't be happening")
                if node['tree_target'] is None:
                    # target tree unseen during feature generation
                    bad_datums += 1
                    continue
                nodes.append(node)
                max_attachments = max(max_attachments, len(node['attach_points']))
    print("Threw away {} data points because of unseen target trees.".format(bad_datums))
    print("Left with {} data points".format(len(nodes)))
    return nodes, max_attachments
@property
def default_Fout(self):
    """Default output nonlinearity: leaky ReLU with slope 0.1.

    (Commented-out ReLU/getattr alternatives removed as dead code.)
    """
    return lasagne.nonlinearities.LeakyRectify(0.1)
@property
def default_initializer(self):
    """Default weight initializer: Glorot-uniform with ReLU gain.

    (Dead commented-out ``glo.set_positive()`` call removed.)
    """
    return lasagne.init.GlorotUniform(gain='relu')
@property
def linear_initializer(self):
    """Glorot-uniform initializer with the default (linear) gain."""
    initializer = lasagne.init.GlorotUniform()
    return initializer
@property
def val_server(self):
    """Batch generator over the validation split (compact, masked format).

    Fix: an unreachable second ``return self._server(...)`` after the
    first return was removed; only the compact server is in use.
    """
    return self._compact_server(self._iter(self.val_data))
@property
def training_server(self):
    """Batch generator over the training split (compact, masked format).

    Fix: an unreachable second ``return self._server(...)`` after the
    first return was removed; only the compact server is in use.
    """
    return self._compact_server(self._iter(self.train_data))
def debug(self, *args, **kwargs):
    """Log the positional arguments as one pipe-separated debug line.

    Keyword arguments are accepted for call-site flexibility but are
    currently ignored.
    """
    message = " | ".join(args)
    self.logger.debug(message)
def make_nce_data(self, attach_type, tree_index=None, tree_feats=None, alternate=False):
    """Sample NCE noise trees for one attachment decision.

    Draws ``self.nce_size`` negative tree ids without replacement from the
    noise distribution for *attach_type*, appends the positive
    *tree_index*, and swaps the positive into a random slot.

    Note: this is agnostic to the noise distribution itself -- it is
    whatever was saved to the nce_sampler file during feature generation
    (a count tensor normalized across the count dimension, plus the
    flattened empirical unigram). *tree_feats* is accepted for interface
    compatibility but unused.

    Returns:
        (tree_feature_rows, target_position, noise_probabilities)
    """
    tree_distribution = self.nce_noise[attach_type]
    # fall back to the global unigram when this attach type has too little
    # support, or when the caller asks to alternate distributions
    if self.nce_size > len(tree_distribution.nonzero()[0]) or alternate:
        tree_distribution = self.nce_noise.sum(axis=(0,1))
        tree_distribution /= tree_distribution.sum()
    # sample negatives from everything except the positive tree
    sample_space = np.arange(self.max_num_elemtrees)
    sample_space = np.delete(sample_space, tree_index)
    sample_distribution = np.delete(tree_distribution, tree_index)
    sample_distribution /= sample_distribution.sum()
    samples = np.random.choice(sample_space,
                               size=self.nce_size,
                               replace=False,
                               p=sample_distribution)
    all_trees = np.concatenate((samples, np.array([tree_index]))).astype(np.int32)
    # hide the positive at a random position among the negatives
    target_ind = np.random.randint(0, self.nce_size)
    all_trees[-1], all_trees[target_ind] = all_trees[target_ind], all_trees[-1]
    all_noise = tree_distribution[all_trees]
    if all_noise.sum() > 1.0:
        # fix: was a print + pdb.set_trace() left over from debugging;
        # fail loudly instead of dropping into a debugger mid-training
        raise ValueError("NCE noise probabilities sum to more than 1.0")
    return self.subsets[attach_type][all_trees,:].astype(np.int32), target_ind, all_noise
def _iter(self, data):
    """interface between data dict and numpy matrix creation

    Iterates a random permutation of *data*, truncated to a whole number
    of batches, and yields one tuple per datapoint:
        (dtree_feats, attach_feats, attach_target,
         etree_feats, target_id, binding_feats, noise)
    """
    # drop the ragged tail so an epoch is an exact multiple of batch_size
    epoch_size = len(data) // self.batch_size * self.batch_size
    self.logger.debug("{} dp total across batch sizes of {}".format(epoch_size, self.batch_size))
    for data_i in np.random.choice(len(data),epoch_size, replace=False):
        node = data[data_i]
        dtree_feats = np.array(node['dtree'])
        attach_feats = [x for x in node['attach_points']]  # shallow copy
        attach_target = node['attach_target']
        tree_ind = node['tree_target']
        ap_head = node['attach_head']
        if tree_ind is None:
            raise Exception("this shouldn't be happening; encountered bad data")
        atype = node['attach_type']
        #### make the NCE data
        # parity of the datapoint index alternates the noise distribution
        # used inside make_nce_data (truthy on odd indices)
        etree_feats, target_id, noise = self.make_nce_data(atype,
                                                           tree_index=tree_ind,
                                                           alternate=data_i%2)
        #### binding feature stuff
        ## note: implicit assumption that self.nce_size+1 == etree_feats.shape[0]
        binding_feats = np.zeros(self.nce_size+1, dtype=np.int32)
        for etree_i, etree in enumerate(etree_feats):
            etree_head_ind = etree[0]  # first base feature: head word id
            key = (ap_head, etree_head_ind, atype)
            if key in self.vocman.head_map:
                binding_feats[etree_i] = self.vocman.head_map[key]
        #### output
        yield (dtree_feats, attach_feats, attach_target,
               etree_feats, target_id, binding_feats, noise)
def _server(self, dataiter):
    """Yield dense, fixed-shape training batches built from *dataiter*.

    d_tree_in: the derivation trees at time t
    attach_in: the attachments on the derivation trees
    E_target: the indices of the correct elementary trees
    A_target: the indices of the correct attachments
    X_next: the derivation trees that result from possible elem trees
    e_filter: the filter from all elem trees to the subset of possible elem trees
    e_filter and X_next should have same dimensions because:
        E[:, e_filter] = E_filtered       # (h_e, all_e) -> (h_e, e_subset)
        F_align(T, C) * E_filtered = R_E  # (b, h_e) * (h_e, e_subset) -> (b, e_subset)
        Q = R_E + X_next
    """
    from functools import reduce  # fix: reduce is not a builtin on Python 3
    dtree_input_shape = (self.batch_size, self.tree_feature_size)
    next_tree_shape = (self.batch_size, self.nce_size+1, self.tree_feature_size)
    attach_input_shape = (self.batch_size, self.max_num_attachments, self.attachment_feature_size)
    etree_target_shape = (self.batch_size, self.nce_size+1)
    attach_target_shape = (self.batch_size, self.max_num_attachments)
    # pretty-print "shape -> element count" for the debug log
    f = lambda x: "{} -> {}".format(x, reduce(lambda a,b:a*b, x))
    self.debug("dtree input shape", f(dtree_input_shape))
    self.debug("attach input shape", f(attach_input_shape))
    self.debug("next tree shape", f(next_tree_shape))
    self.debug("etree target shape", f(etree_target_shape))
    self.debug("attach target shape", f(attach_target_shape))
    while dataiter:
        next_batch = list(itertools.islice(dataiter, 0, self.batch_size))
        if len(next_batch) < self.batch_size:
            print("End of batch; ({})".format(len(next_batch)))
            # fix: `raise StopIteration` inside a generator is a
            # RuntimeError under PEP 479; a plain return ends iteration
            return
        # convention: X is input; Y is target; approximating function F(X,Y)
        # negative samplings gives us YNOT.
        deriv_tree_X = np.zeros(dtree_input_shape, dtype=theano.config.floatX)
        attachments_X = np.zeros(attach_input_shape, dtype=theano.config.floatX)
        attachments_Y = np.zeros(attach_target_shape, dtype=theano.config.floatX)
        # the second input route
        elem_trees_X2 = np.zeros(next_tree_shape, dtype=theano.config.floatX)
        deriv_trees_X2 = np.zeros(next_tree_shape, dtype=theano.config.floatX)
        # for computing NCE
        tree_noise = np.zeros(etree_target_shape, dtype=theano.config.floatX)
        elem_trees_Y = np.zeros(etree_target_shape, dtype=theano.config.floatX)
        for i, (dtree_feats, attach_feats, attach_target, etree_feats,
                tree_target, binding_feats, noise) in enumerate(next_batch):
            #### inputs
            deriv_tree_X[i, dtree_feats] = 1
            for j, feats in enumerate(attach_feats):
                attachments_X[i, j, feats] = 1
            #### targets
            elem_trees_Y[i, tree_target] = 1
            attachments_Y[i, attach_target] = 1
            #### nce data for elementary trees
            ### the idea: we broadcast an indexing array (idx)
            ### across the num_etrees * num_feats column
            ## this way, we can index the tensor with two arrays
            ## otherwise, it's super annoying
            ## it's much easier to have a 1:1 correspondance for points
            ## even if that means repeating indices in idx (which we do)
            idx = np.arange(etree_feats.shape[0])[:,np.newaxis]
            idx = (idx * np.ones(etree_feats.shape)).flatten().astype(np.int32)
            elem_trees_X2[i, idx, etree_feats.flatten()] = 1
            #### nce data for derivation trees
            ## the idea: we want the next tree feats to have the current
            ## dtree's feats plus the feats of the elem tree of choice,
            ## plus the binding feat (head of the attach point matched
            ## against the head of the elem tree)
            bc_shape = (etree_feats.shape[0],len(dtree_feats))
            dtree_feats = np.broadcast_to(np.array(dtree_feats), bc_shape)
            composed_feats = np.concatenate((etree_feats, dtree_feats), axis=1)
            idx = np.arange(composed_feats.shape[0])[:,np.newaxis]
            idx = (idx * np.ones(composed_feats.shape, dtype=np.int32)).flatten()
            deriv_trees_X2[i, idx, composed_feats.flatten()] = 1
            # nonzero binding features light up their own indices
            deriv_trees_X2[i, binding_feats.nonzero(),
                           binding_feats[binding_feats.nonzero()]] = 1
            #### the noise
            tree_noise[i,:] = noise
        # paranoid NaN check before handing the batch over
        for x in (deriv_tree_X, attachments_X, elem_trees_X2, deriv_trees_X2,
                  attachments_Y, elem_trees_Y, tree_noise):
            if np.any(np.isnan(x)):
                raise Exception("FOUND A NAN")
        yield (deriv_tree_X, attachments_X, elem_trees_X2, deriv_trees_X2,
               attachments_Y, elem_trees_Y, tree_noise)
def _compact_server(self, dataiter):
    """Yield compact, mask-annotated batches built from *dataiter*.

    d_tree_in: the derivation trees at time t
    attach_in: the attachments on the derivation trees
    E_target: the indices of the correct elementary trees
    A_target: the indices of the correct attachments
    X_next: the derivation trees that result from possible elem trees
    e_filter: the filter from all elem trees to the subset of possible elem trees
    e_filter and X_next should have same dimensions because:
        E[:, e_filter] = E_filtered       # (h_e, all_e) -> (h_e, e_subset)
        F_align(T, C) * E_filtered = R_E  # (b, h_e) * (h_e, e_subset) -> (b, e_subset)
        Q = R_E + X_next
    """
    while dataiter:
        dtree_X, dtree_mask = None, None
        attach_X, attach_mask = None, None
        etree_X, etree_mask = None, None
        nexttree_X, nexttree_mask = None, None
        noise_X = None
        etree_Y = None
        attach_Y = None
        next_batch = list(itertools.islice(dataiter, 0, self.batch_size))
        if len(next_batch) < self.batch_size:
            print("End of batch; ({})".format(len(next_batch)))
            # fix: `raise StopIteration` inside a generator is a
            # RuntimeError under PEP 479; a plain return ends iteration
            return
        for i, (dtree_feats, attach_feats, attach_target, etree_feats,
                tree_target, binding_feats, noise) in enumerate(next_batch):
            ### derivation tree input
            dtree_X, dtree_mask = utils.vstack([dtree_X, dtree_feats],
                                               mask=dtree_mask)
            ### attachment input
            _feats, _mask = utils.vstack(attach_feats)
            attach_X, attach_mask = utils.vstack([attach_X, _feats], ndim=3,
                                                 mask=attach_mask)
            attach_mask[utils.sub_mask(_mask)] = _mask
            ### attachment label
            _Y = np.zeros(len(attach_feats))
            _Y[attach_target] = 1
            attach_Y, _ = utils.vstack([attach_Y, _Y])
            ### elementary tree input
            _feats, _mask = utils.vstack(etree_feats)
            etree_X, etree_mask = utils.vstack([etree_X, _feats], ndim=3,
                                               mask=etree_mask)
            etree_mask[utils.sub_mask(_mask)] = _mask
            ### elementary tree label
            _Y = np.zeros(len(etree_feats))
            _Y[tree_target] = 1
            etree_Y, _ = utils.vstack([etree_Y, _Y])
            ### noise
            noise_X, _ = utils.vstack([noise_X, noise])
            ### next derivation tree: current dtree feats broadcast across
            ### the candidate etrees, plus the head-binding feature column
            compose_shape = (etree_feats.shape[0], dtree_feats.shape[0])
            dtree_broadcast = np.broadcast_to(dtree_feats, compose_shape)
            composed = np.concatenate((dtree_broadcast,
                                       etree_feats,
                                       binding_feats[:,None]), axis=1)
            ### next derivation tree; approximating features
            _feats, _mask = utils.vstack(composed)
            # the binding column is only "real" where a binding feat exists
            _mask[:,-1] = binding_feats
            nexttree_X, nexttree_mask = utils.vstack([nexttree_X, _feats], ndim=3,
                                                     mask=nexttree_mask)
            nexttree_mask[utils.sub_mask(_mask)] = _mask
        # (removed: stray per-batch `import pdb` plus commented-out
        #  set_trace/NaN-check code left over from development)
        yield (dtree_X, dtree_mask[...,None], attach_X, attach_mask[...,None],
               attach_Y, etree_X, etree_mask[...,None], etree_Y, nexttree_X,
               nexttree_mask[...,None], noise_X)
def report(self):
    """Log/print the latest observation and record it with the scribe.

    With no observations: raises when verbose, warns when a logger
    exists, and otherwise returns quietly.

    Fixes: after the "Can't report" warning the original fell through to
    ``self.observations[-1]`` and crashed with IndexError -- it now
    returns in every empty case. Also fixed the "Atttach" typo and a
    stray trailing ": " in two log format strings, and removed the
    commented-out get_alternates_deprecated code that followed this
    method.
    """
    if len(self.observations) == 0:
        if self.verbose:
            raise Exception("I can't report; failing loudly")
        if self.logger:
            self.logger.warning("Can't report. Failing silently")
        return
    obs_time, obs = self.observations[-1]
    epoch, train_loss, ttree_acc5, tattach_acc5, val_loss, vtree_acc5, vattach_acc5 = obs
    if self.logger:
        self.logger.info("Epoch {}".format(epoch))
        self.logger.info("\tLearning Rate: {}".format(self.learning_rate))
        self.logger.info("\tRegularizing Lambda: {}".format(self.reg_lambda))
        self.logger.info("\tTraining Loss: {}".format(train_loss))
        self.logger.info("\tTraining Tree Accuracy: {}".format(ttree_acc5))
        self.logger.info("\tTraining Attach Accuracy: {}".format(tattach_acc5))
        self.logger.info("\tValidation Loss: {}".format(val_loss))
        self.logger.info("\tValidation Tree Accuracy: {}".format(vtree_acc5))
        self.logger.info("\tValidation Attach Accuracy: {}".format(vattach_acc5))
        self.logger.info("\tLoss Ratio (Val/Train): {}".format(val_loss/train_loss))
    if self.verbose:
        print("Epoch {}".format(epoch))
        print("\tTraining Loss: {}".format(train_loss))
        print("\tValidation Loss: {}".format(val_loss))
        print("\tLoss Ratio (Val/Train): {}".format(val_loss/train_loss))
    self.scribe.record(epoch, type="epoch")
    self.scribe.record(train_loss, type="training loss")
    self.scribe.record(ttree_acc5, type="training tree acc5")
    self.scribe.record(tattach_acc5, type="training attach acc5")
    self.scribe.record(val_loss, type="validation loss")
    self.scribe.record(vtree_acc5, type="validation tree acc5")
    self.scribe.record(vattach_acc5, type="validation attach acc5")
|
|
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Tests for L{wokkel.component}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyObject
from twisted.internet.base import BaseConnector
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.task import Clock
from twisted.python import failure
from twisted.trial import unittest
from twisted.words.protocols.jabber import xmlstream
from twisted.words.protocols.jabber.ijabber import IXMPPHandlerCollection
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.xmlstream import XMPPHandler
from twisted.words.xish import domish
from wokkel import component
from wokkel.generic import XmlPipe
class FakeConnector(BaseConnector):
    """
    Fake connector that counts connection attempts.
    """
    connects = 0

    def connect(self):
        """Record the attempt, then defer to the base connector."""
        self.connects = self.connects + 1
        BaseConnector.connect(self)

    def _makeTransport(self):
        """No transport is ever created for the fake connector."""
        return None

    def getDestination(self):
        """The fake connector has no destination."""
        return None
class TestableComponent(component.Component):
    """
    Component variant instrumented for testing.

    The created factory gets a L{Clock} instead of the regular reactor so
    tests can control time, and connections go through L{FakeConnector}
    so (re)connection attempts can be counted.
    """

    def __init__(self, *args, **kwargs):
        component.Component.__init__(self, *args, **kwargs)
        self.factory.clock = Clock()

    def _getConnection(self):
        connector = FakeConnector(self.factory, None, None)
        connector.connect()
        return connector
class ComponentTest(unittest.TestCase):
    """
    Tests for L{component.Component}.
    """

    def test_startServiceReconnectAfterFailure(self):
        """
        When the first connection attempt fails, retry.
        """
        comp = TestableComponent('example.org', 5347,
                                 'test.example.org', 'secret')

        # Starting the service initiates the first connection attempt.
        comp.startService()
        connector = comp._connection
        self.assertEqual(1, connector.connects)

        # Simulate a refused connection, then let the back-off delay
        # elapse; a second attempt should follow.
        connector.connectionFailed(ConnectionRefusedError())
        comp.factory.clock.advance(5)
        self.assertEqual(2, connector.connects)

    def test_stopServiceNoReconnect(self):
        """
        When the service is stopped, no reconnect is attempted.
        """
        comp = TestableComponent('example.org', 5347,
                                 'test.example.org', 'secret')
        comp.startService()
        connector = comp._connection
        connector.connectionFailed(ConnectionRefusedError())

        # Stop before the back-off delay expires; advancing past the
        # delay afterwards must not trigger a new attempt.
        comp.factory.clock.advance(1)
        comp.stopService()
        comp.factory.clock.advance(4)
        self.assertEqual(1, connector.connects)
class InternalComponentTest(unittest.TestCase):
    """
    Tests for L{component.InternalComponent}.
    """

    def setUp(self):
        self.router = component.Router()
        self.component = component.InternalComponent(self.router, 'component')

    def test_interface(self):
        """
        L{component.InternalComponent} implements
        L{IXMPPHandlerCollection}.
        """
        verifyObject(IXMPPHandlerCollection, self.component)

    def test_startServiceRunning(self):
        """
        Starting the service makes it running.
        """
        self.assertFalse(self.component.running)
        self.component.startService()
        self.assertTrue(self.component.running)

    def test_startServiceAddRoute(self):
        """
        Starting the service registers the component's route.
        """
        self.component.startService()
        self.assertIn('component', self.router.routes)

    def test_startServiceNoDomain(self):
        """
        A component without a domain can be started without error.
        """
        self.component = component.InternalComponent(self.router)
        self.component.startService()

    def test_startServiceAddMultipleRoutes(self):
        """
        Starting a multi-domain component registers one route per domain.
        """
        self.component.domains.add('component2')
        self.component.startService()
        self.assertIn('component', self.router.routes)
        self.assertIn('component2', self.router.routes)

    def test_startServiceHandlerDispatch(self):
        """
        Starting the service hooks up handlers.
        """
        observed = []

        class DispatchRecorder(XMPPHandler):
            def connectionInitialized(self):
                self.xmlstream.addObserver('//event/test', observed.append)

        DispatchRecorder().setHandlerParent(self.component)
        self.component.startService()
        self.assertEqual([], observed)
        self.component.xmlstream.dispatch(None, '//event/test')
        self.assertEqual([None], observed)

    def test_stopServiceNotRunning(self):
        """
        Stopping the service makes it not running.
        """
        self.component.startService()
        self.component.stopService()
        self.assertFalse(self.component.running)

    def test_stopServiceRemoveRoute(self):
        """
        Stopping the service removes routes.
        """
        self.component.startService()
        self.component.stopService()
        self.assertNotIn('component', self.router.routes)

    def test_stopServiceNoDomain(self):
        """
        A component without a domain can be stopped without error.
        """
        self.component = component.InternalComponent(self.router)
        self.component.startService()
        self.component.stopService()

    def test_startServiceRemoveMultipleRoutes(self):
        """
        Stopping a multi-domain component removes every registered route.
        """
        self.component.domains.add('component2')
        self.component.startService()
        self.component.stopService()
        self.assertNotIn('component', self.router.routes)
        self.assertNotIn('component2', self.router.routes)

    def test_stopServiceHandlerDispatch(self):
        """
        Stopping the service disconnects handlers.
        """
        observed = []

        class DisconnectRecorder(XMPPHandler):
            def connectionLost(self, reason):
                observed.append(reason)

        DisconnectRecorder().setHandlerParent(self.component)
        self.component.startService()
        self.component.stopService()
        self.assertEqual(1, len(observed))

    def test_addHandler(self):
        """
        Adding a handler connects it to the stream.
        """
        observed = []

        class DispatchRecorder(XMPPHandler):
            def connectionInitialized(self):
                self.xmlstream.addObserver('//event/test', observed.append)

        self.component.startService()
        self.component.xmlstream.dispatch(None, '//event/test')
        self.assertEqual([], observed)

        DispatchRecorder().setHandlerParent(self.component)
        self.component.xmlstream.dispatch(None, '//event/test')
        self.assertEqual([None], observed)

    def test_send(self):
        """
        A message sent from the component ends up at the router.
        """
        routed = []
        message = domish.Element((None, 'message'))
        self.router.route = routed.append
        self.component.startService()
        self.component.send(message)
        self.assertEqual([message], routed)
class RouterTest(unittest.TestCase):
    """
    Tests for L{component.Router}.
    """

    def test_addRoute(self):
        """
        Test route registration and routing on incoming stanzas.
        """
        router = component.Router()
        routed = []
        router.route = routed.append
        pipe = XmlPipe()
        router.addRoute('example.org', pipe.sink)
        self.assertEqual(1, len(router.routes))
        self.assertEqual(pipe.sink, router.routes['example.org'])

        element = domish.Element(('testns', 'test'))
        pipe.source.send(element)
        self.assertEqual([element], routed)

    def test_route(self):
        """
        Test routing of a message.
        """
        sender = XmlPipe()
        receiver = XmlPipe()
        router = component.Router()
        router.addRoute('component1.example.org', sender.sink)
        router.addRoute('component2.example.org', receiver.sink)

        outgoing = []
        receiver.source.addObserver('/*', outgoing.append)
        stanza = domish.Element((None, 'presence'))
        stanza['from'] = 'component1.example.org'
        stanza['to'] = 'component2.example.org'
        sender.source.send(stanza)
        self.assertEqual([stanza], outgoing)

    def test_routeDefault(self):
        """
        Test routing of a message using the default route.

        The default route is the one with L{None} as its key in the
        routing table. It is taken when there is no more specific route
        in the routing table that matches the stanza's destination.
        """
        sender = XmlPipe()
        s2s = XmlPipe()
        router = component.Router()
        router.addRoute('component1.example.org', sender.sink)
        router.addRoute(None, s2s.sink)

        outgoing = []
        s2s.source.addObserver('/*', outgoing.append)
        stanza = domish.Element((None, 'presence'))
        stanza['from'] = 'component1.example.org'
        stanza['to'] = 'example.com'
        sender.source.send(stanza)
        self.assertEqual([stanza], outgoing)
class ListenComponentAuthenticatorTest(unittest.TestCase):
    """
    Tests for L{component.ListenComponentAuthenticator}.
    """

    def setUp(self):
        # Capture everything the stream would send, instead of using a
        # real transport.
        self.output = []
        authenticator = component.ListenComponentAuthenticator('secret')
        self.xmlstream = xmlstream.XmlStream(authenticator)
        self.xmlstream.send = self.output.append

    def loseConnection(self):
        """
        Stub loseConnection because we are a transport.
        """
        self.xmlstream.connectionLost("no reason")

    def test_streamStarted(self):
        """
        The received stream header should set several attributes.
        """
        observers = []

        def addOnetimeObserver(event, observerfn):
            # Record observer registrations rather than installing them.
            observers.append((event, observerfn))

        xs = self.xmlstream
        xs.addOnetimeObserver = addOnetimeObserver

        # The test case itself acts as the transport (see loseConnection).
        xs.makeConnection(self)
        self.assertIdentical(None, xs.sid)
        self.assertFalse(xs._headerSent)

        xs.dataReceived("<stream:stream xmlns='jabber:component:accept' "
                        "xmlns:stream='http://etherx.jabber.org/streams' "
                        "to='component.example.org'>")
        # Component accept streams are unversioned, hence (0, 0).
        self.assertEqual((0, 0), xs.version)
        self.assertNotIdentical(None, xs.sid)
        self.assertTrue(xs._headerSent)
        # The authenticator must now be observing all elements, waiting
        # for the handshake.
        self.assertEquals(('/*', xs.authenticator.onElement), observers[-1])

    def test_streamStartedWrongNamespace(self):
        """
        The received stream header should have a correct namespace.
        """
        streamErrors = []

        xs = self.xmlstream
        xs.sendStreamError = streamErrors.append
        xs.makeConnection(self)
        # 'jabber:client' is not the component accept namespace.
        xs.dataReceived("<stream:stream xmlns='jabber:client' "
                        "xmlns:stream='http://etherx.jabber.org/streams' "
                        "to='component.example.org'>")
        self.assertEquals(1, len(streamErrors))
        self.assertEquals('invalid-namespace', streamErrors[-1].condition)

    def test_streamStartedNoTo(self):
        """
        The received stream header should have a 'to' attribute.
        """
        streamErrors = []

        xs = self.xmlstream
        xs.sendStreamError = streamErrors.append
        xs.makeConnection(self)
        # Header deliberately lacks the 'to' attribute.
        xs.dataReceived("<stream:stream xmlns='jabber:component:accept' "
                        "xmlns:stream='http://etherx.jabber.org/streams'>")
        self.assertEquals(1, len(streamErrors))
        self.assertEquals('improper-addressing', streamErrors[-1].condition)

    def test_onElement(self):
        """
        We expect a handshake element with a hash.
        """
        handshakes = []

        xs = self.xmlstream
        xs.authenticator.onHandshake = handshakes.append

        handshake = domish.Element(('jabber:component:accept', 'handshake'))
        handshake.addContent('1234')
        xs.authenticator.onElement(handshake)
        # onElement must forward the handshake text to onHandshake.
        self.assertEqual('1234', handshakes[-1])

    def test_onElementNotHandshake(self):
        """
        Reject elements that are not handshakes
        """
        handshakes = []
        streamErrors = []

        xs = self.xmlstream
        xs.authenticator.onHandshake = handshakes.append
        xs.sendStreamError = streamErrors.append

        # Any non-handshake element before authentication is an error.
        element = domish.Element(('jabber:component:accept', 'message'))
        xs.authenticator.onElement(element)
        self.assertFalse(handshakes)
        self.assertEquals('not-authorized', streamErrors[-1].condition)

    def test_onHandshake(self):
        """
        Receiving a handshake matching the secret authenticates the stream.
        """
        authd = []

        def authenticated(xs):
            authd.append(xs)

        xs = self.xmlstream
        xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated)
        xs.sid = u'1234'
        # Expected hash for sid '1234' and secret 'secret'
        # (XEP-0114: SHA-1 of sid + secret).
        theHash = '32532c0f7dbf1253c095b18b18e36d38d94c1256'
        xs.authenticator.onHandshake(theHash)
        # An empty handshake reply confirms authentication.
        self.assertEqual('<handshake/>', self.output[-1])
        self.assertEquals(1, len(authd))

    def test_onHandshakeWrongHash(self):
        """
        Receiving a bad handshake should yield a stream error.
        """
        streamErrors = []
        authd = []

        def authenticated(xs):
            authd.append(xs)

        xs = self.xmlstream
        xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated)
        xs.sendStreamError = streamErrors.append
        xs.sid = u'1234'
        # '1234' does not match the expected SHA-1 hash.
        theHash = '1234'
        xs.authenticator.onHandshake(theHash)
        self.assertEquals('not-authorized', streamErrors[-1].condition)
        self.assertEquals(0, len(authd))
class XMPPComponentServerFactoryTest(unittest.TestCase):
    """
    Tests for L{component.XMPPComponentServerFactory}.
    """

    def setUp(self):
        self.router = component.Router()
        self.factory = component.XMPPComponentServerFactory(self.router,
                                                            'secret')
        self.xmlstream = self.factory.buildProtocol(None)
        self.xmlstream.thisEntity = JID('component.example.org')

    def test_makeConnection(self):
        """
        A new connection increases the stream serial count. No logs by default.
        """
        self.xmlstream.dispatch(self.xmlstream,
                                xmlstream.STREAM_CONNECTED_EVENT)
        # The first stream gets serial 0; the factory counter has been
        # post-incremented to 1.
        self.assertEqual(0, self.xmlstream.serial)
        self.assertEqual(1, self.factory.serial)
        self.assertIdentical(None, self.xmlstream.rawDataInFn)
        self.assertIdentical(None, self.xmlstream.rawDataOutFn)

    def test_makeConnectionLogTraffic(self):
        """
        Setting logTraffic should set up raw data loggers.
        """
        self.factory.logTraffic = True
        self.xmlstream.dispatch(self.xmlstream,
                                xmlstream.STREAM_CONNECTED_EVENT)
        self.assertNotIdentical(None, self.xmlstream.rawDataInFn)
        self.assertNotIdentical(None, self.xmlstream.rawDataOutFn)

    def test_onError(self):
        """
        An observer for stream errors should trigger onError to log it.
        """
        self.xmlstream.dispatch(self.xmlstream,
                                xmlstream.STREAM_CONNECTED_EVENT)

        class TestError(Exception):
            pass

        reason = failure.Failure(TestError())
        self.xmlstream.dispatch(reason, xmlstream.STREAM_ERROR_EVENT)
        # The factory's error observer must have logged exactly one error.
        self.assertEqual(1, len(self.flushLoggedErrors(TestError)))

    def test_connectionInitialized(self):
        """
        Make sure a new stream is added to the routing table.
        """
        self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
        # The authenticated stream is routed under its own entity name.
        self.assertIn('component.example.org', self.router.routes)
        self.assertIdentical(self.xmlstream,
                             self.router.routes['component.example.org'])

    def test_connectionLost(self):
        """
        Make sure a stream is removed from the routing table on disconnect.
        """
        self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
        self.xmlstream.dispatch(None, xmlstream.STREAM_END_EVENT)
        self.assertNotIn('component.example.org', self.router.routes)
|
|
""" Functions for manipulating metadata """
import pandocfilters
import shlex
from . import const
from . import info
from . import util
from . import error
def update_metadata(old, new):
    """Return ``old`` updated (in place) with metadata from ``new``."""
    # 1. Merge the incoming 'metadata' MetaMap, tolerating its absence
    #    or a badly typed value.
    try:
        incoming = get_content(new, 'metadata', 'MetaMap')
    except (error.MissingField, KeyError):
        incoming = None
    except error.WrongType as err:
        info.log('WARNING', 'panzer', err)
        incoming = None
    if incoming is not None:
        old.update(incoming)
    # 2. Append the additive run-list fields rather than replacing them.
    old = update_additive_lists(old, new)
    # 3. An explicit 'template' setting always wins.
    if 'template' in new:
        old['template'] = new['template']
    return old
def update_additive_lists(old, new):
    """ return old updated with info from additive lists in new """
    # Additive fields (the run-list kinds) are appended to, not replaced:
    # entries already in `old` keep their position, `new` entries follow.
    for field in const.RUNLIST_KIND:
        try:
            try:
                new_list = get_content(new, field, 'MetaList')
            except error.MissingField:
                # field not in incoming metadata, move to next list
                continue
            try:
                old_list = get_content(old, field, 'MetaList')
            except error.MissingField:
                # field not in old metadata, start with an empty list
                old_list = list()
        except error.WrongType as err:
            # wrong type of value under field, skip to next list
            info.log('WARNING', 'panzer', err)
            continue
        old_list.extend(new_list)
        set_content(old, field, old_list, 'MetaList')
    return old
def apply_kill_rules(old_list):
    """ return old_list after applying kill rules """
    # Each item is a MetaMap holding exactly one of:
    #   'run'     -- append the named process to the result
    #   'kill'    -- drop previously accepted entries running that process
    #   'killall' -- if True, empty everything accepted so far
    new_list = list()
    for item in old_list:
        # 1. Sanity checks
        check_c_and_t_exist(item)
        item_content = item[const.C]
        item_type = item[const.T]
        if item_type != 'MetaMap':
            info.log('ERROR', 'panzer',
                     'fields "' + '", "'.join(const.RUNLIST_KIND) + '" '
                     'value must be of type "MetaMap"---ignoring 1 item')
            continue
        if len(item_content.keys() & {'run', 'kill', 'killall'}) != 1:
            info.log('ERROR', 'panzer',
                     'must contain exactly one "run", "kill", '
                     'or "killall" per item---ignoring 1 item')
            continue
        # 2. Now operate on content
        if 'run' in item_content:
            if get_type(item_content, 'run') != 'MetaInlines':
                info.log('ERROR', 'panzer',
                         '"run" value must be of type "MetaInlines"'
                         '---ignoring 1 item')
                continue
            new_list.append(item)
        elif 'kill' in item_content:
            try:
                to_be_killed = get_content(item_content, 'kill', 'MetaInlines')
            except error.WrongType as err:
                info.log('WARNING', 'panzer', err)
                continue
            # Drop every previously accepted 'run' entry naming the same
            # target; later 'run' entries can re-add it.
            new_list = [i for i in new_list
                        if get_content(i[const.C],
                                       'run',
                                       'MetaInlines') != to_be_killed]
            continue
        elif 'killall' in item_content:
            try:
                if get_content(item_content, 'killall', 'MetaBool') is True:
                    # Wipe everything accepted so far.
                    new_list = list()
            except error.WrongType as err:
                info.log('WARNING', 'panzer', err)
            continue
        else:
            # Should never occur, caught by previous syntax check
            continue
    return new_list
def get_nested_content(metadata, fields, expected_type_of_leaf=None):
    """ return content of field by traversing a list of MetaMaps

    args:
        metadata : dictionary to traverse
        fields : list of fields to traverse in dictionary from
        shallowest to deepest. Content of every field, except the last,
        must be type 'MetaMap' (otherwise fields could not be traversed).
        The content of final field in the list is returned.
        expected_type_of_leaf : (optional) expected type of final field's
        content

    Returns:
        content of final field in list, or the empty dict ({}) if field of
        expected type is not found

    Note: the caller's `fields` list is left untouched (previously this
    function consumed it via pop(0), surprising callers that reused it).
    """
    current_field = fields[0]
    remaining = fields[1:]
    try:
        # If on a branch...
        if remaining:
            next_content = get_content(metadata, current_field, 'MetaMap')
            return get_nested_content(next_content, remaining,
                                      expected_type_of_leaf)
        # Else on a leaf...
        return get_content(metadata, current_field, expected_type_of_leaf)
    except error.MissingField:
        # current_field not found, return {}: nothing to update
        return dict()
    except error.WrongType as err:
        info.log('WARNING', 'panzer', err)
        # wrong type found, return {}: nothing to update
        return dict()
def get_content(metadata, field, expected_type=None):
    """Return the content ('c' entry) stored under ``field``.

    Raises error.MissingField when ``field`` is absent, and
    error.WrongType when ``expected_type`` is given and does not match.
    """
    try:
        entry = metadata[field]
    except KeyError:
        raise error.MissingField('field "%s" not found' % field)
    check_c_and_t_exist(entry)
    actual_type = entry[const.T]
    if expected_type and actual_type != expected_type:
        raise error.WrongType('value of "%s": expecting type "%s", '
                              'but found type "%s"'
                              % (field, expected_type, actual_type))
    return entry[const.C]
def get_type(metadata, field):
    """Return the pandoc type tag ('t' entry) of ``field``.

    Raises error.MissingField when ``field`` is absent.
    """
    if field not in metadata:
        raise error.MissingField('field "%s" not found' % field)
    entry = metadata[field]
    check_c_and_t_exist(entry)
    return entry[const.T]
def set_content(metadata, field, content, content_type):
    """Store ``content`` tagged with ``content_type`` under ``field``."""
    entry = {const.T: content_type, const.C: content}
    metadata[field] = entry
def get_list_or_inline(metadata, field):
    """Return the value of ``field`` coerced into a list of strings.

    Accepts MetaInlines, MetaString, or MetaList values; anything else
    raises error.WrongType.
    """
    kind = get_type(metadata, field)
    if kind == 'MetaInlines':
        # Single inline value -> one-element list.
        return [pandocfilters.stringify(get_content(metadata, field,
                                                    'MetaInlines'))]
    if kind == 'MetaString':
        return [get_content(metadata, field, 'MetaString')]
    if kind == 'MetaList':
        return [pandocfilters.stringify(raw)
                for raw in get_content(metadata, field, 'MetaList')]
    raise error.WrongType('"%s" value must be of type "MetaInlines", '
                          '"MetaList", or "MetaString"' % field)
def get_metadata(ast):
    """ returns metadata branch of ast or {} if not present """
    try:
        # Old pandoc API keeps metadata in ast[0]['unMeta']; the new API
        # keys it directly under 'meta'.
        return ast[0]['unMeta'] if const.USE_OLD_API else ast['meta']
    except KeyError:
        return dict()
def get_runlist(metadata, kind, options):
    """ return run list for kind from metadata """
    # Each returned entry is a dict with 'kind', 'command', 'status' and
    # 'arguments' keys; malformed items degrade gracefully (logged, with
    # arguments reset to []).
    runlist = list()
    # - return empty list unless entries of kind are in metadata
    try:
        metadata_list = get_content(metadata, kind, 'MetaList')
    except (error.WrongType, error.MissingField) as err:
        info.log('WARNING', 'panzer', err)
        return runlist
    for item in metadata_list:
        check_c_and_t_exist(item)
        item_content = item[const.C]
        # - create new entry
        entry = dict()
        entry['kind'] = kind
        entry['command'] = str()
        entry['status'] = const.QUEUED
        # - get entry command
        command_raw = get_content(item_content, 'run', 'MetaInlines')
        command_str = pandocfilters.stringify(command_raw)
        entry['command'] = util.resolve_path(command_str, kind, options)
        # - get entry arguments
        entry['arguments'] = list()
        if 'args' in item_content:
            try:
                # - lua filters cannot take arguments
                if kind == 'lua-filter':
                    raise error.NoArgsAllowed
                if get_type(item_content, 'args') != 'MetaInlines':
                    raise error.BadArgsFormat
                args_content = get_content(item_content, 'args', 'MetaInlines')
                # 'args' must be a single inline Code element,
                # i.e. the syntax args: "`--ARGUMENTS`"
                if len(args_content) != 1 \
                   or args_content[0][const.T] != 'Code':
                    raise error.BadArgsFormat
                # Code content is (attr, text); take the text and split it
                # like a shell would.
                arguments_raw = args_content[0][const.C][1]
                entry['arguments'] = shlex.split(arguments_raw)
            except error.NoArgsAllowed:
                info.log('ERROR', 'panzer', '"%s": lua filters do not take arguments -- arguments ignored' %
                         command_str)
                entry['arguments'] = list()
            except error.BadArgsFormat:
                info.log('ERROR', 'panzer', 'Cannot read "args" of "%s". '
                         'Syntax should be args: "`--ARGUMENTS`"'
                         % command_str)
                entry['arguments'] = list()
        runlist.append(entry)
    return runlist
def check_c_and_t_exist(item):
    """Raise error.BadASTError unless ``item`` carries both the content
    (C) and type (T) keys every pandoc metadata value must have."""
    if const.C not in item:
        raise error.BadASTError(
            'Value of "%s" corrupt: "C" field missing' % repr(item))
    if const.T not in item:
        raise error.BadASTError(
            'Value of "%s" corrupt: "T" field missing' % repr(item))
def expand_style_hierarchy(stylelist, styledef):
    """Return ``stylelist`` with every ancestor style spliced in, parents
    appearing before the styles that inherit from them."""
    expanded = []
    for name in stylelist:
        if name not in styledef:
            # Unknown style: warn and move on.
            info.log('ERROR', 'panzer',
                     'No style definition found for style "%s" --- ignoring it'
                     % name)
            continue
        definition = get_content(styledef, name, 'MetaMap')
        if 'parent' in definition:
            # Recursively pull in the ancestors first.
            parents = get_list_or_inline(definition, 'parent')
            expanded += expand_style_hierarchy(parents, styledef)
        expanded.append(name)
    return expanded
def build_cli_options(dic):
    """
    Return a sorted list of command line options specified in the options
    dictionary `dic`.

    Values are interpreted by type:
      True       -> '--opt' flag
      str        -> '--opt=value'
      list       -> one '--opt=v' per inner [v] element (repeatable options)
    Anything else (False, None, numbers, ...) is ignored.
    """
    # - flags: require a literal True (the old `== True` also matched 1/1.0)
    flags = sorted('--%s' % opt for opt in dic if dic[opt] is True)
    # - key-values (isinstance also accepts str subclasses)
    keyvals = sorted('--%s=%s' % (opt, dic[opt]) for opt in dic
                     if isinstance(dic[opt], str))
    # - repeated key-values: each value is a one-element list [v]
    rkeyvals = list()
    for key in sorted(k for k in dic if isinstance(dic[k], list)):
        rkeyvals += ['--%s=%s' % (key, val[0]) for val in dic[key]]
    return flags + keyvals + rkeyvals
def parse_commandline(metadata):
    """ return a dictionary of pandoc command line options by parsing
    `commandline` field in metadata; return None if `commandline` is absent in
    metadata

    The result maps each of the phases 'r' (read) and 'w' (write) to a dict
    of option name -> value (bool, str, or list of one-element lists).
    """
    if 'commandline' not in metadata:
        return None
    field_type = get_type(metadata, 'commandline')
    if field_type != 'MetaMap':
        info.log('ERROR', 'panzer',
                 'Value of field "%s" should be of type "MetaMap"'
                 '---found value of type "%s", ignoring it'
                 % ('commandline', field_type))
        return None
    content = get_content(metadata, 'commandline')
    # 1. remove bad options from `commandline`
    bad_opts = list(const.PANDOC_BAD_COMMANDLINE)
    for key in content:
        if key in bad_opts:
            info.log('ERROR', 'panzer',
                     '"%s" forbidden entry in panzer "commandline" '
                     'map---ignoring' % key)
        if key not in const.PANDOC_OPT_PHASE:
            info.log('ERROR', 'panzer',
                     'do not recognise pandoc command line option "--%s" in "commandline" '
                     'map---ignoring' % key)
            # BUG FIX: was `bad_opts += key`, which extended the list with
            # the individual *characters* of key, so unrecognised options
            # survived the filter below and crashed later at
            # const.PANDOC_OPT_PHASE[key].
            bad_opts.append(key)
    content = {key: content[key]
               for key in content
               if key not in bad_opts}
    # 2. parse remaining opts
    commandline = {'r': dict(), 'w': dict()}
    for key in content:
        # 1. extract value of field with name 'key'
        val = None
        val_t = get_type(content, key)
        val_c = get_content(content, key)
        # if value is 'false', set OPTION: False
        if val_t == 'MetaBool' and val_c is False:
            val = False
        # if value is 'true', set OPTION: True
        elif val_t == 'MetaBool' and val_c is True \
                and key not in const.PANDOC_OPT_ADDITIVE:
            val = True
        # if value type is inline code, set OPTION: VAL
        elif val_t == 'MetaInlines':
            if len(val_c) != 1 or val_c[0][const.T] != 'Code':
                info.log('ERROR', 'panzer',
                         'Cannot read option "%s" in "commandline" field. '
                         'Syntax should be OPTION: "`VALUE`"' % key)
                continue
            if key in const.PANDOC_OPT_ADDITIVE:
                # additive options are stored as a list of [value] items
                val = [get_list_or_inline(content, key)]
            else:
                val = get_list_or_inline(content, key)[0]
        # if value type is list of inline codes, set OPTION: [VALS]
        elif val_t == 'MetaList' and key in const.PANDOC_OPT_ADDITIVE:
            errs = False
            for item in val_c:
                if item[const.T] != 'MetaInlines' \
                        or item[const.C][0][const.T] != 'Code':
                    info.log('ERROR', 'panzer',
                             'Cannot read option "%s" in "commandline" field. '
                             'Syntax should be - OPTION: "`VALUE`"' % key)
                    errs = True
            if not errs:
                val = [[x] for x in get_list_or_inline(content, key)]
            else:
                continue
        # otherwise, signal error
        else:
            info.log('ERROR', 'panzer',
                     'Cannot read entry "%s" with type "%s" in '
                     '"commandline"---ignoring' % (key, val_t))
            continue
        # 2. update commandline dictionary with key, val
        for phase in const.PANDOC_OPT_PHASE[key]:
            commandline[phase][key] = val
    return commandline
def update_pandoc_options(old, new, mutable):
    """
    Return the pandoc command line options ``old`` updated with ``new``;
    only options flagged as mutable may change.
    """
    for phase in ('r', 'w'):
        for opt in new[phase]:
            # immutable options are left exactly as they are
            if not mutable[phase][opt]:
                continue
            incoming = new[phase][opt]
            if incoming is False:
                # 'False' resets the option to a type-appropriate default
                current = old[phase][opt]
                if type(current) is list:
                    old[phase][opt] = list()
                elif type(current) is str:
                    old[phase][opt] = None
                elif type(current) is bool:
                    old[phase][opt] = False
            elif opt in old[phase] and type(old[phase][opt]) is list:
                # additive option: append the new values
                old[phase][opt].extend(incoming)
            else:
                # plain override
                old[phase][opt] = incoming
    return old
|
|
from ..Qt import QtGui, QtCore
from ..python2_3 import asUnicode
import numpy as np
from ..Point import Point
from .. import debug as debug
import weakref
from .. import functions as fn
from .. import getConfigOption
from .GraphicsWidget import GraphicsWidget
__all__ = ['AxisItem']
class AxisItem(GraphicsWidget):
"""
GraphicsItem showing a single plot axis with ticks, values, and label.
Can be configured to fit on any side of a plot, and can automatically synchronize its displayed scale with ViewBox items.
Ticks can be extended to draw a grid.
If maxTickLength is negative, ticks point into the plot.
"""
def __init__(self, orientation, pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True):
    """
    ============== ===============================================================
    **Arguments:**
    orientation    one of 'left', 'right', 'top', or 'bottom'
    maxTickLength  (px) maximum length of ticks to draw. Negative values draw
                   into the plot, positive values draw outward.
    linkView       (ViewBox) causes the range of values displayed in the axis
                   to be linked to the visible range of a ViewBox.
    showValues     (bool) Whether to display values adjacent to ticks
    pen            (QPen) Pen used when drawing ticks.
    ============== ===============================================================
    """
    GraphicsWidget.__init__(self, parent)
    self.label = QtGui.QGraphicsTextItem(self)
    self.picture = None  ## cached QPicture of the axis; None forces a redraw in paint()
    self.orientation = orientation
    if orientation not in ['left', 'right', 'top', 'bottom']:
        raise Exception("Orientation argument must be one of 'left', 'right', 'top', or 'bottom'.")
    if orientation in ['left', 'right']:
        ## vertical axes draw their label rotated to read bottom-to-top
        self.label.rotate(-90)

    self.style = {
        'tickTextOffset': [5, 2],  ## (horizontal, vertical) spacing between text and axis
        'tickTextWidth': 30,  ## space reserved for tick text
        'tickTextHeight': 18,
        'autoExpandTextSpace': True,  ## automatically expand text space if needed
        'tickFont': None,
        'stopAxisAtTick': (False, False),  ## whether axis is drawn to edge of box or to last tick
        'textFillLimits': [  ## how much of the axis to fill up with tick text, maximally.
            (0, 0.8),    ## never fill more than 80% of the axis
            (2, 0.6),    ## If we already have 2 ticks with text, fill no more than 60% of the axis
            (4, 0.4),    ## If we already have 4 ticks with text, fill no more than 40% of the axis
            (6, 0.2),    ## If we already have 6 ticks with text, fill no more than 20% of the axis
            ],
        'showValues': showValues,
        'tickLength': maxTickLength,
        }

    self.textWidth = 30  ## Keeps track of maximum width / height of tick text
    self.textHeight = 18

    self.labelText = ''
    self.labelUnits = ''
    self.labelUnitPrefix=''
    self.labelStyle = {}
    self.logMode = False
    self.tickFont = None

    self._tickLevels = None  ## used to override the automatic ticking system with explicit ticks
    self.scale = 1.0
    self.autoSIPrefix = True
    self.autoSIPrefixScale = 1.0

    self.setRange(0, 1)

    if pen is None:
        ## default pen also derives the label color from the 'foreground' config option
        self.setPen()
    else:
        self.setPen(pen)

    self._linkedView = None
    if linkView is not None:
        self.linkToView(linkView)

    self.showLabel(False)

    self.grid = False
    #self.setCacheMode(self.DeviceCoordinateCache)
def setStyle(self, **kwds):
    """
    Set various style options.

    =================== =======================================================
    Keyword Arguments:
    tickLength          (int) The maximum length of ticks in pixels.
                        Positive values point toward the text; negative
                        values point away.
    tickTextOffset      (int) reserved spacing between text and axis in px
    tickTextWidth       (int) Horizontal space reserved for tick text in px
    tickTextHeight      (int) Vertical space reserved for tick text in px
    autoExpandTextSpace (bool) Automatically expand text space if the tick
                        strings become too long.
    tickFont            (QFont or None) Determines the font used for tick
                        values. Use None for the default font.
    stopAxisAtTick      (tuple: (bool min, bool max)) If True, the axis
                        line is drawn only as far as the last tick.
                        Otherwise, the line is drawn to the edge of the
                        AxisItem boundary.
    textFillLimits      (list of (tick #, % fill) tuples). This structure
                        determines how the AxisItem decides how many ticks
                        should have text appear next to them. Each tuple in
                        the list specifies what fraction of the axis length
                        may be occupied by text, given the number of ticks
                        that already have text displayed. For example::

                            [(0, 0.8), # Never fill more than 80% of the axis
                             (2, 0.6), # If we already have 2 ticks with text,
                                       # fill no more than 60% of the axis
                             (4, 0.4), # If we already have 4 ticks with text,
                                       # fill no more than 40% of the axis
                             (6, 0.2)] # If we already have 6 ticks with text,
                                       # fill no more than 20% of the axis

    showValues          (bool) indicates whether text is displayed adjacent
                        to ticks.
    =================== =======================================================

    Added in version 0.9.9
    """
    for kwd,value in kwds.items():
        if kwd not in self.style:
            raise NameError("%s is not a valid style argument." % kwd)

        # integer-valued options are type-checked before any is applied
        if kwd in ('tickLength', 'tickTextOffset', 'tickTextWidth', 'tickTextHeight'):
            if not isinstance(value, int):
                raise ValueError("Argument '%s' must be int" % kwd)

        if kwd == 'tickTextOffset':
            # only the offset relevant to this orientation is updated
            if self.orientation in ('left', 'right'):
                self.style['tickTextOffset'][0] = value
            else:
                self.style['tickTextOffset'][1] = value
        elif kwd == 'stopAxisAtTick':
            try:
                assert len(value) == 2 and isinstance(value[0], bool) and isinstance(value[1], bool)
            except:
                raise ValueError("Argument 'stopAxisAtTick' must have type (bool, bool)")
            self.style[kwd] = value
        else:
            self.style[kwd] = value

    # invalidate the cached picture and recompute reserved space
    self.picture = None
    self._adjustSize()
    self.update()
def close(self):
    """Detach this axis and its label from the scene."""
    scene = self.scene()
    scene.removeItem(self.label)
    self.label = None
    scene.removeItem(self)
def setGrid(self, grid):
    """Set the alpha value (0-255) for the grid, or False to disable.

    When grid lines are enabled, the axis tick lines are extended to cover
    the extent of the linked ViewBox, if any.
    """
    self.picture = None  # force a redraw with the new grid setting
    self.grid = grid
    self.prepareGeometryChange()
    self.update()
def setLogMode(self, log):
    """
    If *log* is True, then ticks are displayed on a logarithmic scale and values
    are adjusted accordingly. (This is usually accessed by changing the log mode
    of a :func:`PlotItem <pyqtgraph.PlotItem.setLogMode>`)
    """
    self.picture = None  # cached drawing is stale once the scale changes
    self.logMode = log
    self.update()
def setTickFont(self, font):
    """Set the QFont used for tick values (None restores the default)."""
    self.picture = None
    self.tickFont = font
    ## Need to re-allocate space depending on font size?
    self.prepareGeometryChange()
    self.update()
def resizeEvent(self, ev=None):
    #s = self.size()

    ## Set the position of the label
    ## `nudge` pulls the label a few px back toward the axis line.
    nudge = 5
    br = self.label.boundingRect()
    p = QtCore.QPointF(0, 0)
    if self.orientation == 'left':
        ## label is rotated -90 deg, so its width spans vertically here
        p.setY(int(self.size().height()/2 + br.width()/2))
        p.setX(-nudge)
    elif self.orientation == 'right':
        p.setY(int(self.size().height()/2 + br.width()/2))
        p.setX(int(self.size().width()-br.height()+nudge))
    elif self.orientation == 'top':
        p.setY(-nudge)
        p.setX(int(self.size().width()/2. - br.width()/2.))
    elif self.orientation == 'bottom':
        p.setX(int(self.size().width()/2. - br.width()/2.))
        p.setY(int(self.size().height()-br.height()+nudge))
    self.label.setPos(p)
    ## geometry changed, so the cached picture is no longer valid
    self.picture = None
def showLabel(self, show=True):
    """Show/hide the label text for this axis."""
    self.label.setVisible(show)
    # Reserve (or release) the space the label occupies.
    if self.orientation in ('left', 'right'):
        self.setWidth()
    else:
        self.setHeight()
    if self.autoSIPrefix:
        self.updateAutoSIPrefix()
def setLabel(self, text=None, units=None, unitPrefix=None, **args):
    """Set the text displayed adjacent to the axis.

    ============== =============================================================
    **Arguments:**
    text           The text (excluding units) to display on the label for this
                   axis.
    units          The units for this axis. Units should generally be given
                   without any scaling prefix (eg, 'V' instead of 'mV'). The
                   scaling prefix will be automatically prepended based on the
                   range of data displayed.
    **args         All extra keyword arguments become CSS style options for
                   the <span> tag which will surround the axis label and units.
    ============== =============================================================

    The final text generated for the label will look like::

        <span style="...options...">{text} (prefix{units})</span>

    Each extra keyword argument will become a CSS option in the above template.
    For example, you can set the font size and color of the label::

        labelStyle = {'color': '#FFF', 'font-size': '14pt'}
        axis.setLabel('label text', units='V', **labelStyle)
    """
    # None arguments leave the corresponding previous value untouched.
    if text is not None:
        self.labelText = text
        self.showLabel()
    if units is not None:
        self.labelUnits = units
        self.showLabel()
    if unitPrefix is not None:
        self.labelUnitPrefix = unitPrefix
    # CSS style is only replaced when new style args are supplied.
    if len(args) > 0:
        self.labelStyle = args
    self.label.setHtml(self.labelString())
    self._adjustSize()
    self.picture = None
    self.update()
def labelString(self):
    # Build the HTML for the axis label:
    # "<span style='...'>text (prefix units)</span>".
    if self.labelUnits == '':
        if not self.autoSIPrefix or self.autoSIPrefixScale == 1.0:
            units = ''
        else:
            # no units given: display the numeric scale factor instead,
            # e.g. "(x1e-06)"
            units = asUnicode('(x%g)') % (1.0/self.autoSIPrefixScale)
    else:
        #print repr(self.labelUnitPrefix), repr(self.labelUnits)
        units = asUnicode('(%s%s)') % (asUnicode(self.labelUnitPrefix), asUnicode(self.labelUnits))

    s = asUnicode('%s %s') % (asUnicode(self.labelText), asUnicode(units))

    style = ';'.join(['%s: %s' % (k, self.labelStyle[k]) for k in self.labelStyle])

    return asUnicode("<span style='%s'>%s</span>") % (style, asUnicode(s))
def _updateMaxTextSize(self, x):
    ## Informs that the maximum tick size orthogonal to the axis has
    ## changed; we use this to decide whether the item needs to be resized
    ## to accomodate.
    ## The 10 px dead band below avoids resizing on every small fluctuation.
    if self.orientation in ['left', 'right']:
        mx = max(self.textWidth, x)
        if mx > self.textWidth or mx < self.textWidth-10:
            self.textWidth = mx
            if self.style['autoExpandTextSpace'] is True:
                self.setWidth()
                #return True  ## size has changed
    else:
        mx = max(self.textHeight, x)
        if mx > self.textHeight or mx < self.textHeight-10:
            self.textHeight = mx
            if self.style['autoExpandTextSpace'] is True:
                self.setHeight()
                #return True  ## size has changed
def _adjustSize(self):
    # Recompute the reserved width/height along the axis' thin dimension.
    if self.orientation in ('top', 'bottom'):
        self.setHeight()
    else:
        self.setWidth()
def setHeight(self, h=None):
    """Set the height of this axis reserved for ticks and tick labels.
    The height of the axis label is automatically added.

    With h=None the height is computed from the current style settings.
    """
    if h is None:
        if not self.style['showValues']:
            h = 0
        elif self.style['autoExpandTextSpace'] is True:
            h = self.textHeight
        else:
            h = self.style['tickTextHeight']
        h += self.style['tickTextOffset'][1] if self.style['showValues'] else 0
        ## outward-pointing (positive) ticks need extra room
        h += max(0, self.style['tickLength'])
        if self.label.isVisible():
            h += self.label.boundingRect().height() * 0.8
    self.setMaximumHeight(h)
    self.setMinimumHeight(h)
    self.picture = None
def setWidth(self, w=None):
    """Set the width of this axis reserved for ticks and tick labels.
    The width of the axis label is automatically added.

    With w=None the width is computed from the current style settings.
    """
    if w is None:
        if not self.style['showValues']:
            w = 0
        elif self.style['autoExpandTextSpace'] is True:
            w = self.textWidth
        else:
            w = self.style['tickTextWidth']
        w += self.style['tickTextOffset'][0] if self.style['showValues'] else 0
        ## outward-pointing (positive) ticks need extra room
        w += max(0, self.style['tickLength'])
        if self.label.isVisible():
            w += self.label.boundingRect().height() * 0.8  ## bounding rect is usually an overestimate
    self.setMaximumWidth(w)
    self.setMinimumWidth(w)
    self.picture = None
def pen(self):
    """Return the QPen for this axis; the configured 'foreground' color
    is used when no pen has been set."""
    source = self._pen if self._pen is not None else getConfigOption('foreground')
    return fn.mkPen(source)
def setPen(self, *args, **kwargs):
    """
    Set the pen used for drawing text, axes, ticks, and grid lines.
    If no arguments are given, the default foreground color will be used
    (see :func:`setConfigOption <pyqtgraph.setConfigOption>`).
    """
    self.picture = None
    if args or kwargs:
        self._pen = fn.mkPen(*args, **kwargs)
    else:
        self._pen = fn.mkPen(getConfigOption('foreground'))
    ## derive the label CSS color from the pen ('#rrggbb', alpha dropped)
    self.labelStyle['color'] = '#' + fn.colorStr(self._pen.color())[:6]
    self.setLabel()
    self.update()
def setScale(self, scale=None):
    """
    Set the value scaling for this axis.

    Setting this value causes the axis to draw ticks and tick labels as if
    the view coordinate system were scaled. By default, the axis scaling is
    1.0.
    """
    # Deprecated usage, kept for backward compatibility:
    # setScale(None) resets the scale and re-enables auto SI prefixing.
    if scale is None:
        scale = 1.0
        self.enableAutoSIPrefix(True)

    if scale != self.scale:
        self.scale = scale
        self.setLabel()
        self.picture = None
        self.update()
def enableAutoSIPrefix(self, enable=True):
    """
    Enable (or disable) automatic SI prefix scaling on this axis.

    When enabled, this feature automatically determines the best SI prefix
    to prepend to the label units, while ensuring that axis values are scaled
    accordingly.

    For example, if the axis spans values from -0.1 to 0.1 and has units set
    to 'V' then the axis would display values -100 to 100
    and the units would appear as 'mV'

    This feature is enabled by default, and is only available when a suffix
    (unit string) is provided to display on the label.
    """
    self.autoSIPrefix = enable
    # re-evaluate immediately so the label/scale reflect the new setting
    self.updateAutoSIPrefix()
def updateAutoSIPrefix(self):
    ## Recompute the SI prefix/scale from the current range and apply it
    ## to the label; only active while the label is visible.
    if self.label.isVisible():
        (scale, prefix) = fn.siScale(max(abs(self.range[0]*self.scale), abs(self.range[1]*self.scale)))
        if self.labelUnits == '' and prefix in ['k', 'm']:  ## If we are not showing units, wait until 1e6 before scaling.
            scale = 1.0
            prefix = ''
        self.setLabel(unitPrefix=prefix)
    else:
        scale = 1.0

    self.autoSIPrefixScale = scale
    self.picture = None
    self.update()
def setRange(self, mn, mx):
    """Set the range of values displayed by the axis.

    Usually this is handled automatically by linking the axis to a ViewBox with :func:`linkToView <pyqtgraph.AxisItem.linkToView>`"""
    # isfinite rejects both inf and nan in one check
    if not np.all(np.isfinite((mn, mx))):
        raise Exception("Not setting range to [%s, %s]" % (str(mn), str(mx)))
    self.range = [mn, mx]
    if self.autoSIPrefix:
        self.updateAutoSIPrefix()
    self.picture = None
    self.update()
def linkedView(self):
    """Return the ViewBox this axis is linked to"""
    ref = self._linkedView
    # _linkedView is a weakref; dereference it (may yield None if the
    # view has been garbage collected).
    return None if ref is None else ref()
def linkToView(self, view):
    """Link this axis to a ViewBox, causing its displayed range to match the visible range of the view."""
    oldView = self.linkedView()
    ## keep only a weak reference so the view can be garbage collected
    self._linkedView = weakref.ref(view)
    ## vertical axes track the view's Y range, horizontal axes its X range
    if self.orientation in ['right', 'left']:
        if oldView is not None:
            oldView.sigYRangeChanged.disconnect(self.linkedViewChanged)
        view.sigYRangeChanged.connect(self.linkedViewChanged)
    else:
        if oldView is not None:
            oldView.sigXRangeChanged.disconnect(self.linkedViewChanged)
        view.sigXRangeChanged.connect(self.linkedViewChanged)

    if oldView is not None:
        oldView.sigResized.disconnect(self.linkedViewChanged)
    view.sigResized.connect(self.linkedViewChanged)
def linkedViewChanged(self, view, newRange=None):
    """Mirror the linked view's visible range onto this axis, reversing
    it when the view is inverted along our direction."""
    if self.orientation in ('right', 'left'):
        axis = 1
        inverted = view.yInverted()
    else:
        axis = 0
        inverted = view.xInverted()
    if newRange is None:
        newRange = view.viewRange()[axis]
    if inverted:
        self.setRange(*newRange[::-1])
    else:
        self.setRange(*newRange)
def boundingRect(self):
    linkedView = self.linkedView()
    if linkedView is None or self.grid is False:
        rect = self.mapRectFromParent(self.geometry())
        ## extend rect if ticks go in negative direction
        ## also extend to account for text that flows past the edges
        tl = self.style['tickLength']
        if self.orientation == 'left':
            rect = rect.adjusted(0, -15, -min(0,tl), 15)
        elif self.orientation == 'right':
            rect = rect.adjusted(min(0,tl), -15, 0, 15)
        elif self.orientation == 'top':
            rect = rect.adjusted(-15, 0, 15, -min(0,tl))
        elif self.orientation == 'bottom':
            rect = rect.adjusted(-15, min(0,tl), 15, 0)
        return rect
    else:
        ## with grid enabled, tick lines extend across the linked view,
        ## so include the view's rect in our bounds
        return self.mapRectFromParent(self.geometry()) | linkedView.mapRectToItem(self, linkedView.boundingRect())
def paint(self, p, opt, widget):
    profiler = debug.Profiler()
    ## drawing is cached in a QPicture; it is regenerated only when
    ## self.picture has been invalidated (set to None)
    if self.picture is None:
        try:
            picture = QtGui.QPicture()
            painter = QtGui.QPainter(picture)
            specs = self.generateDrawSpecs(painter)
            profiler('generate specs')
            if specs is not None:
                self.drawPicture(painter, *specs)
                profiler('draw picture')
        finally:
            ## always close the painter and keep whatever was recorded
            painter.end()
        self.picture = picture

    #p.setRenderHint(p.Antialiasing, False)   ## Sometimes we get a segfault here ???
    #p.setRenderHint(p.TextAntialiasing, True)
    ## replay the cached drawing onto the live painter
    self.picture.play(p)
def setTicks(self, ticks):
"""Explicitly determine which ticks to display.
This overrides the behavior specified by tickSpacing(), tickValues(), and tickStrings()
The format for *ticks* looks like::
[
[ (majorTickValue1, majorTickString1), (majorTickValue2, majorTickString2), ... ],
[ (minorTickValue1, minorTickString1), (minorTickValue2, minorTickString2), ... ],
...
]
If *ticks* is None, then the default tick system will be used instead.
"""
self._tickLevels = ticks
self.picture = None
self.update()
def tickSpacing(self, minVal, maxVal, size):
"""Return values describing the desired spacing and offset of ticks.
This method is called whenever the axis needs to be redrawn and is a
good method to override in subclasses that require control over tick locations.
The return value must be a list of tuples, one for each set of ticks::
[
(major tick spacing, offset),
(minor tick spacing, offset),
(sub-minor tick spacing, offset),
...
]
"""
dif = abs(maxVal - minVal)
if dif == 0:
return []
## decide optimal minor tick spacing in pixels (this is just aesthetics)
optimalTickCount = max(2., np.log(size))
## optimal minor tick spacing
optimalSpacing = dif / optimalTickCount
## the largest power-of-10 spacing which is smaller than optimal
p10unit = 10 ** np.floor(np.log10(optimalSpacing))
## Determine major/minor tick spacings which flank the optimal spacing.
intervals = np.array([1., 2., 10., 20., 100.]) * p10unit
minorIndex = 0
while intervals[minorIndex+1] <= optimalSpacing:
minorIndex += 1
levels = [
(intervals[minorIndex+2], 0),
(intervals[minorIndex+1], 0),
#(intervals[minorIndex], 0) ## Pretty, but eats up CPU
]
## decide whether to include the last level of ticks
minSpacing = min(size / 20., 30.)
maxTickCount = size / minSpacing
if dif / intervals[minorIndex] <= maxTickCount:
levels.append((intervals[minorIndex], 0))
return levels
##### This does not work -- switching between 2/5 confuses the automatic text-level-selection
### Determine major/minor tick spacings which flank the optimal spacing.
#intervals = np.array([1., 2., 5., 10., 20., 50., 100.]) * p10unit
#minorIndex = 0
#while intervals[minorIndex+1] <= optimalSpacing:
#minorIndex += 1
### make sure we never see 5 and 2 at the same time
#intIndexes = [
#[0,1,3],
#[0,2,3],
#[2,3,4],
#[3,4,6],
#[3,5,6],
#][minorIndex]
#return [
#(intervals[intIndexes[2]], 0),
#(intervals[intIndexes[1]], 0),
#(intervals[intIndexes[0]], 0)
#]
def tickValues(self, minVal, maxVal, size):
"""
Return the values and spacing of ticks to draw::
[
(spacing, [major ticks]),
(spacing, [minor ticks]),
...
]
By default, this method calls tickSpacing to determine the correct tick locations.
This is a good method to override in subclasses.
"""
minVal, maxVal = sorted((minVal, maxVal))
minVal *= self.scale
maxVal *= self.scale
#size *= self.scale
ticks = []
tickLevels = self.tickSpacing(minVal, maxVal, size)
allValues = np.array([])
for i in range(len(tickLevels)):
spacing, offset = tickLevels[i]
## determine starting tick
start = (np.ceil((minVal-offset) / spacing) * spacing) + offset
## determine number of ticks
num = int((maxVal-start) / spacing) + 1
values = (np.arange(num) * spacing + start) / self.scale
## remove any ticks that were present in higher levels
## we assume here that if the difference between a tick value and a previously seen tick value
## is less than spacing/100, then they are 'equal' and we can ignore the new tick.
values = list(filter(lambda x: all(np.abs(allValues-x) > spacing*0.01), values) )
allValues = np.concatenate([allValues, values])
ticks.append((spacing/self.scale, values))
if self.logMode:
return self.logTickValues(minVal, maxVal, size, ticks)
#nticks = []
#for t in ticks:
#nvals = []
#for v in t[1]:
#nvals.append(v/self.scale)
#nticks.append((t[0]/self.scale,nvals))
#ticks = nticks
return ticks
def logTickValues(self, minVal, maxVal, size, stdTicks):
## start with the tick spacing given by tickValues().
## Any level whose spacing is < 1 needs to be converted to log scale
ticks = []
for (spacing, t) in stdTicks:
if spacing >= 1.0:
ticks.append((spacing, t))
if len(ticks) < 3:
v1 = int(np.floor(minVal))
v2 = int(np.ceil(maxVal))
#major = list(range(v1+1, v2))
minor = []
for v in range(v1, v2):
minor.extend(v + np.log10(np.arange(1, 10)))
minor = [x for x in minor if x>minVal and x<maxVal]
ticks.append((None, minor))
return ticks
def tickStrings(self, values, scale, spacing):
"""Return the strings that should be placed next to ticks. This method is called
when redrawing the axis and is a good method to override in subclasses.
The method is called with a list of tick values, a scaling factor (see below), and the
spacing between ticks (this is required since, in some instances, there may be only
one tick and thus no other way to determine the tick spacing)
The scale argument is used when the axis label is displaying units which may have an SI scaling prefix.
When determining the text to display, use value*scale to correctly account for this prefix.
For example, if the axis label's units are set to 'V', then a tick value of 0.001 might
be accompanied by a scale value of 1000. This indicates that the label is displaying 'mV', and
thus the tick should display 0.001 * 1000 = 1.
"""
if self.logMode:
return self.logTickStrings(values, scale, spacing)
places = max(0, np.ceil(-np.log10(spacing*scale)))
strings = []
for v in values:
vs = v * scale
if abs(vs) < .001 or abs(vs) >= 10000:
vstr = "%g" % vs
else:
vstr = ("%%0.%df" % places) % vs
strings.append(vstr)
return strings
def logTickStrings(self, values, scale, spacing):
return ["%0.1g"%x for x in 10 ** np.array(values).astype(float)]
    def generateDrawSpecs(self, p):
        """
        Calls tickValues() and tickStrings() to determine where and how ticks should
        be drawn, then generates from this a set of drawing commands to be
        interpreted by drawPicture().

        Returns (axisSpec, tickSpecs, textSpecs), or None when the axis span
        cannot be mapped to device coordinates or has zero pixel length.
        The painter *p* is used only for measuring text bounding rects.
        """
        profiler = debug.Profiler()
        #bounds = self.boundingRect()
        bounds = self.mapRectFromParent(self.geometry())
        linkedView = self.linkedView()
        if linkedView is None or self.grid is False:
            tickBounds = bounds
        else:
            # With a grid enabled, ticks extend across the linked view area.
            tickBounds = linkedView.mapRectToItem(self, linkedView.boundingRect())
        # Pick the axis-line span plus tick geometry per orientation.
        # 'axis' selects which point coordinate (0=y, 1=x) varies along the
        # axis line; 'tickDir' is the direction ticks extend past the line.
        if self.orientation == 'left':
            span = (bounds.topRight(), bounds.bottomRight())
            tickStart = tickBounds.right()
            tickStop = bounds.right()
            tickDir = -1
            axis = 0
        elif self.orientation == 'right':
            span = (bounds.topLeft(), bounds.bottomLeft())
            tickStart = tickBounds.left()
            tickStop = bounds.left()
            tickDir = 1
            axis = 0
        elif self.orientation == 'top':
            span = (bounds.bottomLeft(), bounds.bottomRight())
            tickStart = tickBounds.bottom()
            tickStop = bounds.bottom()
            tickDir = -1
            axis = 1
        elif self.orientation == 'bottom':
            span = (bounds.topLeft(), bounds.topRight())
            tickStart = tickBounds.top()
            tickStop = bounds.top()
            tickDir = 1
            axis = 1
        #print tickStart, tickStop, span
        ## determine size of this item in pixels
        points = list(map(self.mapToDevice, span))
        if None in points:
            return
        lengthInPixels = Point(points[1] - points[0]).length()
        if lengthInPixels == 0:
            return
        # Determine major / minor / subminor axis ticks
        if self._tickLevels is None:
            tickLevels = self.tickValues(self.range[0], self.range[1], lengthInPixels)
            tickStrings = None
        else:
            ## parse self.tickLevels into the formats returned by tickLevels() and tickStrings()
            tickLevels = []
            tickStrings = []
            for level in self._tickLevels:
                values = []
                strings = []
                tickLevels.append((None, values))
                tickStrings.append(strings)
                for val, strn in level:
                    values.append(val)
                    strings.append(strn)
        textLevel = 1  ## draw text at this scale level
        ## determine mapping between tick values and local coordinates
        dif = self.range[1] - self.range[0]
        if dif == 0:
            xScale = 1
            offset = 0
        else:
            if axis == 0:
                # Vertical axes: y grows downward, so the scale is negated.
                xScale = -bounds.height() / dif
                offset = self.range[0] * xScale - bounds.height()
            else:
                xScale = bounds.width() / dif
                offset = self.range[0] * xScale
        xRange = [x * xScale - offset for x in self.range]
        xMin = min(xRange)
        xMax = max(xRange)
        profiler('init')
        tickPositions = [] # remembers positions of previously drawn ticks
        ## compute coordinates to draw ticks
        ## draw three different intervals, long ticks first
        tickSpecs = []
        for i in range(len(tickLevels)):
            tickPositions.append([])
            ticks = tickLevels[i][1]
            ## length of tick
            tickLength = self.style['tickLength'] / ((i*0.5)+1.0)
            # Finer levels are drawn progressively more transparent; with a
            # grid, fade further based on line density across the view.
            lineAlpha = 255 / (i+1)
            if self.grid is not False:
                lineAlpha *= self.grid/255. * np.clip((0.05 * lengthInPixels / (len(ticks)+1)), 0., 1.)
            # NOTE(review): under true division lineAlpha is a float;
            # QColor.setAlpha presumably accepts it -- confirm on Python 3.
            for v in ticks:
                ## determine actual position to draw this tick
                x = (v * xScale) - offset
                if x < xMin or x > xMax:  ## last check to make sure no out-of-bounds ticks are drawn
                    tickPositions[i].append(None)
                    continue
                tickPositions[i].append(x)
                p1 = [x, x]
                p2 = [x, x]
                p1[axis] = tickStart
                p2[axis] = tickStop
                if self.grid is False:
                    p2[axis] += tickLength*tickDir
                tickPen = self.pen()
                color = tickPen.color()
                color.setAlpha(lineAlpha)
                tickPen.setColor(color)
                tickSpecs.append((tickPen, Point(p1), Point(p2)))
        profiler('compute ticks')
        # Optionally truncate the axis line at the first/last tick position.
        # NOTE(review): tickPositions may contain None entries for ticks that
        # fell out of bounds; min()/max() over those would fail -- presumably
        # stopAxisAtTick is only enabled when all ticks are in range; confirm.
        if self.style['stopAxisAtTick'][0] is True:
            stop = max(span[0].y(), min(map(min, tickPositions)))
            if axis == 0:
                span[0].setY(stop)
            else:
                span[0].setX(stop)
        if self.style['stopAxisAtTick'][1] is True:
            stop = min(span[1].y(), max(map(max, tickPositions)))
            if axis == 0:
                span[1].setY(stop)
            else:
                span[1].setX(stop)
        axisSpec = (self.pen(), span[0], span[1])
        textOffset = self.style['tickTextOffset'][axis]  ## spacing between axis and text
        #if self.style['autoExpandTextSpace'] is True:
            #textWidth = self.textWidth
            #textHeight = self.textHeight
        #else:
            #textWidth = self.style['tickTextWidth'] ## space allocated for horizontal text
            #textHeight = self.style['tickTextHeight'] ## space allocated for horizontal text
        textSize2 = 0
        textRects = []
        textSpecs = []  ## list of draw
        # If values are hidden, return early
        if not self.style['showValues']:
            return (axisSpec, tickSpecs, textSpecs)
        for i in range(len(tickLevels)):
            ## Get the list of strings to display for this level
            if tickStrings is None:
                spacing, values = tickLevels[i]
                strings = self.tickStrings(values, self.autoSIPrefixScale * self.scale, spacing)
            else:
                strings = tickStrings[i]
            if len(strings) == 0:
                continue
            ## ignore strings belonging to ticks that were previously ignored
            for j in range(len(strings)):
                if tickPositions[i][j] is None:
                    strings[j] = None
            ## Measure density of text; decide whether to draw this level
            rects = []
            for s in strings:
                if s is None:
                    rects.append(None)
                else:
                    br = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, asUnicode(s))
                    ## boundingRect is usually just a bit too large
                    ## (but this probably depends on per-font metrics?)
                    br.setHeight(br.height() * 0.8)
                    rects.append(br)
                    textRects.append(rects[-1])
            ## measure all text, make sure there's enough room
            if axis == 0:
                textSize = np.sum([r.height() for r in textRects])
                textSize2 = np.max([r.width() for r in textRects]) if textRects else 0
            else:
                textSize = np.sum([r.width() for r in textRects])
                textSize2 = np.max([r.height() for r in textRects]) if textRects else 0
            if i > 0:  ## always draw top level
                ## If the strings are too crowded, stop drawing text now.
                ## We use three different crowding limits based on the number
                ## of texts drawn so far.
                textFillRatio = float(textSize) / lengthInPixels
                finished = False
                for nTexts, limit in self.style['textFillLimits']:
                    if len(textSpecs) >= nTexts and textFillRatio >= limit:
                        finished = True
                        break
                if finished:
                    break
            #spacing, values = tickLevels[best]
            #strings = self.tickStrings(values, self.scale, spacing)
            # Determine exactly where tick text should be drawn
            for j in range(len(strings)):
                vstr = strings[j]
                if vstr is None:  ## this tick was ignored because it is out of bounds
                    continue
                vstr = asUnicode(vstr)
                x = tickPositions[i][j]
                #textRect = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, vstr)
                textRect = rects[j]
                height = textRect.height()
                width = textRect.width()
                #self.textHeight = height
                # Label is placed past the tick by the tick length plus the
                # configured text offset, aligned toward the axis line.
                offset = max(0,self.style['tickLength']) + textOffset
                if self.orientation == 'left':
                    textFlags = QtCore.Qt.TextDontClip|QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter
                    rect = QtCore.QRectF(tickStop-offset-width, x-(height/2), width, height)
                elif self.orientation == 'right':
                    textFlags = QtCore.Qt.TextDontClip|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter
                    rect = QtCore.QRectF(tickStop+offset, x-(height/2), width, height)
                elif self.orientation == 'top':
                    textFlags = QtCore.Qt.TextDontClip|QtCore.Qt.AlignCenter|QtCore.Qt.AlignBottom
                    rect = QtCore.QRectF(x-width/2., tickStop-offset-height, width, height)
                elif self.orientation == 'bottom':
                    textFlags = QtCore.Qt.TextDontClip|QtCore.Qt.AlignCenter|QtCore.Qt.AlignTop
                    rect = QtCore.QRectF(x-width/2., tickStop+offset, width, height)
                #p.setPen(self.pen())
                #p.drawText(rect, textFlags, vstr)
                textSpecs.append((rect, textFlags, vstr))
        profiler('compute text')
        ## update max text size if needed.
        self._updateMaxTextSize(textSize2)
        return (axisSpec, tickSpecs, textSpecs)
    def drawPicture(self, p, axisSpec, tickSpecs, textSpecs):
        """Execute the draw commands produced by generateDrawSpecs().

        :param p: active QPainter to draw into.
        :param axisSpec: (pen, p1, p2) describing the main axis line.
        :param tickSpecs: list of (pen, p1, p2), one entry per tick mark.
        :param textSpecs: list of (rect, flags, text) for the tick labels.
        """
        profiler = debug.Profiler()
        p.setRenderHint(p.Antialiasing, False)
        p.setRenderHint(p.TextAntialiasing, True)
        ## draw long line along axis
        pen, p1, p2 = axisSpec
        p.setPen(pen)
        p.drawLine(p1, p2)
        p.translate(0.5,0)  ## resolves some damn pixel ambiguity
        ## draw ticks
        for pen, p1, p2 in tickSpecs:
            p.setPen(pen)
            p.drawLine(p1, p2)
        profiler('draw ticks')
        ## Draw all text
        if self.tickFont is not None:
            p.setFont(self.tickFont)
        p.setPen(self.pen())
        for rect, flags, text in textSpecs:
            p.drawText(rect, flags, text)
            #p.drawRect(rect)
        profiler('draw text')
def show(self):
if self.orientation in ['left', 'right']:
self.setWidth()
else:
self.setHeight()
GraphicsWidget.show(self)
def hide(self):
if self.orientation in ['left', 'right']:
self.setWidth(0)
else:
self.setHeight(0)
GraphicsWidget.hide(self)
def wheelEvent(self, ev):
if self.linkedView() is None:
return
if self.orientation in ['left', 'right']:
self.linkedView().wheelEvent(ev, axis=1)
else:
self.linkedView().wheelEvent(ev, axis=0)
ev.accept()
def mouseDragEvent(self, event):
if self.linkedView() is None:
return
if self.orientation in ['left', 'right']:
return self.linkedView().mouseDragEvent(event, axis=1)
else:
return self.linkedView().mouseDragEvent(event, axis=0)
def mouseClickEvent(self, event):
if self.linkedView() is None:
return
return self.linkedView().mouseClickEvent(event)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_log import log as logging
from trove.common import cfg
from trove.common import context as trove_context
from trove.common.i18n import _
from trove.common import instance
from trove.conductor import api as conductor_api
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseDbStatus(object):
    """
    Answers the question "what is the status of the DB application on
    this box?" The answer can be that the application is not installed, or
    the state of the application is determined by calling a series of
    commands.

    This class also handles saving and loading the status of the DB
    application in the database.

    The status is updated whenever the update() method is called, except
    if the state is changed to building or restart mode using the
    "begin_install" and "begin_restart" methods.

    The building mode persists in the database while restarting mode does
    not (so if the guest agent process crashes, update() will set the
    status to show a failure).

    These modes are exited and functionality to update() returns when
    end_install or end_restart() is called, at which point the status again
    reflects the actual status of the DB app.

    This is a base class, subclasses must implement real logic for
    determining current status of DB in _get_actual_db_status()
    """

    # Singleton sentinel: at most one status object may be created.
    _instance = None

    # Directory and marker-file names used to persist prepare progress on
    # disk so the installed state survives guest agent restarts.
    GUESTAGENT_DIR = '~'
    PREPARE_START_FILENAME = '.guestagent.prepare.start'
    PREPARE_END_FILENAME = '.guestagent.prepare.end'

    def __init__(self):
        if self._instance is not None:
            raise RuntimeError("Cannot instantiate twice.")
        # Last known service status (an instance.ServiceStatuses value).
        self.status = None
        # True while a restart is in progress; suppresses failure reporting.
        self.restart_mode = False
        # Cached result of the prepare-end marker check (None = not checked).
        self.__prepare_completed = None

    @property
    def prepare_completed(self):
        """True once the prepare-end marker file has been written."""
        if self.__prepare_completed is None:
            # Force the file check
            self.__refresh_prepare_completed()
        return self.__prepare_completed

    def __refresh_prepare_completed(self):
        # Set the value of __prepare_completed based on the existence of
        # the file. This is required as the state is cached so this method
        # must be called any time the existence of the file changes.
        self.__prepare_completed = os.path.isfile(
            guestagent_utils.build_file_path(
                self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME))

    def begin_install(self):
        """First call of the DB prepare.

        Writes the prepare-start marker and forces the status to BUILDING.
        """
        prepare_start_file = guestagent_utils.build_file_path(
            self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME)
        operating_system.write_file(prepare_start_file, '')
        self.__refresh_prepare_completed()
        self.set_status(instance.ServiceStatuses.BUILDING, True)

    def begin_restart(self):
        """Called before restarting DB server."""
        self.restart_mode = True

    def end_install(self, error_occurred=False, post_processing=False):
        """Called after prepare has ended.

        :param error_occurred: prepare failed; report FAILED.
        :param post_processing: more work remains; report INSTANCE_READY.
        Otherwise the prepare-end marker is written and the real status of
        the database is reported.
        """
        # Set the "we're done" flag if there's no error and
        # no post_processing is necessary
        if not (error_occurred or post_processing):
            prepare_end_file = guestagent_utils.build_file_path(
                self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)
            operating_system.write_file(prepare_end_file, '')
            self.__refresh_prepare_completed()
        final_status = None
        if error_occurred:
            final_status = instance.ServiceStatuses.FAILED
        elif post_processing:
            final_status = instance.ServiceStatuses.INSTANCE_READY
        if final_status:
            LOG.info(_("Set final status to %s.") % final_status)
            self.set_status(final_status, force=True)
        else:
            self._end_install_or_restart(True)

    def end_restart(self):
        """Called after restarting the DB server; resumes status reporting."""
        self.restart_mode = False
        LOG.info(_("Ending restart."))
        self._end_install_or_restart(False)

    def _end_install_or_restart(self, force):
        """Called after DB is installed or restarted.
        Updates the database with the actual DB server status.
        """
        real_status = self._get_actual_db_status()
        LOG.info(_("Current database status is '%s'.") % real_status)
        self.set_status(real_status, force=force)

    def _get_actual_db_status(self):
        # Subclasses must probe the real database process and return an
        # instance.ServiceStatuses value.
        raise NotImplementedError()

    @property
    def is_installed(self):
        """
        True if DB app should be installed and attempts to ascertain
        its status won't result in nonsense.
        """
        return self.prepare_completed

    @property
    def _is_restarting(self):
        """True while begin_restart()/end_restart() bracket is open."""
        return self.restart_mode

    @property
    def is_running(self):
        """True if DB server is running."""
        return (self.status is not None and
                self.status == instance.ServiceStatuses.RUNNING)

    def set_status(self, status, force=False):
        """Use conductor to update the DB app status.

        Heartbeats are skipped before prepare completes unless *force* is
        True, so transient probe results can't mask the BUILDING state.
        """
        if force or self.is_installed:
            LOG.debug("Casting set_status message to conductor "
                      "(status is '%s')." % status.description)
            context = trove_context.TroveContext()
            heartbeat = {'service_status': status.description}
            conductor_api.API(context).heartbeat(
                CONF.guest_id, heartbeat, sent=timeutils.float_utcnow())
            LOG.debug("Successfully cast set_status.")
            self.status = status
        else:
            LOG.debug("Prepare has not completed yet, skipping heartbeat.")

    def update(self):
        """Find and report the status of the DB on this machine.

        The stored status is refreshed via the conductor; nothing is
        returned. Skipped during install and restart windows.
        """
        if self.is_installed and not self._is_restarting:
            LOG.debug("Determining status of DB server.")
            status = self._get_actual_db_status()
            self.set_status(status)
        else:
            LOG.info(_("DB server is not installed or is in restart mode, so "
                       "for now we'll skip determining the status of DB on "
                       "this instance."))

    def restart_db_service(self, service_candidates, timeout):
        """Restart the database.
        Do not change the service auto-start setting.
        Disable the Trove instance heartbeat updates during the restart.
        1. Stop the database service.
        2. Wait for the database to shutdown.
        3. Start the database service.
        4. Wait for the database to start running.
        :param service_candidates: List of possible system service names.
        :type service_candidates: list
        :param timeout: Wait timeout in seconds.
        :type timeout: integer
        :raises: :class:`RuntimeError` on failure.
        """
        try:
            self.begin_restart()
            self.stop_db_service(service_candidates, timeout,
                                 disable_on_boot=False, update_db=False)
            self.start_db_service(service_candidates, timeout,
                                  enable_on_boot=False, update_db=False)
        except Exception as e:
            LOG.exception(e)
            raise RuntimeError(_("Database restart failed."))
        finally:
            # Always leave restart mode, even on failure, so update() can
            # report the (possibly failed) real status again.
            self.end_restart()

    def start_db_service(self, service_candidates, timeout,
                         enable_on_boot=True, update_db=False):
        """Start the database service and wait for the database to become
        available.
        The service auto-start will be updated only if the service command
        succeeds.
        :param service_candidates: List of possible system service names.
        :type service_candidates: list
        :param timeout: Wait timeout in seconds.
        :type timeout: integer
        :param enable_on_boot: Enable service auto-start.
                               The auto-start setting will be updated
                               only if the service command succeeds.
        :type enable_on_boot: boolean
        :param update_db: Suppress the Trove instance heartbeat.
        :type update_db: boolean
        :raises: :class:`RuntimeError` on failure.
        """
        LOG.info(_("Starting database service."))
        operating_system.start_service(service_candidates)
        self.wait_for_database_service_start(timeout, update_db=update_db)
        if enable_on_boot:
            LOG.info(_("Enable service auto-start on boot."))
            operating_system.enable_service_on_boot(service_candidates)

    def wait_for_database_service_start(self, timeout, update_db=False):
        """Wait for the database to become available.
        :param timeout: Wait timeout in seconds.
        :type timeout: integer
        :param update_db: Suppress the Trove instance heartbeat.
        :type update_db: boolean
        :raises: :class:`RuntimeError` on failure.
        """
        LOG.debug("Waiting for database to start up.")
        if not self._wait_for_database_service_status(
                instance.ServiceStatuses.RUNNING, timeout, update_db):
            raise RuntimeError(_("Database failed to start."))
        LOG.info(_("Database has started successfully."))

    def stop_db_service(self, service_candidates, timeout,
                        disable_on_boot=False, update_db=False):
        """Stop the database service and wait for the database to shutdown.
        :param service_candidates: List of possible system service names.
        :type service_candidates: list
        :param timeout: Wait timeout in seconds.
        :type timeout: integer
        :param disable_on_boot: Disable service auto-start.
                                The auto-start setting will be updated
                                only if the service command succeeds.
        :type disable_on_boot: boolean
        :param update_db: Suppress the Trove instance heartbeat.
        :type update_db: boolean
        :raises: :class:`RuntimeError` on failure.
        """
        LOG.info(_("Stopping database service."))
        operating_system.stop_service(service_candidates)
        LOG.debug("Waiting for database to shutdown.")
        if not self._wait_for_database_service_status(
                instance.ServiceStatuses.SHUTDOWN, timeout, update_db):
            raise RuntimeError(_("Database failed to stop."))
        LOG.info(_("Database has stopped successfully."))
        if disable_on_boot:
            LOG.info(_("Disable service auto-start on boot."))
            operating_system.disable_service_on_boot(service_candidates)

    def _wait_for_database_service_status(self, status, timeout, update_db):
        """Wait for the given database status.
        :param status: The status to wait for.
        :type status: BaseDbStatus
        :param timeout: Wait timeout in seconds.
        :type timeout: integer
        :param update_db: Suppress the Trove instance heartbeat.
        :type update_db: boolean
        :returns: True on success, False otherwise.
        """
        if not self.wait_for_real_status_to_change_to(
                status, timeout, update_db):
            LOG.info(_("Service status did not change to %(status)s "
                       "within the given timeout: %(timeout)ds")
                     % {'status': status, 'timeout': timeout})
            LOG.debug("Attempting to cleanup stalled services.")
            try:
                self.cleanup_stalled_db_services()
            except Exception:
                # Best-effort cleanup; the timeout is reported regardless.
                LOG.debug("Cleanup failed.", exc_info=True)
            return False
        return True

    def wait_for_real_status_to_change_to(self, status, max_time,
                                          update_db=False):
        """Waits the given time for the real status to change to the one
        specified.
        The internal status is always updated. The public instance
        state stored in the Trove database is updated only if "update_db" is
        True.
        """
        end_time = time.time() + max_time
        # since python does not support a real do-while loop, we have
        # to emulate one. Hence these shenanigans. We force at least
        # one pass into the loop and therefore it is safe that
        # actual_status is initialized in the loop while it is used
        # outside.
        loop = True
        while loop:
            self.status = self._get_actual_db_status()
            if self.status == status:
                if update_db:
                    self.set_status(self.status)
                return True
            # should we remain in this loop? this is the thing
            # that emulates the do-while construct.
            loop = (time.time() < end_time)
            # no point waiting if our time is up and we're
            # just going to error out anyway.
            if loop:
                LOG.debug("Waiting for DB status to change from "
                          "%(actual_status)s to %(status)s." %
                          {"actual_status": self.status, "status": status})
                time.sleep(CONF.state_change_poll_time)
        # NOTE(review): this message is missing a space after "change."
        # (string concatenation); a doc-only pass cannot alter runtime
        # strings -- fix in a code change.
        LOG.error(_("Timeout while waiting for database status to change."
                    "Expected state %(status)s, "
                    "current state is %(actual_status)s") %
                  {"status": status, "actual_status": self.status})
        return False

    def cleanup_stalled_db_services(self):
        """An optional datastore-specific code to cleanup stalled
        database services and other resources after a status change timeout.
        """
        LOG.debug("No cleanup action specified for this datastore.")

    def report_root(self, context, user):
        """Use conductor to update the root-enable status."""
        LOG.debug("Casting report_root message to conductor.")
        conductor_api.API(context).report_root(CONF.guest_id, user)
        LOG.debug("Successfully cast report_root.")
|
|
import urlparse
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.http import urlsafe_base64_decode
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect,csrf_exempt
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm,
          current_app=None, extra_context=None):
    """
    Displays the login form and handles the login action.

    Security fix: this view was decorated with @csrf_exempt, which disables
    CSRF protection on a credential-handling POST endpoint and allows login
    CSRF attacks. Replaced with @csrf_protect (already imported above), as
    in upstream Django; the login template must render {% csrf_token %}.
    """
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    if request.method == "POST":
        form = authentication_form(data=request.POST)
        if form.is_valid():
            netloc = urlparse.urlparse(redirect_to)[1]
            # Use default setting if redirect_to is empty
            if not redirect_to:
                redirect_to = settings.LOGIN_REDIRECT_URL
            # Security check -- don't allow redirection to a different
            # host.
            elif netloc and netloc != request.get_host():
                redirect_to = settings.LOGIN_REDIRECT_URL
            # Okay, security checks complete. Log the user in.
            auth_login(request, form.get_user())
            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()
            return HttpResponseRedirect(redirect_to)
    else:
        form = authentication_form(request)
    # The test cookie lets the POST handler verify that cookies are enabled.
    request.session.set_test_cookie()
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
def logout(request, next_page=None,
           template_name='registration/logged_out.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           current_app=None, extra_context=None):
    """
    Logs the user out, then redirects (to the requested same-host URL or
    *next_page*) or renders the 'You are logged out' page.
    """
    auth_logout(request)
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    if redirect_to:
        netloc = urlparse.urlparse(redirect_to)[1]
        # Security check -- only follow redirects to the same host.
        if not netloc or netloc == request.get_host():
            return HttpResponseRedirect(redirect_to)
    if next_page is not None:
        # Redirect to this page until the session has been cleared.
        return HttpResponseRedirect(next_page or request.path)
    current_site = get_current_site(request)
    context = {
        'site': current_site,
        'site_name': current_site.name,
        'title': _('Logged out'),
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
def logout_then_login(request, login_url=None, current_app=None, extra_context=None):
    """
    Logs out the user if he is logged in, then redirects to the log-in page.
    """
    target = login_url or settings.LOGIN_URL
    return logout(request, target, current_app=current_app, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    in the query string (unless redirect_field_name is falsy).
    """
    resolved_url = login_url or settings.LOGIN_URL
    parts = list(urlparse.urlparse(resolved_url))
    if redirect_field_name:
        querystring = QueryDict(parts[4], mutable=True)
        querystring[redirect_field_name] = next
        parts[4] = querystring.urlencode(safe='/')
    return HttpResponseRedirect(urlparse.urlunparse(parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@csrf_protect
def password_reset(request, is_admin_site=False,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   current_app=None,
                   extra_context=None):
    """Display the password-reset form; on valid POST, send the reset email
    and redirect to the 'done' page."""
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
    if request.method != "POST":
        form = password_reset_form()
    else:
        form = password_reset_form(request.POST)
        if form.is_valid():
            opts = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
                'from_email': from_email,
                'email_template_name': email_template_name,
                'request': request,
            }
            if is_admin_site:
                # Admin site: build reset links against the requested host.
                opts = dict(opts, domain_override=request.META['HTTP_HOST'])
            form.save(**opts)
            return HttpResponseRedirect(post_reset_redirect)
    context = {'form': form}
    context.update(extra_context or {})
    return render_to_response(template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        current_app=None, extra_context=None):
    """Render the 'reset email sent' confirmation page."""
    context = dict(extra_context or {})
    return render_to_response(template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
# Doesn't need csrf_protect since no-one can guess the URL
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           current_app=None, extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.

    ``uidb64`` is the base64-encoded user primary key and ``token`` the
    one-time token produced by ``token_generator``; both come from the
    URLconf. If either fails to resolve to a valid user/token pair the
    template is rendered with ``validlink=False`` and no form.
    """
    assert uidb64 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
    try:
        uid = urlsafe_base64_decode(str(uidb64))
        user = User.objects.get(id=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        # OverflowError: a forged uidb64 can decode to an integer too large
        # for the database backend; treat it like any other invalid link
        # instead of letting it propagate as a 500.
        user = None
    if user is not None and token_generator.check_token(user, token):
        validlink = True
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(None)
    else:
        validlink = False
        form = None
    context = {
        'form': form,
        'validlink': validlink,
    }
    context.update(extra_context or {})
    return render_to_response(template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            current_app=None, extra_context=None):
    """Show the final "your password has been reset" page."""
    context = {
        'login_url': settings.LOGIN_URL
    }
    if extra_context:
        context.update(extra_context)
    return render_to_response(
        template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
@csrf_protect
@login_required
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    current_app=None, extra_context=None):
    """Let the logged-in user change their password.

    Renders an unbound form on GET; on a valid POST the new password is
    saved and the user is redirected to ``post_change_redirect``
    (the password_change_done view by default).
    """
    if post_change_redirect is None:
        post_change_redirect = reverse('django.contrib.auth.views.password_change_done')
    if request.method != "POST":
        form = password_change_form(user=request.user)
    else:
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(post_change_redirect)
    context = {'form': form}
    context.update(extra_context or {})
    return render_to_response(
        template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         current_app=None, extra_context=None):
    """Show a "password changed successfully" confirmation page."""
    context = dict(extra_context or {})
    return render_to_response(
        template_name, context,
        context_instance=RequestContext(request, current_app=current_app))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.MatrixDeterminant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class DeterminantOpTest(test.TestCase):
  """Correctness tests for matrix_determinant / log_matrix_determinant."""

  def _compareDeterminantBase(self, matrix_x, tf_ans):
    """Checks a determinant op output against np.linalg.det on matrix_x."""
    out = tf_ans.eval()
    shape = matrix_x.shape
    if shape[-1] == 0 and shape[-2] == 0:
      # np.linalg.det cannot handle 0x0 matrices; the determinant of an
      # empty matrix is defined as 1 (the empty product).
      np_ans = np.ones(shape[:-2]).astype(matrix_x.dtype)
    else:
      np_ans = np.array(np.linalg.det(matrix_x)).astype(matrix_x.dtype)
    self.assertShapeEqual(np_ans, tf_ans)
    self.assertAllClose(np_ans, out, atol=5e-5)

  def _compareLogDeterminantBase(self, matrix_x, tf_ans):
    """Checks a (sign, log|det|) op output against np.linalg.slogdet.

    The comparison is done on sign * exp(log|det|), i.e. the reconstructed
    determinant, rather than on the two components separately.
    """
    sign_tf, abs_log_det_tf = tf_ans
    shape = matrix_x.shape
    if shape[-1] == 0 or shape[-2] == 0:
      # Empty matrices: determinant 1, so sign 1 and log-abs-det 0.
      np_sign, np_ans = (1.0, np.zeros(shape[:-2]).astype(matrix_x.dtype))
    else:
      np_sign, np_ans = np.linalg.slogdet(matrix_x)
      np_ans = np_ans.astype(matrix_x.dtype)
    self.assertShapeEqual(np_ans, abs_log_det_tf)
    sign_tf_val = sign_tf.eval()
    abs_log_det_tf_val = abs_log_det_tf.eval()
    self.assertAllClose(
        sign_tf_val * np.exp(abs_log_det_tf_val),
        np_sign * np.exp(np_ans),
        atol=5e-5)

  def _compareDeterminant(self, matrix_x):
    """Runs both the determinant and log-determinant checks on matrix_x."""
    with self.test_session(use_gpu=True):
      self._compareDeterminantBase(matrix_x,
                                   linalg_ops.matrix_determinant(matrix_x))
      self._compareLogDeterminantBase(
          matrix_x, gen_linalg_ops.log_matrix_determinant(matrix_x))

  def testBasic(self):
    # 2x2 matrices
    self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float32))
    self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float32))
    # 5x5 matrices (Eigen forces LU decomposition)
    self._compareDeterminant(
        np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
            2., 5., 8., 3., 8.
        ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
    # A multidimensional batch of 2x2 matrices
    self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float32))

  def testBasicDouble(self):
    # 2x2 matrices
    self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float64))
    self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float64))
    # 5x5 matrices (Eigen forces LU decomposition)
    self._compareDeterminant(
        np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
            2., 5., 8., 3., 8.
        ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
    # A multidimensional batch of 2x2 matrices
    self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float64))

  def testBasicComplex64(self):
    # 2x2 matrices
    self._compareDeterminant(
        np.array([[2., 3.], [3., 4.]]).astype(np.complex64))
    self._compareDeterminant(
        np.array([[0., 0.], [0., 0.]]).astype(np.complex64))
    self._compareDeterminant(
        np.array([[1. + 1.j, 1. - 1.j], [-1. + 1.j, -1. - 1.j]]).astype(
            np.complex64))
    # 5x5 matrices (Eigen forces LU decomposition)
    self._compareDeterminant(
        np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
            2., 5., 8., 3., 8.
        ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.complex64))
    # A multidimensional batch of 2x2 matrices
    self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.complex64))

  def testBasicComplex128(self):
    # 2x2 matrices
    self._compareDeterminant(
        np.array([[2., 3.], [3., 4.]]).astype(np.complex128))
    self._compareDeterminant(
        np.array([[0., 0.], [0., 0.]]).astype(np.complex128))
    self._compareDeterminant(
        np.array([[1. + 1.j, 1. - 1.j], [-1. + 1.j, -1. - 1.j]]).astype(
            np.complex128))
    # 5x5 matrices (Eigen forces LU decomposition)
    self._compareDeterminant(
        np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
            2., 5., 8., 3., 8.
        ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.complex128))
    # A multidimensional batch of 2x2 matrices
    self._compareDeterminant(
        np.random.rand(3, 4, 5, 2, 2).astype(np.complex128))

  def testInfiniteDeterminant(self):
    # Product of the diagonal overflows float64, so the determinant is +inf;
    # both the op and NumPy should agree on that.
    max_double = np.finfo("d").max
    huge_matrix = np.array([[max_double, 0.0], [0.0, max_double]])
    self._compareDeterminant(huge_matrix)

  def testNonSquareMatrix(self):
    # When the determinant of a non-square matrix is attempted we should return
    # an error
    with self.assertRaises(ValueError):
      linalg_ops.matrix_determinant(
          np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))

  def testWrongDimensions(self):
    # The input to the determinant should be a 2-dimensional tensor.
    tensor1 = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      linalg_ops.matrix_determinant(tensor1)

  def testEmpty(self):
    # Empty batch of 2x2 matrices, and a batch of empty (0x0) matrices.
    self._compareDeterminant(np.empty([0, 2, 2]))
    self._compareDeterminant(np.empty([2, 0, 0]))

  def testConcurrentExecutesWithoutError(self):
    # Two ops with the same seed evaluated in one Session.run must agree;
    # this guards against races in concurrent kernel execution.
    with self.test_session(use_gpu=True) as sess:
      matrix1 = random_ops.random_normal([5, 5], seed=42)
      matrix2 = random_ops.random_normal([5, 5], seed=42)
      det1 = linalg_ops.matrix_determinant(matrix1)
      det2 = linalg_ops.matrix_determinant(matrix2)
      det1_val, det2_val = sess.run([det1, det2])
      self.assertEqual(det1_val, det2_val)
class MatrixDeterminantBenchmark(test.Benchmark):
  """Benchmarks matrix_determinant on CPU and (if available) GPU."""

  # Square shapes benchmark single matrices; 3-tuples benchmark batches
  # of 513 matrices of the trailing 2-D shape.
  shapes = [
      (4, 4),
      (10, 10),
      (16, 16),
      (101, 101),
      (256, 256),
      (1000, 1000),
      (1024, 1024),
      (2048, 2048),
      (513, 4, 4),
      (513, 16, 16),
      (513, 256, 256),
  ]

  def _GenerateMatrix(self, shape):
    """Builds a well-conditioned (diagonally dominant) Variable of `shape`."""
    batch_shape = shape[:-2]
    shape = shape[-2:]
    assert shape[0] == shape[1]
    n = shape[0]
    # ones/(2n) keeps the off-diagonal mass below the identity diagonal.
    matrix = np.ones(shape).astype(np.float32) / (
        2.0 * n) + np.diag(np.ones(n).astype(np.float32))
    return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))

  def benchmarkMatrixDeterminantOp(self):
    """Times matrix_determinant for every shape, on /cpu:0 and /gpu:0."""
    for shape in self.shapes:
      with ops.Graph().as_default(), session.Session(
          config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
        matrix = self._GenerateMatrix(shape)
        d = linalg_ops.matrix_determinant(matrix)
        variables.global_variables_initializer().run()
        # group() discards the output so only op execution time is measured.
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
                d,),
            min_iters=25,
            name="matrix_determinant_cpu_{shape}".format(shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), session.Session(
            config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
          matrix = self._GenerateMatrix(shape)
          d = linalg_ops.matrix_determinant(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(
                  d,),
              min_iters=25,
              name="matrix_determinant_gpu_{shape}".format(shape=shape))
# Entry point: run the test cases defined above via the TF test runner.
if __name__ == "__main__":
  test.main()
|
|
try:
from unittest2 import TestCase
from mock import Mock, patch
except ImportError:
from unittest import TestCase
from mock import Mock, patch
import datetime
from botocore.exceptions import ClientError
from dateutil.tz import tzutc
from cfn_sphere.aws.cfn import CloudFormation
from cfn_sphere.aws.cfn import CloudFormationStack
from cfn_sphere.exceptions import CfnStackActionFailedException, CfnSphereBotoError
from cfn_sphere.template import CloudFormationTemplate
class CloudFormationApiTests(TestCase):
    """Unit tests for cfn_sphere.aws.cfn.CloudFormation.

    All boto3 resources/clients are mocked; no AWS calls are made.
    """

    # --- thin wrappers around boto3 ---

    @patch('cfn_sphere.aws.cfn.boto3.resource')
    def test_get_stack_properly_calls_boto(self, boto_mock):
        CloudFormation().get_stack("Foo")
        boto_mock.return_value.Stack.assert_called_once_with("Foo")

    @patch('cfn_sphere.aws.cfn.boto3.resource')
    def test_get_stacks_properly_calls_boto(self, boto_mock):
        CloudFormation().get_stacks()
        boto_mock.return_value.stacks.all.assert_called_once_with()

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_stack_exists_returns_true_for_existing_stack(self, get_stack_mock):
        get_stack_mock.return_value = Mock()
        self.assertTrue(CloudFormation().stack_exists("stack1"))

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_stack_exists_returns_false_for_non_existing_stack(self, get_stack_mock):
        # A "does not exist" ClientError is interpreted as a missing stack.
        get_stack_mock.side_effect = ClientError({"Error": {"Message": "Stack with id stack3 does not exist"}}, "Foo")
        self.assertFalse(CloudFormation().stack_exists("stack3"))

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack_descriptions')
    def test_get_stacks_dict_returns_empty_dict_with_no_stacks(self, get_stack_descriptions_mock):
        get_stack_descriptions_mock.return_value = {}
        self.assertEqual({}, CloudFormation().get_stacks_dict())

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack_descriptions')
    def test_get_stacks_dict_returns_stack_dict(self, get_stack_descriptions_mock):
        get_stack_descriptions_mock.return_value = [{"StackName": "Foo", "Parameters": [], "Outputs": []}]
        self.assertEqual({'Foo': {'outputs': [], 'parameters': []}}, CloudFormation().get_stacks_dict())

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack_descriptions')
    def test_get_stacks_dict_always_returns_empty_list_parameters_and_outputs(self, get_stack_descriptions_mock):
        # Missing "Parameters"/"Outputs" keys must default to empty lists.
        get_stack_descriptions_mock.return_value = [{"StackName": "Foo"}]
        self.assertEqual({'Foo': {'outputs': [], 'parameters': []}}, CloudFormation().get_stacks_dict())

    # --- handle_stack_event filtering and error detection ---

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_returns_expected_event(self, _):
        event = {
            'PhysicalResourceId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'StackName': 'my-stack',
            'LogicalResourceId': 'cfn-sphere-test-vpc',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'CREATE_COMPLETE'
        }

        # Event timestamp (8:03:27) is after the cutoff (8:03:25) -> accepted.
        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 25, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        result = cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")

        self.assertDictEqual(event, result)

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_returns_none_if_event_appears_to_early(self, _):
        event = {
            'PhysicalResourceId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'StackName': 'my-stack',
            'LogicalResourceId': 'cfn-sphere-test-vpc',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'CREATE_COMPLETE'
        }

        # Event timestamp (8:03:27) is before the cutoff (8:03:30) -> ignored.
        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 30, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        result = cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")

        self.assertIsNone(result)

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_returns_none_if_event_has_not_expected_state(self, _):
        event = {
            'PhysicalResourceId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'StackName': 'my-stack',
            'LogicalResourceId': 'cfn-sphere-test-vpc',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'CREATE_IN_PROGRESS'
        }

        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 25, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        result = cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")

        self.assertIsNone(result)

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_returns_none_if_event_is_no_stack_event(self, _):
        # ResourceType is an SNS topic, not AWS::CloudFormation::Stack.
        event = {
            'PhysicalResourceId': 'arn:aws:sns:eu-west-1:1234567890:my-topic',
            'StackName': 'my-stack',
            'LogicalResourceId': 'VPC',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::SNS::Topic',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'CREATE_COMPLETE'
        }

        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 25, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        result = cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")

        self.assertIsNone(result)

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_raises_exception_on_error_event(self, _):
        event = {
            'PhysicalResourceId': 'arn:aws:sns:eu-west-1:1234567890:my-topic',
            'StackName': 'my-stack',
            'LogicalResourceId': 'VPC',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'UPDATE_FAILED'
        }

        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 25, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        with self.assertRaises(CfnStackActionFailedException):
            cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_returns_none_on_rollback_in_progress_state(self, _):
        # Rollback in progress is not terminal yet -> no result, no exception.
        event = {
            'PhysicalResourceId': 'arn:aws:sns:eu-west-1:1234567890:my-topic',
            'StackName': 'my-stack',
            'LogicalResourceId': 'VPC',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'ROLLBACK_IN_PROGRESS',
            'ResourceStatusReason': 'Foo'
        }

        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 25, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        result = cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")
        self.assertIsNone(result)

    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_handle_stack_event_raises_exception_on_rollback_complete(self, _):
        event = {
            'PhysicalResourceId': 'arn:aws:sns:eu-west-1:1234567890:my-topic',
            'StackName': 'my-stack',
            'LogicalResourceId': 'VPC',
            'StackId': 'arn:aws:cloudformation:eu-west-1:1234567890:stack/my-stack/my-stack-id',
            'ResourceType': 'AWS::CloudFormation::Stack',
            'Timestamp': datetime.datetime(2016, 4, 1, 8, 3, 27, 548000, tzinfo=tzutc()),
            'EventId': 'my-event-id',
            'ResourceStatus': 'ROLLBACK_COMPLETE'
        }

        valid_from_timestamp = datetime.datetime(2016, 4, 1, 8, 3, 25, 548000, tzinfo=tzutc())

        cfn = CloudFormation()
        with self.assertRaises(CfnStackActionFailedException):
            cfn.handle_stack_event(event, valid_from_timestamp, "CREATE_COMPLETE")

    # --- create/update stack API calls ---

    @patch('cfn_sphere.aws.cfn.boto3.client')
    @patch('cfn_sphere.aws.cfn.CloudFormation.wait_for_stack_action_to_complete')
    def test_create_stack_calls_cloudformation_api_properly(self, _, cloudformation_mock):
        stack = Mock(spec=CloudFormationStack)
        stack.name = "stack-name"
        stack.get_parameters_list.return_value = [('a', 'b')]
        stack.get_tags_list.return_value = [('any-tag', 'any-tag-value')]
        stack.parameters = {}
        stack.template = Mock(spec=CloudFormationTemplate)
        stack.template.name = "template-name"
        stack.template.get_template_json.return_value = {'key': 'value'}
        stack.timeout = 42

        cfn = CloudFormation()
        cfn.create_stack(stack)

        cloudformation_mock.return_value.create_stack.assert_called_once_with(
            Capabilities=['CAPABILITY_IAM'],
            OnFailure='DELETE',
            Parameters=[('a', 'b')],
            StackName='stack-name',
            Tags=[('any-tag', 'any-tag-value')],
            TemplateBody={'key': 'value'},
            TimeoutInMinutes=42)

    @patch('cfn_sphere.aws.cfn.boto3.client')
    @patch('cfn_sphere.aws.cfn.CloudFormation.wait_for_stack_action_to_complete')
    def test_update_stack_calls_cloudformation_api_properly(self, _, cloudformation_mock):
        stack = Mock(spec=CloudFormationStack)
        stack.name = "stack-name"
        stack.get_parameters_list.return_value = [('a', 'b')]
        stack.get_tags_list.return_value = [('any-tag', 'any-tag-value')]
        stack.parameters = {}
        stack.template = Mock(spec=CloudFormationTemplate)
        stack.template.name = "template-name"
        stack.template.get_template_json.return_value = {'key': 'value'}
        stack.timeout = 42

        cfn = CloudFormation()
        cfn.update_stack(stack)

        cloudformation_mock.return_value.update_stack.assert_called_once_with(
            Capabilities=['CAPABILITY_IAM'],
            Parameters=[('a', 'b')],
            StackName='stack-name',
            Tags=[('any-tag', 'any-tag-value')],
            TemplateBody={'key': 'value'})

    # --- validate_stack_is_ready_for_action for every stack state ---

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_raises_exception_on_unknown_stack_state(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "FOO"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        with self.assertRaises(CfnStackActionFailedException):
            cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_raises_exception_on_update_in_progress(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "UPDATE_IN_PROGRESS"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        with self.assertRaises(CfnStackActionFailedException):
            cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_raises_exception_on_delete_in_progress(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "DELETE_IN_PROGRESS"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        with self.assertRaises(CfnStackActionFailedException):
            cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_raises_exception_on_create_in_progress(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "CREATE_IN_PROGRESS"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        with self.assertRaises(CfnStackActionFailedException):
            cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_raises_proper_exception_on_boto_error(self, get_stack_mock):
        # The side_effect wins over return_value: get_stack raises.
        get_stack_mock.side_effect = CfnSphereBotoError(None)

        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "UPDATE_COMPLETE"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        with self.assertRaises(CfnSphereBotoError):
            cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_passes_if_stack_is_in_update_complete_state(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "UPDATE_COMPLETE"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_passes_if_stack_is_in_create_complete_state(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "CREATE_COMPLETE"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        cfn.validate_stack_is_ready_for_action(stack)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    def test_validate_stack_is_ready_for_action_passes_if_stack_is_in_rollback_complete_state(self, get_stack_mock):
        stack_mock = Mock()
        stack_mock.stack_name = "my-stack"
        stack_mock.stack_status = "ROLLBACK_COMPLETE"
        get_stack_mock.return_value = stack_mock

        stack = CloudFormationStack('', [], 'my-stack', 'my-region')

        cfn = CloudFormation()
        cfn.validate_stack_is_ready_for_action(stack)

    # --- parameter dict conversion ---

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_get_stack_parameters_dict_returns_proper_dict(self, _, get_stack_mock):
        cfn = CloudFormation()
        stack_mock = Mock()
        stack_mock.parameters = [{"ParameterKey": "myKey1", "ParameterValue": "myValue1"},
                                 {"ParameterKey": "myKey2", "ParameterValue": "myValue2"}]
        get_stack_mock.return_value = stack_mock

        result = cfn.get_stack_parameters_dict('foo')

        self.assertDictEqual({'myKey1': 'myValue1', 'myKey2': 'myValue2'}, result)

    @patch('cfn_sphere.aws.cfn.CloudFormation.get_stack')
    @patch('cfn_sphere.aws.cfn.boto3.client')
    def test_get_stack_parameters_dict_returns_empty_dict_for_empty_parameters(self, _, get_stack_mock):
        cfn = CloudFormation()
        stack_mock = Mock()
        stack_mock.parameters = []
        get_stack_mock.return_value = stack_mock

        result = cfn.get_stack_parameters_dict('foo')

        self.assertDictEqual({}, result)

    # --- ClientError message classification helpers ---

    def test_is_boto_no_update_required_exception_returns_false_with_other_exception(self):
        exception = Mock(spec=Exception)
        exception.message = "No updates are to be performed."
        self.assertFalse(CloudFormation.is_boto_no_update_required_exception(exception))

    def test_is_boto_no_update_required_exception_returns_false_without_message(self):
        exception = Mock(spec=ClientError)
        exception.response = {"Error": {"Message": "Something went wrong"}}
        self.assertFalse(CloudFormation.is_boto_no_update_required_exception(exception))

    def test_is_boto_no_update_required_exception_returns_true_for_message(self):
        exception = Mock(spec=ClientError)
        exception.response = {"Error": {"Message": "No updates are to be performed."}}
        self.assertTrue(CloudFormation.is_boto_no_update_required_exception(exception))

    def test_is_boto_stack_does_not_exist_exception_returns_false_with_other_exception(self):
        exception = Mock(spec=Exception)
        exception.message = "No updates are to be performed."
        self.assertFalse(CloudFormation.is_boto_stack_does_not_exist_exception(exception))

    def test_is_boto_stack_does_not_exist_exception_returns_false_with_other_message(self):
        exception = Mock(spec=ClientError)
        exception.response = {"Error": {"Message": "Other error"}}
        self.assertFalse(CloudFormation.is_boto_stack_does_not_exist_exception(exception))

    def test_is_boto_stack_does_not_exist_exception_returns_true_for_message1(self):
        exception = Mock(spec=ClientError)
        exception.response = {"Error": {"Message": "Stack foo does not exist"}}
        self.assertTrue(CloudFormation.is_boto_stack_does_not_exist_exception(exception))

    def test_is_boto_stack_does_not_exist_exception_returns_true_for_message2(self):
        exception = Mock(spec=ClientError)
        exception.response = {"Error": {"Message": "Stack with id foo does not exist"}}
        self.assertTrue(CloudFormation.is_boto_stack_does_not_exist_exception(exception))
|
|
#
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for unit testing"""
import os
import sys
import stat
import tempfile
import unittest
import logging
from ganeti import utils
def GetSourceDir():
  """Return the top-level source directory.

  Honours the TOP_SRCDIR environment variable (as set e.g. by
  'make distcheck' builds), falling back to the current directory.

  """
  top_srcdir = os.environ.get("TOP_SRCDIR")
  if top_srcdir is None:
    return "."
  return top_srcdir
def TestDataFilename(name):
  """Returns the filename of a given test data file.

  @type name: str
  @param name: the 'base' of the file name, as present in
      the test/data directory
  @rtype: str
  @return: the full path to the filename, such that it can
      be used in 'make distcheck' rules

  """
  src_dir = GetSourceDir()
  return "%s/test/data/%s" % (src_dir, name)
def ReadTestData(name):
  """Returns the content of a test data file.

  This is just a very simple wrapper over utils.ReadFile with the
  proper test file name.

  """
  path = TestDataFilename(name)
  return utils.ReadFile(path)
def _SetupLogging(verbose):
  """Set up the logging infrastructure for a test run.

  @param verbose: if true, records go to stderr via a StreamHandler;
      otherwise they are discarded through a handler writing to os.devnull

  """
  formatter = logging.Formatter("%(asctime)s: %(threadName)s"
                                " %(levelname)s %(message)s")

  if verbose:
    sink = logging.StreamHandler()
  else:
    sink = logging.FileHandler(os.devnull, "a")

  sink.setLevel(logging.NOTSET)
  sink.setFormatter(formatter)

  root = logging.getLogger("")
  root.setLevel(logging.NOTSET)
  root.addHandler(sink)
class GanetiTestProgram(unittest.TestProgram):
  # Test-program wrapper that configures logging and refuses to run
  # when Python assertions are disabled (python -O), since the tests
  # rely on assert statements.
  def runTests(self):
    """Runs all tests.

    """
    _SetupLogging("LOGTOSTDERR" in os.environ)

    sys.stderr.write("Running %s\n" % self.progName)
    sys.stderr.flush()

    # Ensure assertions will be evaluated
    if not __debug__:
      raise Exception("Not running in debug mode, assertions would not be"
                      " evaluated")

    # Check again, this time with a real assertion
    try:
      assert False
    except AssertionError:
      pass
    else:
      # "assert False" did not raise, so assertions are disabled
      raise Exception("Assertion not evaluated")

    return unittest.TestProgram.runTests(self)
class GanetiTestCase(unittest.TestCase):
  """Helper class for unittesting.

  This class defines a few utility functions that help in building
  unittests. Child classes must call the parent setup and cleanup.

  """
  def setUp(self):
    # List of temporary files created via _CreateTempFile; removed in
    # tearDown.
    self._temp_files = []

  def tearDown(self):
    # Best-effort removal of all registered temporary files.
    while self._temp_files:
      try:
        utils.RemoveFile(self._temp_files.pop())
      except EnvironmentError:
        pass

  def assertFileContent(self, file_name, expected_content):
    """Checks that the content of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_content: str
    @param expected_content: the content we expect

    """
    actual_content = utils.ReadFile(file_name)
    self.assertEqual(actual_content, expected_content)

  def assertFileMode(self, file_name, expected_mode):
    """Checks that the mode of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_mode: int
    @param expected_mode: the mode we expect

    """
    st = os.stat(file_name)
    # Compare only the permission bits, not the file-type bits.
    actual_mode = stat.S_IMODE(st.st_mode)
    self.assertEqual(actual_mode, expected_mode)

  def assertFileUid(self, file_name, expected_uid):
    """Checks that the user id of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_uid: int
    @param expected_uid: the user id we expect

    """
    st = os.stat(file_name)
    actual_uid = st.st_uid
    self.assertEqual(actual_uid, expected_uid)

  def assertFileGid(self, file_name, expected_gid):
    """Checks that the group id of a file is what we expect.

    @type file_name: str
    @param file_name: the file whose contents we should check
    @type expected_gid: int
    @param expected_gid: the group id we expect

    """
    st = os.stat(file_name)
    actual_gid = st.st_gid
    self.assertEqual(actual_gid, expected_gid)

  def assertEqualValues(self, first, second, msg=None):
    """Compares two values whether they're equal.

    Tuples are automatically converted to lists before comparing.

    """
    return self.assertEqual(UnifyValueType(first),
                            UnifyValueType(second),
                            msg=msg)

  def _CreateTempFile(self):
    """Creates a temporary file and adds it to the internal cleanup list.

    This method simplifies the creation and cleanup of temporary files
    during tests.

    """
    fh, fname = tempfile.mkstemp(prefix="ganeti-test", suffix=".tmp")
    os.close(fh)
    self._temp_files.append(fname)
    return fname
def patch_object(*args, **kwargs):
  """Unified patch_object for various versions of Python Mock.

  Different Python Mock versions provide incompatible versions of patching an
  object. More recent versions use _patch_object, older ones used patch_object.
  This function unifies the different variations.

  """
  # Imported here rather than at module level — presumably so this module
  # can be imported even when "mock" is not installed; confirm if moving it.
  import mock
  try:
    # Newer mock versions expose the implementation as _patch_object.
    # pylint: disable=W0212
    return mock._patch_object(*args, **kwargs)
  except AttributeError:
    # Older mock versions only provide the public patch_object name.
    # pylint: disable=E1101
    return mock.patch_object(*args, **kwargs)
def UnifyValueType(data):
  """Converts all tuples into lists, recursively.

  This is useful for unittests where an external library doesn't keep types.

  @param data: an arbitrarily nested structure of tuples, lists, dicts and
      scalars
  @return: the same structure with every tuple/list rebuilt as a list and
      every dict rebuilt with converted keys and values; scalars are
      returned unchanged

  """
  if isinstance(data, (tuple, list)):
    return [UnifyValueType(i) for i in data]
  elif isinstance(data, dict):
    # items() instead of the Python 2-only iteritems(): identical behavior
    # on Python 2, and it keeps this helper working on Python 3.
    # NOTE(review): converting a tuple *key* yields an unhashable list and
    # would raise TypeError; callers apparently never pass tuple keys.
    return dict([(UnifyValueType(key), UnifyValueType(value))
                 for (key, value) in data.items()])
  return data
class CallCounter(object):
    """Wrapper counting how often a function/method is invoked.

    """
    def __init__(self, fn):
        """Remember the wrapped callable and start counting at zero.

        @type fn: Callable

        """
        self._wrapped = fn
        self._num_calls = 0

    def __call__(self, *args, **kwargs):
        """Forward the call to the wrapped callable, counting it.

        """
        self._num_calls += 1
        return self._wrapped(*args, **kwargs)

    def Count(self):
        """Return the number of calls made so far.

        @rtype: number

        """
        return self._num_calls
|
|
"""Collection of convenient functions
Functions:
adjust_lon_range -- Express longitude values in desired 360 degree interval
apply_land_ocean_mask -- Apply a land or ocean mask from an sftlf (land surface fraction) file
apply_lon_filter -- Set values outside of specified longitude range to zero
broadcast_array -- Broadcast an array to a target shape
calc_significance -- Perform significance test
convert_to_joules -- Convert units from Watts to Joules
coordinate_pairs -- Generate lat/lon pairs
create_basin_array -- Create an ocean basin array
dict_filter -- Filter dictionary according to specified keys
effective_sample_size -- Calculate the effective sample size (accounting for autocorrelation)
find_nearest -- Find the closest array item to value
find_duplicates -- Return list of duplicates in a list
fix_label -- Fix formatting of an axis label taken from the command line
get_bounds_list -- Create a bounds (i.e. pairs) list from an edge list
get_threshold -- Turn the user input threshold into a numeric threshold
hi_lo -- Determine the new highest and lowest value.
list_kwargs -- List keyword arguments of a function
mask_marginal_seas -- Mask the marginal seas
match_dates -- Take list of dates and match with the corresponding times
in a detailed time axis
single2list -- Check if item is a list, then convert if not
units_info -- Make the units taken from a file LaTeX math compliant
"""
import numpy
from scipy import stats
import pdb, re
import inspect
import iris
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf
def add_globe_basin(data, basin_cube):
    """Append a 'globe' basin to a data array with a basin dimension.

    The global basin is the sum over all ocean basins (including the
    Arctic) but excludes the marginal seas and land categories.

    Args:
      data (numpy.ndarray): 2D-4D array whose FINAL axis is the basin axis
      basin_cube (iris.cube.Cube): basin cube carrying 'flag_values' and
        'flag_meanings' attributes

    Returns:
      (data, basin_values, flag_values, flag_meanings) with the global
      basin (flag value 19, meaning 'globe') appended
    """
    flag_meanings = basin_cube.attributes['flag_meanings']
    flag_values = basin_cube.attributes['flag_values']
    # (fixed: this used to be a duplicated "basin_values = basin_values =")
    basin_values = numpy.array([int(num) for num in flag_values.split(' ')])
    assert data.shape[-1] == len(basin_values), 'Basin axis must be final axis'
    assert data.ndim in [2, 3, 4], 'function only setup to handle data of 2, 3 or 4 dimensions'
    if flag_meanings.split(' ')[-1] == 'land':
        # new basin file format: marginal seas and land are separate
        # categories, so the last TWO entries are excluded from the sum
        assert flag_values == "11 12 13 14 15 16 17 18"
        assert flag_meanings == "north_atlantic south_atlantic north_pacific south_pacific indian arctic marginal_seas land"
        final_index = -2
    else:
        # old basin file format: one combined marginal_seas_and_land entry
        assert flag_values == "11 12 13 14 15 16 17"
        assert flag_meanings == "north_atlantic south_atlantic north_pacific south_pacific indian arctic marginal_seas_and_land"
        final_index = -1
    # Ellipsis handles the 2D/3D/4D cases uniformly (basin axis is last)
    global_data = data[..., 0:final_index].sum(axis=-1)
    global_data = global_data[..., numpy.newaxis]
    data = numpy.ma.concatenate((data, global_data), axis=-1)
    basin_values = numpy.append(basin_values, 19)
    flag_values = flag_values + ' 19'
    flag_meanings = flag_meanings + ' globe'
    return data, basin_values, flag_values, flag_meanings
def get_basin_details(basin_cube):
    """Return basin flag values and bin edges from a calc_basin.py cube."""
    flag_values = basin_cube.attributes['flag_values']
    basin_values = numpy.array([int(entry) for entry in flag_values.split(' ')])
    # Bin edges sit half-way between consecutive integer flag values
    basin_edges = numpy.append(basin_values - 0.5, basin_values[-1] + 0.5)
    return basin_values, basin_edges
def adjust_lon_range(lons, radians=True, start=0.0):
    """Express longitudes within a single 360 degree (or 2*pi) interval.

    Args:
      lons (list/tuple): longitude axis values (monotonically increasing)
      radians (bool): True if the input is in radians, False for degrees;
        the output uses the same units
      start (float, optional): start of the output interval (the end is
        start + 360 degrees or 2*pi radians)
    """
    lons = single2list(lons, numpy_array=True)
    full_circle = 2.0 * numpy.pi if radians else 360.0
    end = start + full_circle
    # Shift by whole circles until every value lies inside [start, end)
    while numpy.any(lons < start):
        lons = numpy.where(lons < start, lons + full_circle, lons)
    while numpy.any(lons >= end):
        lons = numpy.where(lons >= end, lons - full_circle, lons)
    return lons
def apply_land_ocean_mask(data_cube, mask_cube, include_only, threshold=0.1):
    """Mask out land (or ocean) cells using an sftlf land-fraction cube.

    Args:
      data_cube (iris.cube): cube the mask is applied to
      mask_cube (iris.cube): sftlf cube (land percentage, 0-100)
      include_only (str): keep 'land' or 'ocean' values
      threshold (float): land percentage below which a cell counts as
        ocean (a cell value of 0 means no land at all)
    """
    assert mask_cube.data.max() == 100
    target_shape = data_cube.shape
    target_ndim = len(target_shape)
    # keep == True marks cells to leave unmasked
    if include_only == 'land':
        keep = mask_cube.data > threshold
    elif include_only == 'ocean':
        keep = mask_cube.data < threshold
    mask_array = numpy.where(keep, False, True)
    if mask_array.shape != target_shape:
        # 2D land fraction broadcast over any leading time/depth axes
        mask_array = broadcast_array(mask_array, [target_ndim - 2, target_ndim - 1], target_shape)
    data_cube.data = numpy.ma.asarray(data_cube.data)
    data_cube.data.mask = mask_array
    return data_cube
def apply_lon_filter(data, lon_bounds):
    """Zero out data values falling outside a longitude range.

    Args:
      data: 3D (time, lat, lon) array exposing a getLongitude() method
      lon_bounds (list/tuple): (min, max) longitude range to keep
    """
    # Express the bounds and the axis on a common (0, 360) interval
    lon_min = adjust_lon_range(lon_bounds[0], radians=False, start=0.0)
    lon_max = adjust_lon_range(lon_bounds[1], radians=False, start=0.0)
    lon_axis = adjust_lon_range(data.getLongitude()[:], radians=False, start=0.0)
    # Tile the axis out to the data shape, then zero out-of-range points
    ntimes, nlats, nlons = data.shape
    tiled_lons = numpy.tile(lon_axis, (ntimes, nlats, 1))
    filtered = numpy.where(tiled_lons < lon_min, 0.0, data)
    return numpy.where(tiled_lons > lon_max, 0.0, filtered)
def broadcast_array(array, axis_index, shape):
    """Broadcast an array to a target shape.

    Args:
      array (numpy.ndarray)
      axis_index (int or tuple): position(s) in the target shape that the
        array's axes occupy, e.g. an array matching (depth, lat, lon)
        within (time, depth, lat, lon) gives axis_index = [1, 3]; a lone
        lat axis within the same target gives axis_index = 2
      shape (tuple): shape to broadcast to

    For a one dimensional array the start and end axis index coincide.
    """
    if array.shape == shape:
        return array
    if type(axis_index) in [float, int]:
        first_axis = last_axis = axis_index
    else:
        assert len(axis_index) == 2
        first_axis, last_axis = axis_index
    # Prepend and repeat every leading target dimension
    for dim in range(first_axis - 1, -1, -1):
        array = numpy.repeat(array[numpy.newaxis, ...], shape[dim], axis=0)
    # Append and repeat every trailing target dimension
    for dim in range(last_axis + 1, len(shape)):
        array = numpy.repeat(array[..., numpy.newaxis], shape[dim], axis=-1)
    assert array.shape == shape, "Final broadcast array not target shape"
    return array
def calc_significance(data_subset, data_all, standard_name):
    """Perform significance test.

    One sample t-test, with the sample size adjusted for autocorrelation.

    Reference:
      Zieba (2010). doi:10.2478/v10178-010-0001-0

    Args:
      data_subset (numpy.ndarray): 3D array, time axis first
      data_all (numpy.ndarray): reference data supplying the comparison mean
      standard_name (str): name recorded in the output attribute dict

    Returns:
      (pvals, pval_atts): two-sided p-value array (per grid point) and a
      metadata dict describing it
    """
    from statsmodels.tsa.stattools import acf
    # Data must be three dimensional, with time first
    assert len(data_subset.shape) == 3, "Input data must be 3 dimensional"
    # Autocorrelation function evaluated independently at every grid point
    n = data_subset.shape[0]
    autocorr_func = numpy.apply_along_axis(acf, 0, data_subset, nlags=n - 2)
    # Calculate effective sample size (formula from Zieba2010, eq 12)
    k = numpy.arange(1, n - 1)
    r_k_sum = ((n - k[:, None, None]) / float(n)) * autocorr_func[1:]
    n_eff = float(n) / (1 + 2 * r_k_sum.sum(axis=0))
    # t statistic uses the variance scaled by the EFFECTIVE sample size
    var_x = data_subset.var(axis=0) / n_eff
    tvals = (data_subset.mean(axis=0) - data_all.mean(axis=0)) / numpy.sqrt(var_x)
    pvals = stats.t.sf(numpy.abs(tvals), n - 1) * 2  # two-sided pvalue = Prob(abs(t)>tt)
    notes = "One sample t-test, with sample size adjusted for autocorrelation (Zieba2010, eq 12)"
    pval_atts = {'standard_name': standard_name,
                 'long_name': standard_name,
                 'units': ' ',
                 'notes': notes,}
    return pvals, pval_atts
def chunked_collapse_by_time(cube, collapse_dims, agg_method, weights=None):
    """Collapse a spatial dimension by chunking along the time axis.

    Processing the cube in time chunks keeps the memory footprint down
    for large datasets; the chunks are concatenated again afterwards.

    Args:
      cube (iris.cube.Cube)
      collapse_dims (str): dimension(s) to collapse
      agg_method (iris.analysis.WeightedAggregator): aggregation method
        (only SUM and MEAN are supported)
      weights (numpy.ndarray): optional aggregation weights; must share
        the cube's leading time axis so they can be sliced in lockstep
    """
    assert agg_method in [iris.analysis.SUM, iris.analysis.MEAN]
    chunk_list = iris.cube.CubeList([])
    coord_names = [coord.name() for coord in cube.dim_coords]
    start_indexes, step = get_chunks(cube.shape, coord_names, chunk=True)
    for index in start_indexes:
        # Slice the weights with the same time window as the cube chunk
        if type(weights) in [numpy.ndarray, numpy.ma.core.MaskedArray]:
            chunk = cube[index:index+step, ...].collapsed(collapse_dims, agg_method, weights=weights[index:index+step, ...])
        else:
            chunk = cube[index:index+step, ...].collapsed(collapse_dims, agg_method)
        chunk_list.append(chunk)
    collapsed_cube = chunk_list.concatenate()[0]
    return collapsed_cube
def effective_sample_size(data, n_orig):
    """Calculate the effective sample size, accounting for autocorrelation.

    Method from Zieba2010, equation 12:
    https://content.sciendo.com/view/journals/mms/17/1/article-p3.xml
    """
    autocorr = acf(data, nlags=n_orig - 2)
    lags = numpy.arange(1, n_orig - 1)
    # Lag-weighted sum of the autocorrelation at lags 1 .. n-2
    weighted_sum = (((n_orig - lags) / float(n_orig)) * autocorr[1:]).sum()
    return float(n_orig) / (1 + 2 * weighted_sum)
def flux_to_magnitude(cube):
    """Convert units from a flux to a magnitude.

    Multiplies each time step by its span in seconds.  Caters for s-1 or
    Watts (i.e. J s-1): 'W' in the units becomes 'J', otherwise 's-1' is
    removed.
    """
    orig_units = str(cube.units)
    assert ('W' in orig_units) or ('s-1' in orig_units)
    dim_coord_names = [coord.name() for coord in cube.dim_coords]
    if 'year' in dim_coord_names:
        assert 'time' not in dim_coord_names
        # Annual data: use the average Gregorian year length per step
        time_span_days = [365.25] * cube.coord('year').shape[0]
        time_span_days = numpy.array(time_span_days)
    else:
        assert 'days' in str(cube.coord('time').units)
        # Use the actual extent of each time cell from its bounds
        time_span_days = cube.coord('time').bounds[:, 1] - cube.coord('time').bounds[:, 0]
    time_span_seconds = time_span_days * 60 * 60 * 24
    # Broadcast the per-step span over the remaining (spatial) axes
    cube.data = cube.data * broadcast_array(time_span_seconds, 0, cube.shape)
    if 'W' in orig_units:
        cube.units = orig_units.replace('W', 'J')
    else:
        cube.units = orig_units.replace('s-1', '')
    return cube
def coordinate_pairs(lat_axis, lon_axis):
    """Flatten grid axes into element-wise (lat, lon) pairs.

    Returns flattened lat and lon arrays whose i-th entries together give
    the coordinates of the i-th grid point.
    """
    # meshgrid(lon, lat) yields arrays indexed (lat, lon) — correct order
    lon_grid, lat_grid = numpy.meshgrid(lon_axis, lat_axis)
    return lat_grid.flatten(), lon_grid.flatten()
def create_basin_array(cube):
    """Create an ocean basin array.

    For similarity with the CMIP5 basin file, in the output:
      Atlantic Ocean = 2
      Pacific Ocean = 3
      Indian Ocean = 5
      (land = 0)

    FIXME: When applied to CMIP5 data, some of the marginal seas might
    not be masked
    """
    # Approximate longitudinal extents (degrees E) of the two basins
    pacific_bounds = [147, 294]
    indian_bounds = [23, 147]
    lat_axis = cube.coord('latitude').points
    lon_axis = adjust_lon_range(cube.coord('longitude').points, radians=False)
    coord_names = [coord.name() for coord in cube.dim_coords]
    lat_index = coord_names.index('latitude')
    lon_index = coord_names.index('longitude')
    lat_array = broadcast_array(lat_axis, lat_index, cube.shape)
    lon_array = broadcast_array(lon_axis, lon_index, cube.shape)
    # Start as Atlantic everywhere, then carve out Pacific and Indian
    basin_array = numpy.ones(cube.shape) * 2
    basin_array = numpy.where((lon_array >= pacific_bounds[0]) & (lon_array <= pacific_bounds[1]), 3, basin_array)
    basin_array = numpy.where((lon_array >= indian_bounds[0]) & (lon_array <= indian_bounds[1]), 5, basin_array)
    # NOTE(review): the three corrections below appear to handle the
    # Caribbean/N Atlantic, the Indonesian archipelago, and land north of
    # the Indian Ocean respectively — confirm against the source dataset
    basin_array = numpy.where((basin_array == 3) & (lon_array >= 279) & (lat_array >= 10), 2, basin_array)
    basin_array = numpy.where((basin_array == 5) & (lon_array >= 121) & (lat_array >= 0), 3, basin_array)
    basin_array = numpy.where((basin_array == 5) & (lat_array >= 25), 0, basin_array)
    return basin_array
def dict_filter(indict, key_list):
    """Return a copy of indict restricted to the keys in key_list."""
    return {key: value for key, value in indict.items() if key in key_list}
def find_duplicates(inlist):
    """Return a dict mapping each duplicated item to its indices in inlist.

    Items occurring only once are omitted.

    (Fixed: the previous version referenced an undefined name `mylist`
    and relied on `defaultdict`, which is not imported in this module,
    so every call raised NameError.)
    """
    positions = {}
    for i, item in enumerate(inlist):
        positions.setdefault(item, []).append(i)
    return {item: idxs for item, idxs in positions.items() if len(idxs) > 1}
def find_nearest(array, value, index=False):
    """Find the array item closest to value.

    Args:
      index (bool): if True, return the item's index instead of the item

    Returns:
      (item_or_index, error) where error = array[idx] - value
    """
    nearest_idx = (numpy.abs(numpy.array(array) - value)).argmin()
    difference = array[nearest_idx] - value
    if index:
        return nearest_idx, difference
    return array[nearest_idx], difference
def fix_label(label):
    """Make an axis label from the command line presentation-ready.

    Underscores become spaces and common unit strings are rewritten as
    LaTeX math.
    """
    substitutions = {'_': ' ',
                     'degE': '$^{\circ}$E',
                     'ms-1': '$m s^{-1}$',
                     'm.s-1': '$m s^{-1}$',
                     'Wm-2': '$W m^{-2}$',
                     '1000000 m2.s-1': '$10^6$m$^2$s$^{-1}$'
                     }
    for old_text, new_text in substitutions.items():
        label = label.replace(old_text, new_text)
    return label
def get_bounds_list(edges):
    """Create a bounds array (adjacent pairs) from an edge list."""
    pairs = [[lower, upper] for lower, upper in zip(edges[:-1], edges[1:])]
    return numpy.array(pairs)
def get_chunks(cube_shape, coord_names, chunk=True, step=2):
    """Provide start indexes and a step for chunking along the time axis.

    With chunk=False a single chunk covering the whole axis is returned.
    """
    ntimes = cube_shape[0]
    if not chunk:
        return [0], ntimes
    assert coord_names[0] == 'time'
    # Grow the step until no trailing chunk of length one remains
    while ntimes % step == 1:
        step += 1
    return range(0, ntimes, step), step
def get_threshold(data, threshold_str, axis=None):
    """Convert a user-supplied threshold string to a numeric threshold.

    A value like '90pct' is interpreted as the 90th percentile of data;
    anything else is parsed directly as a float.
    """
    if 'pct' in threshold_str:
        percentile = float(re.sub('pct', '', threshold_str))
        return numpy.percentile(data, percentile, axis=axis)
    return float(threshold_str)
def hi_lo(data_series, current_max, current_min):
    """Determine the new highest and lowest value.

    Compares the extremes of data_series with the running max/min and
    returns the updated (max, min) pair.
    """
    try:
        highest = numpy.max(data_series)
    except Exception:
        # Fall back to the builtin for inputs numpy cannot reduce
        # (bare `except:` replaced — it also swallowed KeyboardInterrupt)
        highest = max(data_series)
    try:
        lowest = numpy.min(data_series)
    except Exception:
        lowest = min(data_series)
    new_max = highest if highest > current_max else current_max
    new_min = lowest if lowest < current_min else current_min
    return new_max, new_min
def list_kwargs(func):
    """List the keyword (defaulted) argument names of a function.

    Returns an empty list when the function has no defaulted arguments
    (the previous version crashed with TypeError on len(None) in that
    case, and used inspect.getargspec, removed in Python 3.11).
    """
    try:
        details = inspect.getfullargspec(func)
    except AttributeError:
        # Python 2 fallback: getfullargspec was added in Python 3
        details = inspect.getargspec(func)
    if not details.defaults:
        return []
    nopt = len(details.defaults)
    return details.args[-nopt:]
def mask_marginal_seas(data_cube, basin_cube):
    """Mask the marginal seas (basin values above 5) in a data cube."""
    data_cube.data = numpy.ma.asarray(data_cube.data)
    ndim = data_cube.ndim
    basin_array = broadcast_array(basin_cube.data, [ndim - 2, ndim - 1], data_cube.shape)
    # Keep a point only if it was unmasked AND lies in a real basin (<= 5)
    keep = (data_cube.data.mask == False) & (basin_array <= 5)
    data_cube.data.mask = numpy.where(keep, False, True)
    return data_cube
def mask_unwanted_seas(data_cube, basin_cube, basins_to_keep):
    """Mask every sea except those listed in basins_to_keep.

    Args:
      data_cube (iris.cube.Cube)
      basin_cube (iris.cube.Cube): CMIP5 basin file
      basins_to_keep (list): basin numbers to retain

    Basin names and corresponding number in basin files:
      0 global_land
      1 southern_ocean
      2 atlantic_ocean
      3 pacific_ocean
      4 arctic_ocean
      5 indian_ocean
      6 mediterranean_sea
      7 black_sea
      8 hudson_bay
    """
    ndim = data_cube.ndim
    basin_array = broadcast_array(basin_cube.data, [ndim - 2, ndim - 1], data_cube.shape)
    # Keep points that were unmasked and belong to a wanted basin
    wanted = numpy.isin(basin_array, basins_to_keep)
    data_cube.data.mask = numpy.where((data_cube.data.mask == False) & wanted, False, True)
    return data_cube
def match_dates(datetimes, datetime_axis):
    """Partition a time axis by whether each date appears in a date list.

    Args:
      datetimes (list/tuple): target datetimes
      datetime_axis (list/tuple): detailed time axis to partition

    Returns:
      (matched, missed) lists of datetime_axis entries
    """
    # Compare on the date part only
    wanted_dates = set(map(split_dt, datetimes))
    matched = []
    missed = []
    for axis_value in datetime_axis:
        if split_dt(axis_value) in wanted_dates:
            matched.append(axis_value)
        else:
            missed.append(axis_value)
    return matched, missed
def salinity_bins():
    """Define commonly used salinity bin values and edges.

    Fine 0.1 resolution between 30 and 40, plus coarse bins covering the
    tails down to -0.2 and up to 80.

    Returns:
      (s_values, s_edges): bin midpoints (first midpoint pinned to 5
      since the first bin is very wide) and bin edges
    """
    # (removed unused smin/smax locals)
    s_edges = numpy.arange(30, 40.05, 0.1)
    s_edges = numpy.insert(s_edges, 0, [-0.2, 10, 20])
    s_edges = numpy.append(s_edges, [50, 60, 80])
    s_values = (s_edges[1:] + s_edges[:-1]) / 2
    s_values = list(s_values)
    # First bin spans -0.2 to 10, so use a representative value of 5
    s_values[0] = 5
    return s_values, s_edges
def split_dt(dt):
    """Return just the date part of a numpy.datetime64 value as a string."""
    return str(dt).partition('T')[0]
def single2list(item, numpy_array=False):
    """Wrap a scalar item in a list; pass sequences through unchanged.

    Args:
      item: value to normalise
      numpy_array (bool): return a numpy.ndarray instead of a list

    Returns:
      item itself if it is already sequence-like, otherwise [item]
    """
    if isinstance(item, (list, tuple, numpy.ndarray)):
        output = item
    elif isinstance(item, str):
        output = [item]
    else:
        try:
            len(item)
        except TypeError:
            # No length: a true scalar, so wrap it
            output = [item]
        else:
            # Sized object (set, dict, ...): pass through unchanged.
            # (Previously this path crashed with UnboundLocalError.)
            output = item
    if numpy_array and not isinstance(output, numpy.ndarray):
        return numpy.array(output)
    return output
def units_info(units):
    """Make units read from a file LaTeX math compliant.

    Handles powers, e.g. '10^22 J' and negative exponents like 'm s-1'.

    Returns:
      (tex_units, exponent): the '$'-wrapped LaTeX string and the '^'
      power found (None if there was none)
    """
    exponent = None
    formatted = []
    for token in units.split():
        caret_pos = token.find('^')
        if caret_pos != -1:
            # Brace the power so LaTeX renders multi-digit exponents
            exponent = token[caret_pos + 1:]
            token = token[:caret_pos + 1] + '{' + token[caret_pos + 1:] + '}'
        minus_pos = token.find('-')
        if minus_pos != -1:
            # Negative exponent, e.g. 's-1' -> 's^{-1}'
            token = token[:minus_pos] + '^{' + token[minus_pos:] + '}'
        formatted.append(token)
    tex_units = ''
    for token in formatted[:-1]:
        tex_units += token + ' \;'
    tex_units += ' ' + formatted[-1]
    return '$' + tex_units + '$', exponent
|
|
from optparse import OptionParser
import os
import glob
import numpy as np
from collections import defaultdict
from ..util import dirs
from ..util import file_handling as fh
from ..export import html
def main():
    """Entry point: parse 'project JLDA_output_dir html_output_dir' args."""
    parser = OptionParser(usage="%prog project JLDA_output_dir html_output_dir")
    (options, args) = parser.parse_args()
    # The project argument configures the global paths in the dirs module
    project = dirs.make_base_dir(args[0])
    export_to_html(args[1], args[2])
def export_to_html(JLDA_dir, html_output_dir):
    """Render JLDA entity/persona output as interactive d3 HTML pages.

    Reads entity mentions, persona counts and story types, then writes
    one .js/.html page per article plus one index page per story type
    into html_output_dir.

    NOTE(review): this is Python 2 code (print statement, list-returning
    dict.keys(), pandas .as_matrix()).
    """
    # Re-key the entity mentions by article basename (extension stripped)
    entity_text_mentions_temp = fh.read_json(os.path.join(dirs.persona_dir, 'entity_text_mentions.json'))
    entity_text_mentions = {}
    for k in entity_text_mentions_temp.keys():
        basename = os.path.splitext(os.path.basename(k))[0]
        entity_text_mentions[basename] = entity_text_mentions_temp[k]
    print entity_text_mentions.keys()[:10]
    parsed_dir = os.path.join(dirs.data_stanford_dir, 'parsed')
    parsed_files = glob.glob(os.path.join(parsed_dir, '*.json'))
    #rnn_data = fh.read_json(os.path.join(dirs.persona_dir, 'rnn_data.json'))
    #docs = fh.read_json(os.path.join(dirs.persona_dir, 'docs.json'))
    entity_personas = get_entity_personas(JLDA_dir)
    persona_words = get_persona_words(JLDA_dir)
    article_story_types = get_article_story_types(JLDA_dir)
    article_story_types.index = [os.path.splitext(os.path.basename(f))[0] for f in article_story_types.index]
    colors = get_colors()
    # One shared stylesheet for all generated pages
    css = make_css_file(colors)
    fh.write_list_to_text(css, os.path.join(html_output_dir, 'style.css'))
    story_types_lists = defaultdict(list)
    count = 0
    for d_i, d in enumerate(parsed_files):
        #d = parsed_files[d_i]
        basename = os.path.splitext(os.path.basename(d))[0]
        count += 1
        # Only articles with mentions whose name starts with 'Imm'
        # (presumably the immigration subset — TODO confirm) are exported
        if basename in entity_text_mentions and basename[:3] == 'Imm':
            parsed = fh.read_json(d)
            # entities/roles: sentence index -> token index -> entity id(s)
            entities = defaultdict(dict)
            roles = defaultdict(dict)
            first_words = {}
            entity_index = {}
            for e_i, entity in enumerate(entity_text_mentions[basename]):
                for s_i, s in enumerate(entity_text_mentions[basename][entity]['sent_indices']):
                    sent_index = s
                    token_index = entity_text_mentions[basename][entity]['token_indices'][s_i]
                    # store the entity id of this token
                    entities[sent_index][token_index] = entity
                    if entity not in first_words:
                        # Remember the first surface form and assign the
                        # dense index used for CSS class names
                        first_words[entity] = parsed[sent_index][token_index]['word'].lower()
                        entity_index[entity] = len(entity_index.keys())
                for tuple in entity_text_mentions[basename][entity]['roles']:
                    tuple_type, token, sent_index, token_index = tuple
                    if token_index in roles[sent_index]:
                        roles[sent_index][token_index].append(str(entity))
                    else:
                        roles[sent_index][token_index] = [str(entity)]
            # Walk the tokens, grouping them into contiguous text blocks
            # that share the same entity/role CSS classes
            text_blocks = []
            block_classes = []
            new_block = True
            entity = False
            role = False
            for s_i, sent in enumerate(parsed):
                for t_i, token in enumerate(sent):
                    word = token['word']
                    d3_classes = []
                    if s_i in roles:
                        if t_i in roles[s_i]:
                            for e in roles[s_i][t_i]:
                                d3_classes.append('role_' + str(entity_index[e]))
                            role = True
                    if s_i in entities:
                        if t_i in entities[s_i]:
                            d3_class = 'entity_' + str(entity_index[entities[s_i][t_i]])
                            d3_classes.append(d3_class)
                            entity = True
                    if entity or role:
                        new_block = True
                    if len(d3_classes) == 0:
                        d3_classes.append('plain_text')
                    if new_block:
                        text_blocks.append("")
                        block_classes.append(d3_classes)
                        new_block = True
                    # No leading space before sentence punctuation
                    if word == '.' or word == ',' or word == '?' or word == '!' or word == "''":
                        text_blocks[-1] += word
                    else:
                        text_blocks[-1] += " " + word
                    if not (entity or role):
                        new_block = False
                    entity = False
                    role = False
            d3 = make_d3_header()
            # Top three story types for this article (last column dropped)
            story_types = article_story_types.loc[basename].as_matrix()[:-1]
            order = list(np.argsort(story_types).tolist())
            order.reverse()
            d3 += add_d3_paragraph()
            for i in range(3):
                story_types_lists[order[i]].append((basename, story_types[order[i]]))
                d3 += add_d3_text([], str(order[i]) + ": (" + str(story_types[order[i]]) + ")")
                d3 += add_br()
            d3 += add_d3_paragraph()
            d3 += add_d3_selectors(colors, first_words, entity_index)
            d3 += add_d3_paragraph()
            for b_i, block in enumerate(text_blocks):
                d3 += add_d3_text(block_classes[b_i], block)
            # Legend: one entry per entity with its top personas
            keys = entity_text_mentions[basename].keys()
            keys.sort()
            for e_i, entity in enumerate(keys):
                d3 += add_d3_paragraph()
                d3 += add_d3_text(['legend'], first_words[entity], color=colors[e_i % len(colors)])
                d3 += add_br()
                persona_counts = entity_personas.iloc[int(entity)].as_matrix()[:-1]
                order = list(np.argsort(persona_counts).tolist())
                order.reverse()
                for i in range(5):
                    # Only personas with a meaningful count (> 4)
                    if persona_counts[order[i]] > 4:
                        d3 += add_d3_text(['legend_' + str(entity)], str(persona_counts[order[i]]) + ' : ' + ' '.join(persona_words[order[i]]))
                        d3 += add_br()
            # for ap in appearances:
            #     for tuple in ap:
            #         print (tuple[3], tuple[2])
            fh.write_list_to_text(d3, os.path.join(html_output_dir, basename + '.js'))
            html_text = make_html_file(basename)
            fh.write_list_to_text(html_text, os.path.join(html_output_dir, basename + '.html'))
        #d_i += 1
    # Index pages: articles ranked per story type (at most ~100 links)
    for story_type, articles in story_types_lists.items():
        vals = [v for a, v in articles]
        order = list(np.argsort(vals).tolist())
        order.reverse()
        html_text = html.make_header('Story type ' + str(story_type))
        html_text += html.make_body_start()
        links = []
        for i, index in enumerate(order):
            link_text = articles[index][0] + ' (' + str(articles[index][1]) + ')'
            links.append(html.make_link(articles[index][0] + '.html', link_text, new_window=False))
            if i > 100:
                break
        html_text += html.make_unordered_list(links)
        html_text += html.make_body_end()
        with open(os.path.join(html_output_dir, 'story_type_' + str(story_type) + '.html'), 'w') as output_file:
            output_file.write(html_text)
def get_colors():
colors = ['DodgerBlue',
'OrangeRed',
'MediumSeaGreen',
'Orange',
'Peru',
'DarkOrchid',
'HotPink',
'Plum',
'Turqoise',
'DodgerBlue',
'OrangeRed',
'MediumSeaGreen',
'Orange',
'Peru',
'DarkOrchid',
'HotPink',
'Plum',
'Turqoise']
return colors
def make_css_file(colors):
    """Build CSS lines styling entity/role classes, one pair per color."""
    lines = ['body { font: 18px Helvetica; }', '']
    for idx in range(len(colors)):
        lines.append('.entity_' + str(idx) + '{ color : black; font-weight: bold; }')
        lines.append('.role_' + str(idx) + '{ color : black; font-style: italic; }')
        lines.append('')
    return lines
def make_d3_header():
    """Return the d3 boilerplate lines that begin every generated .js file."""
    return ['var w=960;',
            'var h=500;',
            '',
            'var svg = d3.select("body")',
            '']
def add_d3_paragraph():
    """Return the d3 line that appends a paragraph element to the body."""
    return ['d3.select("body").append("p")']
def add_d3_text(d3_classes, text, color=None):
    """Return d3 lines appending a text element with classes and color."""
    lines = ['d3.select("body").append("text")',
             '.attr("class", "' + ' '.join(d3_classes) + '")']
    if color is not None:
        lines.append('.style("color", "' + color + '")')
    # A trailing space keeps consecutive text blocks visually separated
    lines.append('.text("' + text + ' ");')
    lines.append('')
    return lines
def add_br():
    """Return the d3 line that appends a line break to the body."""
    return ['d3.select("body").append("br")', '']
def add_d3_selectors(colors, words, entity_index):
    """Return d3 lines for clickable legend selectors, one per entity.

    Clicking a selector resets every entity/role span to black and then
    re-colors only the chosen entity; the first selector is 'OFF'.
    """
    colors = ['black'] + colors
    keys = [''] + sorted(words.keys())
    lines = []
    for c_i, c in enumerate(keys):
        selector_color = colors[c_i % len(colors)]
        lines.append('d3.select("body").append("text")')
        lines.append('.attr("class", "legend")')
        lines.append('.style("color", "' + selector_color + ' ")')
        lines.append('.on("click", function(){')
        # Reset every entity and role span back to black first
        for e_i, e in words.items():
            lines.append('d3.selectAll(".entity_' + str(entity_index[e_i]) + '").style("color", "black");')
            lines.append('d3.selectAll(".role_' + str(entity_index[e_i]) + '").style("color", "black");')
        if c_i > 0:
            # Then highlight only the clicked entity in its own color
            lines.append('d3.selectAll(".entity_' + str(entity_index[c]) + '").style("color", "' + selector_color + '");')
            lines.append('d3.selectAll(".role_' + str(entity_index[c]) + '").style("color", "' + selector_color + '");')
        lines.append('})')
        if c_i > 0:
            lines.append('.text("' + words[c] + ' ");')
        else:
            lines.append('.text("OFF ");')
        lines.append('')
    return lines
def get_persona_words(input_dir):
    """Collect the top words for each persona from the JLDA output files.

    For every persona column, the five most frequent head words
    (upper-cased) are combined with the five most frequent role
    vocabulary words (lower-cased).

    Returns a dict mapping persona column position -> list of ten words.
    """
    head_words = {}
    df = fh.read_csv(os.path.join(input_dir, 'persona_head_word_counts.csv'), header=-1)
    index = df.index
    columns = df.columns
    for c_i, c in enumerate(columns):
        counts = df[c].as_matrix()
        # Descending order of counts; keep the top five row labels
        order = list(np.argsort(counts).tolist())
        order.reverse()
        top_words = [index[i].upper() for i in order[:5]]
        head_words[c_i] = top_words
    df_roles = fh.read_csv(os.path.join(input_dir, 'persona_role_vocab_counts.csv'), header=-1)
    index = df_roles.index
    # NOTE(review): columns are taken from df (minus its last column),
    # not df_roles — presumably the two files share their column layout;
    # confirm against the JLDA output format
    columns = df.columns[:-1]
    for c_i, c in enumerate(columns):
        counts = df_roles[c].as_matrix()
        order = list(np.argsort(counts).tolist())
        order.reverse()
        top_words = [index[i].lower() for i in order[:5]]
        head_words[c_i] += top_words
    return head_words
def get_entity_personas(input_dir):
    """Read the entity-by-persona count matrix from a JLDA output dir.

    Returns raw counts: callers threshold on absolute counts (> 4), so
    the matrix is deliberately NOT row-normalized.
    """
    df = fh.read_csv(os.path.join(input_dir, 'entity_persona_counts.csv'), header=-1)
    # NOTE(review): removed a no-op `df.div(df.sum(axis=1), axis=0)` —
    # DataFrame.div returns a new frame and the result was discarded, so
    # the statement never had any effect
    return df
def get_article_story_types(input_dir):
    """Read the article-by-story-type matrix from a JLDA output dir.

    Returns the values as stored on disk (unnormalized).
    """
    df = fh.read_csv(os.path.join(input_dir, 'document_story_types.csv'), header=-1)
    # NOTE(review): removed a no-op `df.div(df.sum(axis=1), axis=0)` —
    # the returned (normalized) frame was discarded, so it had no effect
    return df
def make_html_file(filename):
    """Build the HTML wrapper page that loads d3 and <filename>.js."""
    return ['<!DOCTYPE html>',
            '<html>',
            '<head>',
            '<meta http-equiv="Content-Type" content="text/html;charset=utf-8">',
            '<title>' + filename + '</title>',
            '<script type="text/javascript" src="d3/d3.min.js"></script>',
            '<link href="style.css" rel="stylesheet">',
            '</head>',
            '<body>',
            '<script type="text/javascript" src="' + filename + '.js"></script>',
            '</body>',
            '</html>']
if __name__ == '__main__':
    # Allow running this exporter directly as a script
    main()
|
|
from collections import OrderedDict
from xml.etree import ElementTree as ET
import openmc
from openmc.clean_xml import sort_xml_elements, clean_xml_indentation
from openmc.checkvalue import check_type
def reset_auto_ids():
    """Reset counters for all auto-generated IDs"""
    # Materials, surfaces, cells and universes each keep their own counter
    for reset in (openmc.reset_auto_material_id,
                  openmc.reset_auto_surface_id,
                  openmc.reset_auto_cell_id,
                  openmc.reset_auto_universe_id):
        reset()
class Geometry(object):
    """Geometry representing a collection of surfaces, cells, and universes.

    Parameters
    ----------
    root_universe : openmc.Universe, optional
        Root universe which contains all others

    Attributes
    ----------
    root_universe : openmc.Universe
        Root universe which contains all others

    """

    def __init__(self, root_universe=None):
        # Backing attribute for the root_universe property
        self._root_universe = None
        # Memoized (path, distribcell index) -> offset lookups
        self._offsets = {}
        if root_universe is not None:
            # Assign through the property so ID validation runs
            self.root_universe = root_universe
@property
def root_universe(self):
    # Read-only access to the validated root universe
    return self._root_universe

@root_universe.setter
def root_universe(self, root_universe):
    # Must be an openmc.Universe carrying the reserved root ID 0
    check_type('root universe', root_universe, openmc.Universe)
    if root_universe.id != 0:
        msg = 'Unable to add root Universe "{0}" to Geometry since ' \
              'it has ID="{1}" instead of ' \
              'ID=0'.format(root_universe, root_universe.id)
        raise ValueError(msg)
    self._root_universe = root_universe
def add_volume_information(self, volume_calc):
    """Add volume information from a stochastic volume calculation.

    Parameters
    ----------
    volume_calc : openmc.VolumeCalculation
        Results from a stochastic volume calculation

    """
    if volume_calc.domain_type == 'cell':
        # Only cells whose ID appears in the results are updated
        relevant_cells = [c for c in self.get_all_cells()
                          if c.id in volume_calc.results]
        for cell in relevant_cells:
            cell.add_volume_information(volume_calc)
def export_to_xml(self, path='geometry.xml'):
    """Export geometry to an XML file.

    Parameters
    ----------
    path : str
        Path to file to write. Defaults to 'geometry.xml'.

    """
    # Clear OpenMC written IDs used to optimize XML generation
    openmc.universe.WRITTEN_IDS = {}

    # Build the XML representation rooted at a <geometry> element
    root_element = ET.Element("geometry")
    self.root_universe.create_xml_subelement(root_element)

    # Make the output user-readable before writing
    sort_xml_elements(root_element)
    clean_xml_indentation(root_element)

    tree = ET.ElementTree(root_element)
    tree.write(path, xml_declaration=True, encoding='utf-8', method="xml")
def find(self, point):
    """Find cells/universes/lattices which contain a given point

    Parameters
    ----------
    point : 3-tuple of float
        Cartesian coordinates of the point

    Returns
    -------
    list
        Sequence of universes, cells, and lattices which are traversed to
        find the given point

    """
    # The traversal is delegated entirely to the root universe
    return self.root_universe.find(point)
def get_cell_instance(self, path):
    """Return the instance number for the final cell in a geometry path.

    The instance is an index into tally distribcell filter arrays.

    Parameters
    ----------
    path : list
        A list of IDs that form the path to the target. It should begin with
        0 for the base universe, and should cover every universe, cell, and
        lattice passed through. For the case of the lattice, a tuple should
        be provided to indicate which coordinates in the lattice should be
        entered. This should be in the form: (lat_id, i_x, i_y, i_z)

    Returns
    -------
    instance : int
        Index in tally results array for distribcell filters

    """
    # NOTE(review): path is used as a '>'-delimited STRING here (rfind),
    # despite the docstring saying list — confirm which callers expect
    # Extract the cell id from the path (text after the last '>')
    last_index = path.rfind('>')
    cell_id = int(path[last_index+1:])

    # Find the distribcell index of the cell.
    cells = self.get_all_cells()
    for cell in cells:
        if cell.id == cell_id:
            distribcell_index = cell.distribcell_index
            break
    else:
        raise RuntimeError('Could not find cell {} specified in a \
distribcell filter'.format(cell_id))

    # Return memoize'd offset if possible
    if (path, distribcell_index) in self._offsets:
        offset = self._offsets[(path, distribcell_index)]

    # Begin recursive call to compute offset starting with the base Universe
    else:
        offset = self._root_universe.get_cell_instance(path,
                                                       distribcell_index)
        self._offsets[(path, distribcell_index)] = offset

    # Return the final offset
    return offset
def get_all_cells(self):
    """Return all cells defined

    Returns
    -------
    list of openmc.Cell
        Cells in the geometry

    """
    # Deduplicate, then present in a stable ID order
    unique_cells = set(self.root_universe.get_all_cells().values())
    return sorted(unique_cells, key=lambda c: c.id)
def get_all_universes(self):
    """Return all universes defined

    Returns
    -------
    list of openmc.Universe
        Universes in the geometry

    """
    # Deduplicate, then present in a stable ID order
    unique_universes = set(self._root_universe.get_all_universes().values())
    return sorted(unique_universes, key=lambda u: u.id)
def get_all_materials(self):
    """Return all materials assigned to a cell

    Returns
    -------
    list of openmc.Material
        Materials in the geometry, sorted by ID

    """
    materials = []
    for cell in self.get_all_material_cells():
        # A distribmat fill is a sequence of materials (possibly with
        # None placeholders); a material fill is a single material.
        if cell.fill_type == 'distribmat':
            candidates = [m for m in cell.fill if m is not None]
        elif cell.fill_type == 'material':
            candidates = [cell.fill]
        else:
            candidates = []

        for mat in candidates:
            if mat not in materials:
                materials.append(mat)

    materials.sort(key=lambda m: m.id)
    return materials
def get_all_material_cells(self):
    """Return all cells filled by a material.

    Returns
    -------
    list of openmc.Cell
        Cells filled by Materials in the geometry, sorted by ID

    """
    # get_all_cells() already returns unique cells, so the old
    # "if cell not in material_cells" membership test was redundant
    # (and O(n) per cell); a simple filter is equivalent.
    material_cells = [cell for cell in self.get_all_cells()
                      if cell.fill_type in ('material', 'distribmat')]
    material_cells.sort(key=lambda x: x.id)
    return material_cells
def get_all_material_universes(self):
    """Return all universes composed of at least one non-fill cell.

    Returns
    -------
    list of openmc.Universe
        Universes containing at least one cell filled by a material,
        a distributed material, or void, sorted by ID

    """
    material_universes = []
    for universe in self.get_all_universes():
        # One qualifying cell is enough -- any() short-circuits instead
        # of scanning the remaining cells as the old loop did. The
        # membership test is unnecessary since get_all_universes()
        # yields each universe exactly once.
        if any(cell.fill_type in ('material', 'distribmat', 'void')
               for cell in universe.cells):
            material_universes.append(universe)
    material_universes.sort(key=lambda x: x.id)
    return material_universes
def get_all_lattices(self):
    """Return all lattices defined

    Returns
    -------
    list of openmc.Lattice
        Lattices in the geometry, sorted by ID

    """
    lattices = []
    # Collect each distinct lattice fill exactly once.
    for cell in self.get_all_cells():
        if cell.fill_type == 'lattice' and cell.fill not in lattices:
            lattices.append(cell.fill)
    return sorted(lattices, key=lambda lat: lat.id)
def get_materials_by_name(self, name, case_sensitive=False, matching=False):
    """Return a list of materials with matching names.

    Parameters
    ----------
    name : str
        The name to match
    case_sensitive : bool
        Whether to distinguish upper and lower case letters in each
        material's name (default is False)
    matching : bool
        Whether the names must match completely (default is False)

    Returns
    -------
    list of openmc.Material
        Materials matching the queried name, sorted by ID

    """
    query = name if case_sensitive else name.lower()

    found = set()
    for material in self.get_all_materials():
        label = material.name if case_sensitive else material.name.lower()
        # An exact hit always counts; a substring hit counts only when
        # the caller did not request complete matches.
        if label == query or (not matching and query in label):
            found.add(material)

    return sorted(found, key=lambda m: m.id)
def get_cells_by_name(self, name, case_sensitive=False, matching=False):
    """Return a list of cells with matching names.

    Parameters
    ----------
    name : str
        The name to match
    case_sensitive : bool
        Whether to distinguish upper and lower case letters in each
        cell's name (default is False)
    matching : bool
        Whether the names must match completely (default is False)

    Returns
    -------
    list of openmc.Cell
        Cells matching the queried name, sorted by ID

    """
    query = name if case_sensitive else name.lower()

    found = set()
    for cell in self.get_all_cells():
        label = cell.name if case_sensitive else cell.name.lower()
        # Exact match always qualifies; substring match qualifies only
        # when complete matching was not requested.
        if label == query or (not matching and query in label):
            found.add(cell)

    return sorted(found, key=lambda c: c.id)
def get_cells_by_fill_name(self, name, case_sensitive=False, matching=False):
    """Return a list of cells with fills with matching names.

    Parameters
    ----------
    name : str
        The name to match
    case_sensitive : bool
        Whether to distinguish upper and lower case letters in each
        cell's name (default is False)
    matching : bool
        Whether the names must match completely (default is False)

    Returns
    -------
    list of openmc.Cell
        Cells with fills matching the queried name, sorted by ID

    """
    if not case_sensitive:
        name = name.lower()

    all_cells = self.get_all_cells()

    cells = set()
    for cell in all_cells:
        # Robustness fix: void cells have fill None and distribmat
        # fills are sequences of materials -- neither carries a single
        # ``name`` attribute, so the old unconditional ``cell.fill.name``
        # raised AttributeError. Skip such cells instead.
        cell_fill_name = getattr(cell.fill, 'name', None)
        if cell_fill_name is None:
            continue
        if not case_sensitive:
            cell_fill_name = cell_fill_name.lower()

        if cell_fill_name == name:
            cells.add(cell)
        elif not matching and name in cell_fill_name:
            cells.add(cell)

    cells = list(cells)
    cells.sort(key=lambda x: x.id)
    return cells
def get_universes_by_name(self, name, case_sensitive=False, matching=False):
    """Return a list of universes with matching names.

    Parameters
    ----------
    name : str
        The name to match
    case_sensitive : bool
        Whether to distinguish upper and lower case letters in each
        universe's name (default is False)
    matching : bool
        Whether the names must match completely (default is False)

    Returns
    -------
    list of openmc.Universe
        Universes matching the queried name, sorted by ID

    """
    query = name if case_sensitive else name.lower()

    found = set()
    for universe in self.get_all_universes():
        label = universe.name if case_sensitive else universe.name.lower()
        # Exact match always qualifies; substring match qualifies only
        # when complete matching was not requested.
        if label == query or (not matching and query in label):
            found.add(universe)

    return sorted(found, key=lambda u: u.id)
def get_lattices_by_name(self, name, case_sensitive=False, matching=False):
    """Return a list of lattices with matching names.

    Parameters
    ----------
    name : str
        The name to match
    case_sensitive : bool
        Whether to distinguish upper and lower case letters in each
        lattice's name (default is False)
    matching : bool
        Whether the names must match completely (default is False)

    Returns
    -------
    list of openmc.Lattice
        Lattices matching the queried name, sorted by ID

    """
    query = name if case_sensitive else name.lower()

    found = set()
    for lattice in self.get_all_lattices():
        label = lattice.name if case_sensitive else lattice.name.lower()
        # Exact match always qualifies; substring match qualifies only
        # when complete matching was not requested.
        if label == query or (not matching and query in label):
            found.add(lattice)

    return sorted(found, key=lambda lat: lat.id)
|
|
#!/usr/local/bin/python3 -u
"""
SentientHome Application - based on Cement framework.
Author: Oliver Ratzesberger <https://github.com/fxstein>
Copyright: Copyright (C) 2017 Oliver Ratzesberger
License: Apache License, Version 2.0
"""
# Make sure we have access to SentientHome commons
import os
import platform
import subprocess
import sys
try:
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
except Exception:
exit(1)
# Sentient Home Application
from common.shutil import boolify
import configparser
import inspect
# Storage engine support - might move into a plugin at some point in time
from influxdb import InfluxDBClient
from cement.core.foundation import CementApp
from cement.ext.ext_colorlog import ColorLogHandler
from cement.ext.ext_configparser import ConfigParserConfigHandler
LOG_FORMAT = '%(asctime)s (%(levelname)s) %(namespace)s: %(message)s'
class shLogHandler(ColorLogHandler):
    """Setup default log handler.

    Colorized Cement log handler whose only customization is a common
    SentientHome log format for all output channels.
    """

    class Meta(object):
        """Define log format."""

        # Same format for file, console and debug output so log lines
        # from every channel are uniform and easy to correlate.
        file_format = LOG_FORMAT
        console_format = LOG_FORMAT
        debug_format = LOG_FORMAT
class shConfigHandler(ConfigParserConfigHandler):
    """Setup default config handler.

    Wraps the Cement ConfigParser handler so that reading a missing
    (mandatory) setting terminates the application instead of raising.
    """

    class Meta(object):
        """Setup label."""

        label = 'sh_config_handler'

    def get(self, *args, **kw):
        """Fail mandatory but missing config settings.

        Delegates to the parent handler; on any configparser error the
        application is shut down with exit code 1.
        """
        try:
            return super(shConfigHandler, self).get(*args, **kw)
        except configparser.Error as e:
            # A missing setting without a fallback is treated as fatal.
            # NOTE(review): presumably app.close(1) does not return, so
            # no value comes back from this path - confirm with Cement.
            self.app.log.fatal('Missing configuration setting: %s' % e)
            self.app.close(1)
# Mapping of log level name to colorlog color spec, passed to
# shLogHandler(colors=COLORS) below.
COLORS = {
    'DEBUG': 'cyan',
    'INFO': 'green',
    'WARNING': 'yellow',
    'ERROR': 'red',
    'CRITICAL': 'white,bg_red',
}
class shApp(CementApp):
    """SentientHome application.

    Cement-based application base that wires up configuration, colorized
    logging, the event store (currently InfluxDB), the event engine and
    the event listener, and exposes the resulting settings as read-only
    properties.
    """

    class Meta(object):
        """Setup defaults."""

        config_files = ['~/.config/sentienthome/sentienthome.conf',
                        '/etc/sentienthome.conf']

        extensions = ['colorlog']
        arguments_override_config = True

        # TODO(fxstein): reload_config is currently not supported as of Cement 2.6 due
        # to the fact that pyinotify does not support OSX. Opened an issue with Cement
        # to see if the python watchdog extension could be used instead. Disabling for
        # now to avoid erroring out due to missing package
        # extensions = ['reload_config', 'colorlog']

        log_handler = shLogHandler(colors=COLORS)
        config_handler = 'sh_config_handler'
        handlers = [shConfigHandler]

    def setup(self):
        """Perform application setup.

        Runs core Cement setup, captures git/version information and the
        originating module, reads common settings, then configures the
        event store, event engine and listener before logging the header.
        """
        # always run core setup first
        super(shApp, self).setup()

        # Interrogate git for current version info
        self._initVersion()

        # Lets store who is using this module - used for filenames
        (self._origin_pathname, self._origin_filename) =\
            os.path.split(inspect.stack()[-1][1])

        self._retries = (int)(self.config.get('SentientHome',
                                              'retries',
                                              fallback=10))
        self._retry_interval = (float)(self.config.get('SentientHome',
                                                       'retry_interval',
                                                       fallback=2))
        self._checkpointing = boolify(self.config.get('SentientHome',
                                                      'checkpointing',
                                                      fallback='OFF'))

        # Setup event store and event engine configurations
        self._setEventStore()
        self._setEventEngine()
        self._setListener()

        # Once everything is setup log the application header
        self._LogHeader()

    def _initVersion(self, gitPath=None):
        """Initialize the version and GIT revision.

        Parameters
        ----------
        gitPath : str, optional
            Path to the git binary; defaults to resolving "git" via the
            system PATH.

        Populates _gitVersion, _gitRevision, _gitModifiedFiles and
        _gitDirty; all remain None/unset if git is unavailable.
        """
        if not gitPath:
            gitPath = "git"
            self.log.debug("Git path not specified, using system path.")

        self._gitVersion = None
        self._gitRevision = None
        self._gitDirty = None

        try:
            self._gitVersion = subprocess.check_output(
                [gitPath, "--version"],
                stderr=subprocess.STDOUT).decode("utf-8").strip()
            self._gitRevision = subprocess.check_output(
                [gitPath, "describe", "--tags", "--always", "HEAD"],
                stderr=subprocess.STDOUT).decode("utf-8").strip()
            # Any line in `git status --porcelain` means the tree is dirty
            self._gitModifiedFiles = subprocess.check_output(
                [gitPath, "status", "--porcelain"],
                stderr=subprocess.STDOUT).decode("utf-8").splitlines()
            self._gitDirty = True if self._gitModifiedFiles else False
        except subprocess.CalledProcessError as e:
            self.log.debug(
                "Git information is not available: %s." %
                e.output.decode("utf-8"))
        except Exception as e:
            self.log.debug("Git is not available: %s" % e)

    def _LogHeader(self):
        """Log default logging header for App."""
        # Output default logheader
        self.log.info('#' * 78)
        self.log.info('#')
        self.log.info('# SentientHome')
        self.log.info('#')
        self.log.info('# Module:          %s' % self._origin_filename)
        self.log.info('# Path:            %s' % self._origin_pathname)
        self.log.info('# Revison:         %s' % self._gitRevision)
        self.log.info('# Dirty:           %s' % self._gitDirty)
        self.log.info('#')
        self.log.info('# Git:             %s' % self._gitVersion)
        self.log.info('#')
        self.log.info('# Python:          %s' % platform.python_version())
        # self.log.info('# Python Rev:    %s' % platform.python_revision())
        self.log.info('# Platform:        %s' % platform.platform())
        self.log.info('# Node:            %s' % platform.node())
        # self.log.info('# System:        %s' % platform.system())
        # self.log.info('# Release:       %s' % platform.release())
        # self.log.info('# Version:       %s' % platform.version())
        self.log.info('#')
        self.log.info('# Retries:         %s' % self._retries)
        self.log.info('# Interval:        %ss' % self._retry_interval)
        self.log.info('# Checkpointing:   %s' % self._checkpointing)
        self.log.info('#')
        self.log.info('# Event Store:     %s' % self._event_store)
        self.log.info('#   Host:          %s' % self._event_store_host)
        self.log.info('#   Port:          %s' % self._event_store_port)
        self.log.info('#   User:          %s' % self._event_store_user)
        self.log.info('#   Database:      %s' % self._event_store_db)
        self.log.info('#   Info:          %s' % self._event_store_info)
        self.log.info('#')
        self.log.info('# Event Engine:    %s' % self._event_engine)
        self.log.info('#   Host:          %s' % self._event_engine_addr)
        self.log.info('#   Port:          %s' % self._event_engine_port)
        self.log.info('#   Info:          %s' % self._event_engine_path_safe)
        self.log.info('#')
        self.log.info('# Event Listener:  %s' % self._event_listener)
        self.log.info('#   Host:          %s' % self._listener_path)
        self.log.info('#')
        self.log.info('#' * 78)

    def _setEventStore(self):
        """Configure the event store backend (currently only InfluxDB)."""
        config = self.config

        self._event_store = config.get('SentientHome', 'event_store',
                                       fallback='OFF')

        self._event_store_host = None
        self._event_store_port = None
        self._event_store_db = None
        self._event_store_user = None
        self._event_store_pass = None
        self._event_store_client = None
        self._event_store_info = None
        self._event_store_path = None

        if self._event_store == 'OFF':
            self._event_store_active = 0
        elif self._event_store == 'INFLUXDB':
            self._event_store_active = 1
            self._event_store_host = config.get('influxdb', 'influx_host')
            self._event_store_port = config.get('influxdb', 'influx_port')
            self._event_store_db = config.get('influxdb', 'influx_db')
            self._event_store_user = config.get('influxdb', 'influx_user')
            self._event_store_pass = config.get('influxdb', 'influx_pass')

            try:
                self._event_store_client =\
                    InfluxDBClient(host=self._event_store_host,
                                   port=self._event_store_port,
                                   username=self._event_store_user,
                                   password=self._event_store_pass,
                                   database=self._event_store_db)
            except Exception as e:
                self.log.fatal(e)
                self.log.fatal('Exception creating InfluxDB client: %s' %
                               self._event_store_info)
                # Bug fix: was ``self._app.close(1)`` -- shApp has no
                # ``_app`` attribute; close the app itself as the
                # unsupported-store branch below already does.
                self.close(1)

            # safe event store path without password
            # can be used for reporting and general debugging
            self._event_store_info =\
                self._event_store_host + ':' + \
                self._event_store_port + ';db=' + \
                self._event_store_db + ';user=' + \
                self._event_store_user
            # Full path including credentials; backs the
            # ``event_store_path`` property and must NOT be logged.
            self._event_store_path =\
                self._event_store_info + ';pass=' + self._event_store_pass
        else:
            self.log.fatal('Unsupported event store: %s' % self._event_store)
            self.close(1)

        self.log.debug('Event store @: %s' % self._event_store_info)

    def _setEventEngine(self):
        """Configure the event engine endpoint from config settings."""
        config = self.config

        self._event_engine = config.get('SentientHome', 'event_engine',
                                        fallback='OFF')

        self._event_engine_active = 0
        self._event_engine_addr = None
        self._event_engine_port = None
        self._event_engine_path_safe = None
        self._event_engine_path = None

        if self._event_engine == 'ON':
            self._event_engine_active = 1
            self._event_engine_addr = config.get('SentientHome', 'event_addr')
            self._event_engine_port = config.get('SentientHome', 'event_port')

            self._event_engine_path_safe = \
                self._event_engine_addr + ':' + \
                self._event_engine_port + \
                config.get('SentientHome', 'event_path')

            # TODO(fxstein): Add authentication to event engine
            self._event_engine_path = self._event_engine_path_safe

            self.log.debug('Event engine @: %s' % self._event_engine_path_safe)

    def _setListener(self):
        """Configure the event listener endpoint and auth header."""
        config = self.config

        self._event_listener = config.get('SentientHome', 'listener',
                                          fallback='OFF')

        self._listener_active = 0
        self._listener_path = None
        self._listener_auth = None

        if self._event_listener == 'ON':
            self._listener_active = 1
            self._listener_path = config.get('SentientHome', 'listener_addr')
            api_key = config.get('SentientHome', 'listener_api_key')
            self._listener_auth = {"Authorization": "token %s" % api_key}

            self.log.debug('Listener @: %s' % self._listener_path)

    @property
    def retries(self):
        """Number of retries before we should give up."""
        return self._retries

    @property
    def retry_interval(self):
        """Retry interval in seconds."""
        return self._retry_interval

    @property
    def checkpointing(self):
        """Checkpointing flag."""
        return self._checkpointing

    @property
    def event_store(self):
        """Event store address."""
        return self._event_store

    @property
    def event_store_active(self):
        """Event store flag."""
        return self._event_store_active

    @property
    def event_store_path_safe(self):
        """Event store path without confidential info for e.g. logging."""
        # Bug fix: previously returned self._event_store_path_safe, an
        # attribute that was never assigned anywhere (AttributeError).
        # The password-free path is stored in _event_store_info.
        return self._event_store_info

    @property
    def event_store_path(self):
        """Event store path including all data."""
        # Bug fix: _event_store_path was never assigned before; it is
        # now populated (including credentials) in _setEventStore().
        return self._event_store_path

    @property
    def event_engine_active(self):
        """Event engine flag."""
        return self._event_engine_active

    @property
    def event_engine_path_safe(self):
        """Event engine path without confidential info for e.g. logging."""
        return self._event_engine_path_safe

    @property
    def event_engine_path(self):
        """Event engine path including all data."""
        return self._event_engine_path

    @property
    def origin_filename(self):
        """Origin file name for logging."""
        return self._origin_filename

    @property
    def origin_pathname(self):
        """Origin path name for logging."""
        return self._origin_pathname
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
    # Running this module directly only prints its own path and a marker,
    # serving as a quick import/syntax sanity check.
    import __main__
    print(__main__.__file__)
    print("syntax ok")
    exit(0)
|
|
#!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import os
import platform
import sys
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
# Top-level CLI definition: shared GRR flags parser plus build/repack/deploy
# subcommands. ``args`` is parsed at import time and read by the helper
# functions below.
parser = flags.PARSER

# Default the target architecture to the interpreter's own bitness.
if "32 bit" in sys.version:
  default_arch = "i386"
else:
  default_arch = "amd64"

parser.add_argument(
    "--platform", choices=["darwin", "linux", "windows"],
    default=platform.system().lower(),
    help="The platform to build or repack for. This will default to "
    "the current platform: %s." % platform.system())

parser.add_argument(
    "--arch", choices=["amd64", "i386"],
    default=default_arch,
    help="The architecture to build or repack for.")

parser.add_argument(
    "--package_format", choices=["deb", "rpm"],
    default="deb",
    help="The packaging format to use when building a Linux client.")

# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
    title="subcommands", dest="subparser_name", description="valid subcommands")

# Build arguments.
parser_build = subparsers.add_parser(
    "build", help="Build a client from source.")

parser_repack = subparsers.add_parser(
    "repack", help="Repack a zip file into an installer (Only useful when "
    "signing).")

parser_repack.add_argument("--template", default=None,
                           help="The template zip file to repack.")

parser_repack.add_argument("--output", default=None,
                           help="The path to write the output installer.")

parser_repack.add_argument("--outputdir", default="",
                           help="The directory to which we should write the "
                           "output installer. Installers will be named "
                           "automatically from config options. Incompatible"
                           " with --output")

parser_repack.add_argument("--debug_build", action="store_true", default=False,
                           help="Create a debug client.")

parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
                           help="Additional python files that will be loaded "
                           "as custom plugins.")

parser_deploy = subparsers.add_parser(
    "deploy", help="Build a deployable self installer from a package.")

parser_deploy.add_argument("--template", default=None,
                           help="The template zip file to deploy.")

parser_deploy.add_argument("--templatedir", default="",
                           help="Directory containing template zip files to "
                           "repack. Incompatible with --template")

parser_deploy.add_argument("--output", default=None,
                           help="The path to write the output installer.")

parser_deploy.add_argument("--outputdir", default="",
                           help="The directory to which we should write the "
                           "output installer. Installers will be named "
                           "automatically from config options. Incompatible"
                           " with --output")

parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
                           help="Additional python files that will be loaded "
                           "as custom plugins.")

parser_deploy.add_argument("--debug_build", action="store_true", default=False,
                           help="Create a debug client.")

# Parse eagerly; GetBuilder/GetDeployer/TemplateInputFilename read this global.
args = parser.parse_args()
def GetBuilder(context):
  """Get the appropriate builder based on the selected flags."""
  try:
    # Map the requested platform (and Linux package format) onto the
    # context prefix and builder class to instantiate.
    if args.platform == "darwin":
      prefix, builder_cls = ["Platform:Darwin"], builders.DarwinClientBuilder
    elif args.platform == "windows":
      prefix, builder_cls = ["Platform:Windows"], builders.WindowsClientBuilder
    elif args.platform == "linux":
      if args.package_format == "deb":
        prefix, builder_cls = ["Platform:Linux"], builders.LinuxClientBuilder
      else:
        prefix, builder_cls = (["Platform:Linux", "Target:LinuxRpm"],
                               builders.CentosClientBuilder)
    else:
      parser.error("Unsupported build platform: %s" % args.platform)

    context = prefix + context
  except AttributeError:
    # The builder class for this platform is unavailable on this host.
    raise RuntimeError("Unable to build for platform %s when running "
                       "on current platform." % args.platform)

  return builder_cls(context=context)
def GetDeployer(context):
  """Get the appropriate client deployer based on the selected flags."""
  # Select the context prefix and deployer class for the target platform.
  if args.platform == "darwin":
    prefix, deployer_cls = ["Platform:Darwin"], build.DarwinClientDeployer
  elif args.platform == "windows":
    prefix, deployer_cls = ["Platform:Windows"], build.WindowsClientDeployer
  elif args.platform == "linux":
    if args.package_format == "deb":
      prefix, deployer_cls = ["Platform:Linux"], build.LinuxClientDeployer
    else:
      prefix, deployer_cls = (["Platform:Linux", "Target:LinuxRpm"],
                              build.CentosClientDeployer)
  else:
    parser.error("Unsupported build platform: %s" % args.platform)

  return deployer_cls(context=prefix + context)
def TemplateInputFilename(context):
  """Build template file name from config."""
  # Without an explicit template directory there is nothing to resolve.
  if not args.templatedir:
    return None

  filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
                                   context=context)
  return os.path.join(args.templatedir, filename)
def main(_):
  """Launch the appropriate builder.

  Dispatches on the chosen subcommand: "build" compiles an executable
  template, "repack" re-signs/repacks an existing template zip, and
  "deploy" produces a self-installing binary from a template.
  """
  config_lib.CONFIG.AddContext(
      "ClientBuilder Context",
      "Context applied when we run the client builder script.")

  startup.ClientInit()

  # The following is used to change the identity of the builder based on the
  # target platform.
  context = flags.FLAGS.context
  if args.arch == "amd64":
    context.append("Arch:amd64")
  else:
    context.append("Arch:i386")

  if args.subparser_name == "build":
    builder_obj = GetBuilder(context)
    builder_obj.MakeExecutableTemplate()

  elif args.subparser_name == "repack":
    if args.plugins:
      config_lib.CONFIG.Set("Client.plugins", args.plugins)

    if args.debug_build:
      context += ["DebugClientBuild Context"]
    deployer = GetDeployer(context)

    # Explicit --output wins; otherwise derive the name from config.
    output_filename = os.path.join(
        args.outputdir, config_lib.CONFIG.Get(
            "ClientBuilder.output_filename", context=deployer.context))

    deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
                             output_filename)

  elif args.subparser_name == "deploy":
    if args.plugins:
      config_lib.CONFIG.Set("Client.plugins", args.plugins)

    if args.debug_build:
      context += ["DebugClientBuild Context"]
    deployer = GetDeployer(context)

    # Template resolution order: --template, then --templatedir, then config.
    template_path = (args.template or TemplateInputFilename(deployer.context) or
                     config_lib.CONFIG.Get("ClientBuilder.template_path",
                                           context=deployer.context))

    # If neither output filename or output directory is specified,
    # use the default location from the config file.
    output = None
    if args.output:
      output = args.output
    elif args.outputdir:
      # If output filename isn't specified, write to args.outputdir with a
      # .deployed extension so we can distinguish it from repacked binaries.
      filename = ".".join(
          (config_lib.CONFIG.Get("ClientBuilder.output_filename",
                                 context=deployer.context), "deployed"))
      output = os.path.join(args.outputdir, filename)

    deployer.MakeDeployableBinary(template_path, output)
if __name__ == "__main__":
  # Hand control to the GRR flags framework, which parses flags and
  # invokes main().
  flags.StartMain(main)
|
|
"""Console script entry point for AutoNetkit"""
import os
import random
import time
import sys
import traceback
from datetime import datetime
import autonetkit.ank_json as ank_json
import autonetkit.ank_messaging as ank_messaging
import autonetkit.config as config
import autonetkit.log as log
import autonetkit.render as render
import pkg_resources
from autonetkit import update_http
from autonetkit.nidb import NIDB
# TODO: make if measure set, then not compile - or warn if both set, as
# don't want to regen topology when measuring
# Resolve the installed AutoNetkit version; fall back to "dev" when
# running from a source checkout without installed package metadata.
try:
    ANK_VERSION = pkg_resources.get_distribution("autonetkit").version
except pkg_resources.DistributionNotFound:
    ANK_VERSION = "dev"
def file_monitor(filename):
    """Generator based function to check if a file has changed"""
    previous_mtime = os.stat(filename).st_mtime
    while True:
        # Yield True exactly when the modification time has advanced
        # since the last observation, False otherwise.
        current_mtime = os.stat(filename).st_mtime
        changed = current_mtime > previous_mtime
        if changed:
            previous_mtime = current_mtime
        yield changed
def manage_network(input_graph_string, timestamp, build_options, reload_build=False, grid=None):
    """Build, compile, render network as appropriate.

    Drives one full pipeline pass according to the flags in
    ``build_options`` (build/compile/render/validate/diff/deploy/measure).
    ``input_graph_string`` is the raw topology input; ``grid`` is an
    alternative n*n generated topology. ``reload_build`` re-imports the
    build module (used by monitor mode).
    """
    # import build_network_simple as build_network
    import autonetkit.build_network as build_network

    if reload_build:
        # remap?
        build_network = reload(build_network)

    if build_options['build']:
        # Input comes either from a file/stdin string or a generated grid.
        if input_graph_string:
            graph = build_network.load(input_graph_string)
        elif grid:
            graph = build_network.grid_2d(grid)

        anm = build_network.build(graph)
        if not build_options['compile']:
            # Not compiling, so publish the ANM to the web visualisation now.
            update_http(anm)

        if build_options['validate']:
            import autonetkit.ank_validate
            try:
                autonetkit.ank_validate.validate(anm)
            except Exception, e:
                log.warning("Unable to validate topologies: %s" % e)

    if build_options['compile']:
        if build_options['archive']:
            anm.save()
        nidb = compile_network(anm)

        update_http(anm, nidb)

        log.debug("Sent ANM to web server")
        if build_options['archive']:
            nidb.save()
        # render.remove_dirs(["rendered"])
        if build_options['render']:
            render.render(nidb)

    if not(build_options['build'] or build_options['compile']):
        # Load from last run
        import autonetkit.anm
        anm = autonetkit.anm.AbstractNetworkModel()
        anm.restore_latest()
        nidb = NIDB()
        nidb.restore_latest()
        update_http(anm, nidb)

    if build_options['diff']:
        import autonetkit.diff
        nidb_diff = autonetkit.diff.nidb_diff()
        import json
        data = json.dumps(nidb_diff, cls=ank_json.AnkEncoder, indent=4)
        log.info("Wrote diff to diff.json")
        # TODO: make file specified in config
        with open("diff.json", "w") as fh:
            fh.write(data)

    if build_options['deploy']:
        deploy_network(anm, nidb, input_graph_string)

    if build_options['measure']:
        measure_network(anm, nidb)

    log.info("Finished")
def parse_options(argument_string=None):
    """Parse user-provided options.

    Parameters
    ----------
    argument_string : str, optional
        Whitespace-separated argument string to parse instead of the
        process command line (useful for testing).

    Returns
    -------
    argparse.Namespace
        The parsed options.
    """
    import argparse
    usage = "autonetkit -f input.graphml"
    version = "%(prog)s " + str(ANK_VERSION)
    # Compatibility fix: ArgumentParser(version=...) was deprecated in
    # Python 2.7 and removed in Python 3. Registering the version action
    # explicitly preserves the same -v/--version behaviour on both.
    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument('-v', '--version', action='version', version=version)

    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument(
        '--file', '-f', default=None, help="Load topology from FILE")
    input_group.add_argument('--stdin', action="store_true", default=False,
                             help="Load topology from STDIN")

    parser.add_argument(
        '--monitor', '-m', action="store_true", default=False,
        help="Monitor input file for changes")
    parser.add_argument('--debug', action="store_true",
                        default=False, help="Debug mode")
    parser.add_argument('--quiet', action="store_true",
                        default=False, help="Quiet mode (only display warnings and errors)")
    parser.add_argument('--diff', action="store_true", default=False,
                        help="Diff NIDB")
    parser.add_argument('--compile', action="store_true",
                        default=False, help="Compile")
    parser.add_argument(
        '--build', action="store_true", default=False, help="Build")
    parser.add_argument(
        '--render', action="store_true", default=False, help="Compile")
    parser.add_argument(
        '--validate', action="store_true", default=False, help="Validate")
    parser.add_argument('--deploy', action="store_true",
                        default=False, help="Deploy")
    parser.add_argument('--archive', action="store_true", default=False,
                        help="Archive ANM, NIDB, and IP allocations")
    parser.add_argument('--measure', action="store_true",
                        default=False, help="Measure")
    parser.add_argument(
        '--webserver', action="store_true", default=False, help="Webserver")
    parser.add_argument('--grid', type=int, help="Grid Size (n * n)")
    parser.add_argument(
        '--target', choices=['netkit', 'cisco'], default=None)
    parser.add_argument(
        '--vis_uuid', default=None, help="UUID for multi-user visualisation")

    if argument_string:
        arguments = parser.parse_args(argument_string.split())
    else:
        # from command line arguments
        arguments = parser.parse_args()
    return arguments
def main(options):
    """Run one AutoNetkit pass and optionally monitor the input for changes.

    ``options`` is the argparse.Namespace from parse_options(); command
    line flags are OR-ed with the corresponding config file settings.
    """
    settings = config.settings
    if options.vis_uuid:
        config.settings['Http Post']['uuid'] = options.vis_uuid

    try:
        # test if can import, if not present will fail and not add to template
        # path
        import autonetkit_cisco
    except ImportError:
        pass
    else:
        import autonetkit_cisco.version
        version_banner = autonetkit_cisco.version.banner()
        log.info("%s" % version_banner)

    log.info("AutoNetkit %s" % ANK_VERSION)

    if options.target == "cisco":
        # output target is Cisco
        log.info("Setting output target as Cisco")
        settings['Graphml']['Node Defaults']['platform'] = "VIRL"
        settings['Graphml']['Node Defaults']['host'] = "internal"
        settings['Graphml']['Node Defaults']['syntax'] = "ios_xr"
        settings['Compiler']['Cisco']['to memory'] = 1
        settings['General']['deploy'] = 1
        settings['Deploy Hosts']['internal'] = {'VIRL':
                                                {'deploy': 1}}

    if options.debug or settings['General']['debug']:
        # TODO: fix this
        import logging
        logger = logging.getLogger("ANK")
        logger.setLevel(logging.DEBUG)

    if options.quiet or settings['General']['quiet']:
        import logging
        logger = logging.getLogger("ANK")
        logger.setLevel(logging.WARNING)

    # Each pipeline stage runs if enabled on the CLI or in the config file.
    build_options = {
        'compile': options.compile or settings['General']['compile'],
        'render': options.render or settings['General']['render'],
        'validate': options.validate or settings['General']['validate'],
        'build': options.build or settings['General']['build'],
        'deploy': options.deploy or settings['General']['deploy'],
        'measure': options.measure or settings['General']['measure'],
        'monitor': options.monitor or settings['General']['monitor'],
        'diff': options.diff or settings['General']['diff'],
        'archive': options.archive or settings['General']['archive'],
    }

    if options.webserver:
        log.info("Webserver not yet supported, please run as seperate module")

    # Resolve the topology input source: file, stdin, or generated grid.
    if options.file:
        with open(options.file, "r") as fh:
            input_string = fh.read()
        timestamp = os.stat(options.file).st_mtime
    elif options.stdin:
        import sys
        input_string = sys.stdin
        now = datetime.now()
        timestamp = now.strftime("%Y%m%d_%H%M%S_%f")
    elif options.grid:
        input_string = ""
        now = datetime.now()
        timestamp = now.strftime("%Y%m%d_%H%M%S_%f")
    else:
        log.info("No input file specified. Exiting")
        raise SystemExit

    try:
        manage_network(input_string, timestamp,
                       build_options=build_options, grid=options.grid)
    except Exception, err:
        log.error(
            "Error generating network configurations: %s. More information may be available in the debug log." % err)
        log.debug("Error generating network configurations", exc_info=True)
        if settings['General']['stack_trace']:
            print traceback.print_exc()
        import sys
        sys.exit("Unable to build configurations.")

    # TODO: work out why build_options is being clobbered for monitor mode
    build_options['monitor'] = options.monitor or settings['General'][
        'monitor']

    if build_options['monitor']:
        try:
            log.info("Monitoring for updates...")
            input_filemonitor = file_monitor(options.file)
            #build_filemonitor = file_monitor("autonetkit/build_network.py")
            while True:
                time.sleep(1)
                rebuild = False
                reload_build = False
                if input_filemonitor.next():
                    rebuild = True
                # if build_filemonitor.next():
                    #reload_build = True
                    #rebuild = True

                if rebuild:
                    try:
                        log.info("Input graph updated, recompiling network")
                        with open(options.file, "r") as fh:
                            input_string = fh.read()  # read updates
                        manage_network(input_string,
                                       timestamp, build_options, reload_build)
                        log.info("Monitoring for updates...")
                    except Exception, e:
                        log.warning("Unable to build network %s" % e)
                        traceback.print_exc()

        except KeyboardInterrupt:
            log.info("Exiting")
def create_nidb(anm):
    """Create the NIDB (device model) from the compiled ANM overlays.

    Copies physical nodes, broadcast-domain nodes and the relevant edges
    from the ANM into a fresh NIDB, along with graphics metadata.
    """
    nidb = NIDB()
    g_phy = anm['phy']
    g_ip = anm['ip']
    g_graphics = anm['graphics']
    nidb.add_nodes_from(
        g_phy, retain=['label', 'host', 'platform', 'Network', 'update', 'asn'])

    cd_nodes = [n for n in g_ip.nodes(
        "broadcast_domain") if not n.is_switch()]  # Only add created cds - otherwise overwrite host of switched

    nidb.add_nodes_from(
        cd_nodes, retain=['label', 'host'], broadcast_domain=True)

    # Record the allocated subnets on each broadcast-domain node.
    for node in nidb.nodes("broadcast_domain"):
        ipv4_node = anm['ipv4'].node(node)
        if ipv4_node:
            node.ipv4_subnet = ipv4_node.subnet
            #TODO: copy across IPv6 seperately
            # NOTE(review): the IPv6 subnet is read through the ipv4
            # node's 'ipv6' overlay view - confirm this is intentional.
            node.ipv6_subnet = ipv4_node['ipv6'].subnet

    # add edges to switches
    edges_to_add = [edge for edge in g_phy.edges()
                    if edge.src.is_switch() or edge.dst.is_switch()]
    # cd edges from split
    edges_to_add += [edge for edge in g_ip.edges() if edge.split]
    nidb.add_edges_from(edges_to_add)

    nidb.copy_graphics(g_graphics)

    return nidb
def compile_network(anm):
    """Compile per-platform device configurations from the ANM.

    Iterates the configured compile targets, instantiates the matching
    platform compiler and runs it when any devices are assigned to that
    (host, platform) pair. Returns the populated NIDB.
    """
    nidb = create_nidb(anm)
    g_phy = anm['phy']

    for target, target_data in config.settings['Compile Targets'].items():
        host = target_data['host']
        platform = target_data['platform']
        if platform == "netkit":
            import autonetkit.compilers.platform.netkit as pl_netkit
            platform_compiler = pl_netkit.NetkitCompiler(nidb, anm, host)
        elif platform == "VIRL":
            try:
                import autonetkit_cisco.compilers.platform.cisco as pl_cisco
                platform_compiler = pl_cisco.CiscoCompiler(nidb, anm, host)
            except ImportError, e:
                print e
                log.warning("Unable to load VIRL platform compiler")
        elif platform == "dynagen":
            import autonetkit.compilers.platform.dynagen as pl_dynagen
            platform_compiler = pl_dynagen.DynagenCompiler(nidb, anm, host)
        elif platform == "junosphere":
            import autonetkit.compilers.platform.junosphere as pl_junosphere
            platform_compiler = pl_junosphere.JunosphereCompiler(
                nidb, anm, host)

        # NOTE(review): if ``platform`` matches none of the branches above
        # (or the VIRL import failed), ``platform_compiler`` is unbound or
        # left over from a previous iteration here - confirm the config
        # can only contain the platforms handled above.
        if any(g_phy.nodes(host=host, platform=platform)):
            log.info("Compiling configurations for %s on %s" % (platform, host))
            platform_compiler.compile()  # only compile if hosts set
        else:
            log.debug("No devices set for %s on %s" % (platform, host))

    return nidb
def deploy_network(anm, nidb, input_graph_string=None):
    """Deploy rendered configurations to the hosts in ``Deploy Hosts``.

    For each (hostname, platform) pair that has devices in *nidb* and
    ``deploy`` enabled, packages the rendered configs and pushes them with
    the platform-specific deployer.  ``input_graph_string`` is the original
    input topology source; the internal/VIRL path uses it to decide whether
    to build new XML.
    """
    log.info("Deploying Network")
    deploy_hosts = config.settings['Deploy Hosts']
    for hostname, host_data in deploy_hosts.items():
        for platform, platform_data in host_data.items():
            # skip targets with no compiled devices
            if not any(nidb.nodes(host=hostname, platform=platform)):
                log.debug("No hosts for (host, platform) (%s, %s), skipping deployment"
                          % (hostname, platform))
                continue
            if not platform_data['deploy']:
                log.debug("Not deploying to %s on %s" % (platform, hostname))
                continue
            config_path = os.path.join("rendered", hostname, platform)
            if hostname == "internal":
                try:
                    from autonetkit_cisco import deploy as cisco_deploy
                except ImportError:
                    pass # development module, may not be available
                # NOTE(review): if the import above failed, cisco_deploy is
                # unbound and the VIRL branch below raises NameError — confirm
                # whether "internal" + VIRL is only used when the module exists.
                if platform == "VIRL":
                    create_new_xml = False
                    if not input_graph_string:
                        create_new_xml = True  # no input, eg if came from grid
                    elif anm['input'].data['file_type'] == "graphml":
                        create_new_xml = True  # input from graphml, create XML
                    if create_new_xml:
                        cisco_deploy.create_xml(anm, nidb, input_graph_string)
                    else:
                        cisco_deploy.package(nidb, config_path,
                                             input_graph_string)
                continue
            # external deployment over SSH
            username = platform_data['username']
            key_file = platform_data['key_file']
            host = platform_data['host']
            if platform == "netkit":
                import autonetkit.deploy.netkit as netkit_deploy
                tar_file = netkit_deploy.package(config_path, "nklab")
                netkit_deploy.transfer(
                    host, username, tar_file, tar_file, key_file)
                netkit_deploy.extract(host, username, tar_file,
                                      config_path, timeout=60, key_filename=key_file,
                                      parallel_count=10)
            if platform == "VIRL":
                # TODO: check why using nklab here
                cisco_deploy.package(config_path, "nklab")
def console_entry():
    """Entry point used by the console-script wrapper."""
    main(parse_options())
if __name__ == "__main__":
    # Run directly as a script; exit quietly on Ctrl-C.
    try:
        main(parse_options())
    except KeyboardInterrupt:
        pass
|
|
import copy
import logging
import collections
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.chop as s_chop
import synapse.lib.time as s_time
import synapse.lib.layer as s_layer
import synapse.lib.stormtypes as s_stormtypes
logger = logging.getLogger(__name__)
class Node:
    '''
    A Cortex hypergraph node.
    NOTE: This object is for local Cortex use during a single Xact.
    '''
    def __init__(self, snap, sode, bylayer=None):
        # snap: the Snap transaction this node belongs to
        # sode: (buid, info) storage-node tuple from the layers
        self.snap = snap
        self.sode = sode
        self.buid = sode[0]
        # Tracks which property is retrieved from which layer
        self.bylayer = bylayer
        # ndef is the (form, valu) node definition from the sode
        self.ndef = sode[1].get('ndef')
        self.form = snap.core.model.form(self.ndef[0])
        # default each optional sode field to an empty dict so callers
        # can use the attributes without None checks
        self.props = sode[1].get('props')
        if self.props is None:
            self.props = {}
        self.tags = sode[1].get('tags')
        if self.tags is None:
            self.tags = {}
        self.tagprops = sode[1].get('tagprops')
        if self.tagprops is None:
            self.tagprops = {}
        self.nodedata = sode[1].get('nodedata')
        if self.nodedata is None:
            self.nodedata = {}
    async def getStorNodes(self):
        '''
        Return a list of the raw storage nodes for each layer.
        '''
        return await self.snap.view.getStorNodes(self.buid)
    def getByLayer(self):
        '''
        Return a dictionary that translates the node's bylayer dict to a primitive.
        '''
        ndef = self.bylayer.get('ndef')
        tags = {t: l for (t, l) in self.bylayer.get('tags', {}).items()}
        props = {p: l for (p, l) in self.bylayer.get('props', {}).items()}
        tagprops = {p: l for (p, l) in self.bylayer.get('tagprops', {}).items()}
        return {'ndef': ndef, 'props': props, 'tags': tags, 'tagprops': tagprops}
    def __repr__(self):
        # include the packed node for easy debugging
        return f'Node{{{self.pack()}}}'
    async def addEdge(self, verb, n2iden):
        '''
        Add a light edge with the given verb to the node identified by n2iden.
        '''
        if self.form.isrunt:
            mesg = f'Edges cannot be used with runt nodes: {self.form.full}'
            raise s_exc.IsRuntForm(mesg=mesg, form=self.form.full)
        async with self.snap.getNodeEditor(self) as editor:
            return await editor.addEdge(verb, n2iden)
    async def delEdge(self, verb, n2iden):
        '''
        Delete the light edge with the given verb to the node identified by n2iden.
        '''
        if not s_common.isbuidhex(n2iden):
            mesg = f'delEdge() got an invalid node iden: {n2iden}'
            raise s_exc.BadArg(mesg=mesg)
        nodeedits = (
            (self.buid, self.form.name, (
                (s_layer.EDIT_EDGE_DEL, (verb, n2iden), ()),
            )),
        )
        await self.snap.applyNodeEdits(nodeedits)
    async def iterEdgesN1(self, verb=None):
        # yield (verb, n2iden) for edges where this node is N1
        async for edge in self.snap.iterNodeEdgesN1(self.buid, verb=verb):
            yield edge
    async def iterEdgesN2(self, verb=None):
        # yield edges where this node is N2
        async for edge in self.snap.iterNodeEdgesN2(self.buid, verb=verb):
            yield edge
    async def storm(self, runt, text, opts=None, path=None):
        '''
        Execute a storm query with this node as input and yield (node, path) results.
        Args:
            path (Path):
                If set, then vars from path are copied into the new runtime, and vars are copied back out into path
                at the end
        Note:
            If opts is not None and opts['vars'] is set and path is not None, then values of path vars take precedent
        '''
        query = await self.snap.core.getStormQuery(text)
        if opts is None:
            opts = {}
        opts.setdefault('vars', {})
        if path is not None:
            opts['vars'].update(path.vars)
        async with runt.getSubRuntime(query, opts=opts) as subr:
            subr.addInput(self)
            async for subn, subp in subr.execute():
                yield subn, subp
            # copy any var changes back out to the caller's path
            if path is not None:
                path.vars.update(subr.vars)
    async def filter(self, runt, text, opts=None, path=None):
        # NOTE: inverted semantics -- returns False as soon as the query
        # yields anything, True if it yields nothing (node filtered out).
        async for item in self.storm(runt, text, opts=opts, path=path):
            return False
        return True
    def iden(self):
        # hex string form of the node's buid
        return s_common.ehex(self.buid)
    def pack(self, dorepr=False):
        '''
        Return the serializable/packed version of the node.
        Args:
            dorepr (bool): Include repr information for human readable versions of properties.
        Returns:
            (tuple): An (ndef, info) node tuple.
        '''
        node = (self.ndef, {
            'iden': self.iden(),
            'tags': self.tags,
            'props': self.props,
            'tagprops': self.tagprops,
            'nodedata': self.nodedata,
        })
        if dorepr:
            # only include the primary repr when it differs from the raw value
            rval = self.repr()
            if rval is not None and rval != self.ndef[1]:
                node[1]['repr'] = self.repr()
            node[1]['reprs'] = self.reprs()
            node[1]['tagpropreprs'] = self.tagpropreprs()
        return node
    async def getEmbeds(self, embeds):
        '''
        Return a dictionary of property embeddings.
        '''
        retn = {}
        # cache resolved nodes by buid to avoid repeated lookups
        cache = {}
        async def walk(n, p):
            # follow property p of node n to the node it references (or None)
            valu = n.props.get(p)
            if valu is None:
                return None
            prop = n.form.prop(p)
            if prop is None:
                return None
            if prop.modl.form(prop.type.name) is None:
                return None
            buid = s_common.buid((prop.type.name, valu))
            step = cache.get(buid, s_common.novalu)
            if step is s_common.novalu:
                step = cache[buid] = await node.snap.getNodeByBuid(buid)
            return step
        for nodepath, relprops in embeds.items():
            # nodepath is a '::'-separated chain of property hops
            steps = nodepath.split('::')
            node = self
            for propname in steps:
                node = await walk(node, propname)
                if node is None:
                    break
            if node is None:
                continue
            embdnode = retn.get(nodepath)
            if embdnode is None:
                embdnode = retn[nodepath] = {}
            embdnode['*'] = s_common.ehex(node.buid)
            for relp in relprops:
                embdnode[relp] = node.props.get(relp)
        return retn
    async def seen(self, tick, source=None):
        '''
        Update the .seen interval and optionally a source specific seen node.
        '''
        await self.set('.seen', tick)
        if source is not None:
            seen = await self.snap.addNode('meta:seen', (source, self.ndef))
            await seen.set('.seen', tick)
    def getNodeRefs(self):
        '''
        Return a list of (prop, (form, valu)) refs out for the node.
        '''
        retn = []
        refs = self.form.getRefsOut()
        # direct property refs
        for name, dest in refs.get('prop', ()):
            valu = self.props.get(name)
            if valu is None:
                continue
            retn.append((name, (dest, valu)))
        # ndef-valued props already carry their (form, valu)
        for name in refs.get('ndef', ()):
            valu = self.props.get(name)
            if valu is None:
                continue
            retn.append((name, valu))
        # array props contribute one ref per element
        for name, dest in refs.get('array', ()):
            valu = self.props.get(name)
            if valu is None:
                continue
            for item in valu:
                retn.append((name, (dest, item)))
        return retn
    async def set(self, name, valu, init=False):
        '''
        Set a property on the node.
        Args:
            name (str): The name of the property.
            valu (obj): The value of the property.
            init (bool): Set to True to disable read-only enforcement
        Returns:
            (bool): True if the property was changed.
        '''
        if self.snap.readonly:
            mesg = 'Cannot set property in read-only mode.'
            raise s_exc.IsReadOnly(mesg=mesg)
        prop = self.form.props.get(name)
        if prop is None:
            mesg = f'No property named {name} on form {self.form.name}.'
            await self.snap._raiseOnStrict(s_exc.NoSuchProp, mesg)
            return False
        if self.form.isrunt:
            # runt nodes are handled by registered callbacks, not storage edits
            if prop.info.get('ro'):
                mesg = 'Cannot set read-only props on runt nodes'
                raise s_exc.IsRuntForm(mesg=mesg, form=self.form.full, prop=name, valu=valu)
            await self.snap.core.runRuntPropSet(self, prop, valu)
            return True
        async with self.snap.getNodeEditor(self) as editor:
            return await editor.set(name, valu)
    def has(self, name):
        # True if the secondary property is present on the node
        return name in self.props
    def get(self, name):
        '''
        Return a secondary property value from the Node.
        Args:
            name (str): The name of a secondary property.
        Returns:
            (obj): The secondary property value or None.
        '''
        if name.startswith('#'):
            # '#tag' syntax returns the tag's ival value
            return self.tags.get(name[1:])
        return self.props.get(name)
    async def _getPropDelEdits(self, name, init=False):
        # Build (but do not apply) the edit list to delete property `name`.
        # Returns an empty tuple when there is nothing to do.
        prop = self.form.prop(name)
        if prop is None:
            if self.snap.strict:
                mesg = f'No property named {name}.'
                raise s_exc.NoSuchProp(mesg=mesg, name=name, form=self.form.name)
            await self.snap.warn(f'No Such Property: {name}')
            return ()
        if not init:
            # enforce read-only unless the caller explicitly overrides
            if prop.info.get('ro'):
                if self.snap.strict:
                    raise s_exc.ReadOnlyProp(name=name)
                await self.snap.warn(f'Property is read-only: {name}')
                return ()
        curv = self.props.get(name, s_common.novalu)
        if curv is s_common.novalu:
            return ()
        edits = (
            (s_layer.EDIT_PROP_DEL, (prop.name, None, prop.type.stortype), ()),
        )
        return edits
    async def pop(self, name, init=False):
        '''
        Remove a property from a node and return the value
        '''
        if self.form.isrunt:
            prop = self.form.prop(name)
            if prop.info.get('ro'):
                raise s_exc.IsRuntForm(mesg='Cannot delete read-only props on runt nodes',
                                       form=self.form.full, prop=name)
            return await self.snap.core.runRuntPropDel(self, prop)
        edits = await self._getPropDelEdits(name, init=init)
        if not edits:
            return False
        await self.snap.applyNodeEdit((self.buid, self.form.name, edits))
        self.props.pop(name, None)
        return True
    def repr(self, name=None, defv=None):
        # Human readable repr of the primary value (name=None) or of a
        # secondary property; defv is returned when the prop is unset.
        if name is None:
            return self.form.type.repr(self.ndef[1])
        prop = self.form.props.get(name)
        if prop is None:
            mesg = f'No property named {name}.'
            raise s_exc.NoSuchProp(mesg=mesg, form=self.form.name, prop=name)
        valu = self.props.get(name)
        if valu is None:
            return defv
        return prop.type.repr(valu)
    def reprs(self):
        '''
        Return a dictionary of repr values for props whose repr is different than
        the system mode value.
        '''
        reps = {}
        for name, valu in self.props.items():
            prop = self.form.prop(name)
            if prop is None:
                continue
            rval = prop.type.repr(valu)
            if rval is None or rval == valu:
                continue
            reps[name] = rval
        return reps
    def tagpropreprs(self):
        '''
        Return a dictionary of repr values for tagprops whose repr is different than
        the system mode value.
        '''
        reps = collections.defaultdict(dict)
        for tag, propdict in self.tagprops.items():
            for name, valu in propdict.items():
                prop = self.form.modl.tagprop(name)
                if prop is None:
                    continue
                rval = prop.type.repr(valu)
                if rval is None or rval == valu:
                    continue
                reps[tag][name] = rval
        return dict(reps)
    def hasTag(self, name):
        # normalize (strip '#', etc.) before checking
        name = s_chop.tag(name)
        return name in self.tags
    def getTag(self, name, defval=None):
        # return the tag's ival value, or defval when not present
        name = s_chop.tag(name)
        return self.tags.get(name, defval)
    def getTags(self, leaf=False):
        # return (tag, valu) pairs; leaf=True returns only leaf tags
        if not leaf:
            return list(self.tags.items())
        # longest first
        retn = []
        # brute force rather than build a tree. faster in small sets.
        for _, tag, valu in sorted([(len(t), t, v) for (t, v) in self.tags.items()], reverse=True):
            look = tag + '.'
            if any([r.startswith(look) for (r, rv) in retn]):
                continue
            retn.append((tag, valu))
        return retn
    async def addTag(self, tag, valu=(None, None)):
        '''
        Add a tag to a node.
        Args:
            tag (str): The tag to add to the node.
            valu: The optional tag value.  If specified, this must be a value that
                  norms as a valid time interval as an ival.
        Returns:
            None: This returns None.
        '''
        if self.form.isrunt:
            raise s_exc.IsRuntForm(mesg='Cannot add tags to runt nodes.',
                                   form=self.form.full, tag=tag)
        async with self.snap.getNodeEditor(self) as protonode:
            await protonode.addTag(tag, valu=valu)
    def _getTagTree(self):
        # Build a (fullname, kids) tree of the node's tags for prune logic.
        root = (None, {})
        for tag in self.tags.keys():
            node = root
            for part in tag.split('.'):
                kidn = node[1].get(part)
                if kidn is None:
                    full = part
                    if node[0] is not None:
                        full = f'{node[0]}.{full}'
                    kidn = node[1][part] = (full, {})
                node = kidn
        return root
    async def _getTagDelEdits(self, tag, init=False):
        # Build (but do not apply) the edits to delete `tag`, all of its
        # subtags, their tagprops, and any prunable parent tags.
        path = s_chop.tagpath(tag)
        name = '.'.join(path)
        if self.form.isrunt:
            raise s_exc.IsRuntForm(mesg='Cannot delete tags from runt nodes.',
                                   form=self.form.full, tag=tag)
        curv = self.tags.get(name, s_common.novalu)
        if curv is s_common.novalu:
            return ()
        pref = name + '.'
        # all subtags, keyed by length so deeper tags are deleted first
        todel = [(len(t), t) for t in self.tags.keys() if t.startswith(pref)]
        if len(path) > 1:
            parent = '.'.join(path[:-1])
            # retrieve a list of prunable tags
            prune = await self.snap.core.getTagPrune(parent)
            if prune:
                tree = self._getTagTree()
                for prunetag in reversed(prune):
                    node = tree
                    for step in prunetag.split('.'):
                        node = node[1].get(step)
                        if node is None:
                            break
                    # only prune a parent that has exactly one child branch
                    if node is not None and len(node[1]) == 1:
                        todel.append((len(node[0]), node[0]))
                        continue
                    break
        todel.sort(reverse=True)
        # order matters...
        edits = []
        for _, subtag in todel:
            edits.extend(self._getTagPropDel(subtag))
            edits.append((s_layer.EDIT_TAG_DEL, (subtag, None), ()))
        edits.extend(self._getTagPropDel(name))
        edits.append((s_layer.EDIT_TAG_DEL, (name, None), ()))
        return edits
    async def delTag(self, tag, init=False):
        '''
        Delete a tag from the node.
        '''
        edits = await self._getTagDelEdits(tag, init=init)
        if edits:
            nodeedit = (self.buid, self.form.name, edits)
            await self.snap.applyNodeEdit(nodeedit)
    def _getTagPropDel(self, tag):
        # Build the edits to delete every tagprop present on `tag`.
        edits = []
        for tagprop in self.getTagProps(tag):
            prop = self.snap.core.model.getTagProp(tagprop)
            if prop is None:  # pragma: no cover
                logger.warn(f'Cant delete tag prop ({tagprop}) without model prop!')
                continue
            edits.append((s_layer.EDIT_TAGPROP_DEL, (tag, tagprop, None, prop.type.stortype), ()))
        return edits
    def getTagProps(self, tag):
        # return the list of tagprop names present on `tag`
        propdict = self.tagprops.get(tag)
        if not propdict:
            return []
        return list(propdict.keys())
    def hasTagProp(self, tag, prop):
        '''
        Check if a #foo.bar:baz tag property exists on the node.
        '''
        return tag in self.tagprops and prop in self.tagprops[tag]
    def getTagProp(self, tag, prop, defval=None):
        '''
        Return the value (or defval) of the given tag property.
        '''
        propdict = self.tagprops.get(tag)
        if propdict:
            return propdict.get(prop, defval)
        return defval
    async def setTagProp(self, tag, name, valu):
        '''
        Set the value of the given tag property.
        '''
        async with self.snap.getNodeEditor(self) as editor:
            await editor.setTagProp(tag, name, valu)
    async def delTagProp(self, tag, name):
        # Delete a single tag property.  Returns False when there is nothing
        # to delete; NOTE(review): unlike pop(), the success path returns
        # None implicitly rather than True -- confirm callers don't rely on it.
        prop = self.snap.core.model.getTagProp(name)
        if prop is None:
            raise s_exc.NoSuchTagProp(name=name)
        propdict = self.tagprops.get(tag)
        if not propdict:
            return False
        curv = propdict.get(name, s_common.novalu)
        if curv is s_common.novalu:
            return False
        edits = (
            (s_layer.EDIT_TAGPROP_DEL, (tag, name, None, prop.type.stortype), ()),
        )
        await self.snap.applyNodeEdit((self.buid, self.form.name, edits))
    async def delete(self, force=False):
        '''
        Delete a node from the cortex.
        The following tear-down operations occur in order:
            * validate that you have permissions to delete the node
            * validate that you have permissions to delete all tags
            * validate that there are no remaining references to the node.
            * delete all the tags (bottom up)
                * fire onDelTag() handlers
                * delete tag properties from storage
                * log tag:del splices
            * delete all secondary properties
                * fire onDelProp handler
                * delete secondary property from storage
                * log prop:del splices
            * delete the primary property
                * fire onDel handlers for the node
                * delete primary property from storage
                * log node:del splices
        '''
        formname, formvalu = self.ndef
        if self.form.isrunt:
            raise s_exc.IsRuntForm(mesg='Cannot delete runt nodes',
                                   form=formname, valu=formvalu)
        # top level tags will cause delete cascades
        tags = [t for t in self.tags.keys() if len(t.split('.')) == 1]
        # check for any nodes which reference us...
        if not force:
            # refuse to delete tag nodes with existing tags
            if self.form.name == 'syn:tag':
                async for _ in self.snap.nodesByTag(self.ndef[1]): # NOQA
                    mesg = 'Nodes still have this tag.'
                    return await self.snap._raiseOnStrict(s_exc.CantDelNode, mesg, form=formname,
                                                          iden=self.iden())
            async for refr in self.snap.nodesByPropTypeValu(formname, formvalu):
                if refr.buid == self.buid:
                    continue
                mesg = 'Other nodes still refer to this node.'
                return await self.snap._raiseOnStrict(s_exc.CantDelNode, mesg, form=formname,
                                                      iden=self.iden())
        # build one combined edit list: tags, then props, then the node itself
        edits = []
        for tag in tags:
            edits.extend(await self._getTagDelEdits(tag, init=True))
        for name in self.props.keys():
            edits.extend(await self._getPropDelEdits(name, init=True))
        edits.append(
            (s_layer.EDIT_NODE_DEL, (formvalu, self.form.type.stortype), ()),
        )
        await self.snap.applyNodeEdit((self.buid, formname, edits))
        # drop the snap's cached live node reference
        self.snap.livenodes.pop(self.buid, None)
    async def hasData(self, name):
        # check local cache first, then storage
        if name in self.nodedata:
            return True
        return await self.snap.hasNodeData(self.buid, name)
    async def getData(self, name, defv=None):
        # check local cache first, then storage
        valu = self.nodedata.get(name, s_common.novalu)
        if valu is not s_common.novalu:
            return valu
        return await self.snap.getNodeData(self.buid, name, defv=defv)
    async def setData(self, name, valu):
        # set a nodedata key via the node editor
        async with self.snap.getNodeEditor(self) as protonode:
            await protonode.setData(name, valu)
    async def popData(self, name):
        # remove a nodedata key and return its previous value
        retn = await self.snap.getNodeData(self.buid, name)
        edits = (
            (s_layer.EDIT_NODEDATA_DEL, (name, None), ()),
        )
        await self.snap.applyNodeEdits(((self.buid, self.form.name, edits),))
        return retn
    async def iterData(self):
        # yield (name, valu) for all nodedata on the node
        async for item in self.snap.iterNodeData(self.buid):
            yield item
class Path:
    '''
    A path context tracked through the storm runtime.
    '''
    def __init__(self, vars, nodes):
        # nodes: the list of nodes traversed so far; node is the current one
        self.node = None
        self.nodes = nodes
        if len(nodes):
            self.node = nodes[-1]
        self.vars = vars
        # stack of saved var scopes (see initframe/finiframe)
        self.frames = []
        # lazy constructors for on-demand variables
        self.ctors = {}
        # "builtins" which are *not* vars
        # ( this allows copying variable context )
        self.builtins = {
            'path': self,
            'node': self.node,
        }
        self.metadata = {}
    def getVar(self, name, defv=s_common.novalu):
        # Resolve a variable: explicit vars first, then builtins, then ctors.
        # Returns s_common.novalu when the name is unknown (defv is unused).
        # check if the name is in our variables
        valu = self.vars.get(name, s_common.novalu)
        if valu is not s_common.novalu:
            return valu
        # check if it's in builtins
        valu = self.builtins.get(name, s_common.novalu)
        if valu is not s_common.novalu:
            return valu
        ctor = self.ctors.get(name)
        if ctor is not None:
            # construct on first access and memoize into vars
            valu = ctor(self)
            self.vars[name] = valu
            return valu
        return s_common.novalu
    async def setVar(self, name, valu):
        self.vars[name] = valu
    async def popVar(self, name):
        # remove and return the var (novalu if it was not set)
        return self.vars.pop(name, s_common.novalu)
    def meta(self, name, valu):
        '''
        Add node specific metadata to be returned with the node.
        '''
        self.metadata[name] = valu
    async def pack(self, path=False):
        # Return a primitive dict of the path metadata; include the node
        # iden trail when path=True.
        ret = dict(self.metadata)
        if ret:
            ret = await s_stormtypes.toprim(ret)
        if path:
            ret['nodes'] = [node.iden() for node in self.nodes]
        return ret
    def fork(self, node):
        # Return a new Path extended by `node`, with a shallow copy of vars.
        nodes = list(self.nodes)
        nodes.append(node)
        path = Path(self.vars.copy(), nodes)
        return path
    def clone(self):
        # Return a copy of this Path, including saved frames.
        path = Path(copy.copy(self.vars), copy.copy(self.nodes))
        path.frames = [v.copy() for v in self.frames]
        return path
    def initframe(self, initvars=None):
        # Push the current var scope and start a fresh one seeded by initvars.
        framevars = {}
        if initvars is not None:
            framevars.update(initvars)
        self.frames.append(self.vars)
        self.vars = framevars
    def finiframe(self):
        '''
        Pop a scope frame from the path, restoring the previous var scope.
        If no frames remain, the current vars are simply cleared.
        '''
        if not self.frames:
            self.vars.clear()
            return
        self.vars = self.frames.pop()
def props(pode):
    '''
    Get the props from the node.
    Args:
        pode (tuple): A packed node.
    Notes:
        This will include any universal props present on the node.
    Returns:
        dict: A dictionary of properties.
    '''
    # return a shallow copy so callers may mutate freely
    return dict(pode[1]['props'])
def prop(pode, prop):
    '''
    Return the valu of a given property on the node.
    Args:
        pode (tuple): A packed node.
        prop (str): Property to retrieve.
    Notes:
        The prop argument may be the full property name (foo:bar:baz), relative property name (:baz) , or the unadorned
        property name (baz).
    Returns:
    '''
    formname = pode[0][0]
    # strip a leading form name and/or the relative-prop colon
    name = prop[len(formname):] if prop.startswith(formname) else prop
    if name[0] == ':':
        name = name[1:]
    return pode[1]['props'].get(name)
def tags(pode, leaf=False):
    '''
    Get all the tags for a given node.
    Args:
        pode (tuple): A packed node.
        leaf (bool): If True, only return leaf tags
    Returns:
        list: A list of tag strings.
    '''
    if leaf:
        return _tagscommon(pode, True)
    return list(pode[1]['tags'].keys())
def tagsnice(pode):
    '''
    Get all the leaf tags and the tags that have values or tagprops.
    Args:
        pode (tuple): A packed node.
    Returns:
        list: A list of tag strings.
    '''
    found = _tagscommon(pode, False)
    # also include any tag that carries tagprops
    for tagname in pode[1].get('tagprops', {}):
        if tagname not in found:
            found.append(tagname)
    return found
def _tagscommon(pode, leafonly):
'''
Return either all the leaf tags or all the leaf tags and all the internal tags with values
'''
retn = []
# brute force rather than build a tree. faster in small sets.
for tag, val in sorted((t for t in pode[1]['tags'].items()), reverse=True, key=lambda x: len(x[0])):
look = tag + '.'
if (leafonly or val == (None, None)) and any([r.startswith(look) for r in retn]):
continue
retn.append(tag)
return retn
def tagged(pode, tag):
    '''
    Check if a packed node has a given tag.
    Args:
        pode (tuple): A packed node.
        tag (str): The tag to check.
    Examples:
        Check if a node is tagged with "woot" and dostuff if it is.
            if s_node.tagged(node,'woot'):
                dostuff()
    Notes:
        If the tag starts with `#`, this is removed prior to checking.
    Returns:
        bool: True if the tag is present. False otherwise.
    '''
    name = tag[1:] if tag.startswith('#') else tag
    return pode[1]['tags'].get(name) is not None
def ndef(pode):
    '''
    Return a node definition (<form>,<valu>) tuple from the node.
    Args:
        pode (tuple): A packed node.
    Returns:
        ((str,obj)): The (<form>,<valu>) tuple for the node
    '''
    formvalu, _info = pode
    return formvalu
def iden(pode):
    '''
    Return the iden (buid) of the packed node.
    Args:
        pode (tuple): A packed node.
    Returns:
        str: The node iden.
    '''
    info = pode[1]
    return info.get('iden')
def reprNdef(pode):
    '''
    Get the ndef of the pode with a human readable value.
    Args:
        pode (tuple): A packed node.
    Notes:
        The human readable value is only available if the node came from a
        storm query execution where the ``repr`` key was passed into the
        ``opts`` argument with a True value.
    Returns:
        (str, str): A tuple of form and the human readable value.
    '''
    (form, valu), info = pode
    # fall back to the stringified raw value when no repr was packed
    rval = info.get('repr')
    if rval is None:
        rval = str(valu)
    return form, rval
def reprProp(pode, prop):
    '''
    Get the human readable value for a secondary property from the pode.
    Args:
        pode (tuple): A packed node.
        prop:
    Notes:
        The human readable value is only available if the node came from a
        storm query execution where the ``repr`` key was passed into the
        ``opts`` argument with a True value.
        The prop argument may be the full property name (foo:bar:baz), relative
        property name (:baz) , or the unadorned property name (baz).
    Returns:
        str: The human readable property value.  If the property is not present, returns None.
    '''
    formname = pode[0][0]
    # strip a leading form name and/or the relative-prop colon
    name = prop[len(formname):] if prop.startswith(formname) else prop
    if name[0] == ':':
        name = name[1:]
    valu = pode[1].get('props').get(name)
    if valu is None:
        return None
    rval = pode[1].get('reprs', {}).get(name)
    return str(valu) if rval is None else rval
def reprTag(pode, tag):
    '''
    Get the human readable value for the tag timestamp from the pode.
    Args:
        pode (tuple): A packed node.
        tag (str): The tag to get the value for.
    Notes:
        The human readable value is only available if the node came from a
        storm query execution where the ``repr`` key was passed into the
        ``opts`` argument with a True value.
        If the tag does not have a timestamp, this returns a empty string.
    Returns:
        str: The human readable value for the tag. If the tag is not present, returns None.
    '''
    name = tag.lstrip('#')
    ival = pode[1]['tags'].get(name)
    if ival is None:
        return None
    if ival == (None, None):
        # tag present but carries no time interval
        return ''
    mint, maxt = ival
    return f'({s_time.repr(mint)}, {s_time.repr(maxt)})'
def reprTagProps(pode, tag):
    '''
    Get the human readable values for any tagprops on a tag for a given node.
    Args:
        pode (tuple): A packed node.
        tag (str): The tag to get the tagprops reprs for.
    Notes:
        The human readable value is only available if the node came from a
        storm query execution where the ``repr`` key was passed into the
        ``opts`` argument with a True value.
        If the tag does not have any tagprops associated with it, this returns an empty list.
    Returns:
        list: A list of tuples, containing the name of the tagprop and the repr value.
    '''
    if pode[1]['tags'].get(tag) is None:
        return []
    propvals = pode[1].get('tagprops', {}).get(tag)
    if propvals is None:
        return []
    reprs = pode[1].get('tagpropreprs', {}).get(tag, {})
    pairs = []
    for name, valu in propvals.items():
        # prefer the packed repr; fall back to stringifying the raw value
        rval = reprs.get(name)
        pairs.append((name, str(valu) if rval is None else rval))
    pairs.sort(key=lambda pair: pair[0])
    return pairs
|
|
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
# When the two are combined, it can be tricky to get all the behavior just
# right. This file contains a suite of common tests for scipy.stats functions
# that support `axis` and `nan_policy` and additional tests for some associated
# functions in stats._util.
from itertools import product, combinations_with_replacement
import re
import pickle
import pytest
import numpy as np
from numpy.lib import NumpyVersion
from numpy.testing import assert_allclose, assert_equal
from scipy import stats
# Table of hypothesis tests exercised by the common axis/nan_policy tests.
axis_nan_policy_cases = [
    # function, args, kwds, number of samples, paired, unpacker function
    # args, kwds typically aren't needed; just showing that they work
    (stats.kruskal, tuple(), dict(), 3, False, None),  # 4 samples is slow
    (stats.ranksums, ('less',), dict(), 2, False, None),
    (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, False, None),
    (stats.wilcoxon, ('pratt',), {'mode': 'auto'}, 2, True, None),
    (stats.wilcoxon, tuple(), dict(), 1, True, None),
]
# If the message is one of those expected, put nans in
# appropriate places of `statistics` and `pvalues`
too_small_messages = {"The input contains nan",  # for nan_policy="raise"
                      "Degrees of freedom <= 0 for slice",
                      "x and y should have at least 5 elements",
                      "Data must be at least length 3",
                      "The sample must contain at least two",
                      "x and y must contain at least two",
                      "division by zero",
                      "Mean of empty slice",
                      "Data passed to ks_2samp must not be empty",
                      "Not enough test observations",
                      "Not enough other observations",
                      "At least one observation is required",
                      "zero-size array to reduction operation maximum",
                      "`x` and `y` must be of nonzero size.",
                      "The exact distribution of the Wilcoxon test"}
def _mixed_data_generator(n_samples, n_repetitions, axis, rng,
                          paired=False):
    """Generate samples containing every important nan pattern along `axis`.

    Returns a list of `n_samples` broadcastable arrays, each with six distinct
    nan patterns (all-nan through no-nan) repeated `n_repetitions` times.
    NOTE: the order of `rng` calls is part of the generated data — do not
    reorder them.
    """
    # generate random samples to check the response of hypothesis tests to
    # samples with different (but broadcastable) shapes and various
    # nan patterns (e.g. all nans, some nans, no nans) along axis-slices
    data = []
    for i in range(n_samples):
        n_patterns = 6  # number of distinct nan patterns
        n_obs = 20 if paired else 20 + i  # observations per axis-slice
        x = np.ones((n_repetitions, n_patterns, n_obs)) * np.nan
        for j in range(n_repetitions):
            samples = x[j, :, :]
            # case 0: axis-slice with all nans (0 reals)
            # cases 1-3: axis-slice with 1-3 reals (the rest nans)
            # case 4: axis-slice with mostly (all but two) reals
            # case 5: axis slice with all reals
            for k, n_reals in enumerate([0, 1, 2, 3, n_obs-2, n_obs]):
                # for cases 1-3, need paired nans to be in the same place
                indices = rng.permutation(n_obs)[:n_reals]
                samples[k, indices] = rng.random(size=n_reals)
            # permute the axis-slices just to show that order doesn't matter
            samples[:] = rng.permutation(samples, axis=0)
        # For multi-sample tests, we want to test broadcasting and check
        # that nan policy works correctly for each nan pattern for each input.
        # This takes care of both simultaneosly.
        new_shape = [n_repetitions] + [1]*n_samples + [n_obs]
        new_shape[1 + i] = 6
        x = x.reshape(new_shape)
        x = np.moveaxis(x, -1, axis)
        data.append(x)
    return data
def _homogeneous_data_generator(n_samples, n_repetitions, axis, rng,
paired=False, all_nans=True):
# generate random samples to check the response of hypothesis tests to
# samples with different (but broadcastable) shapes and homogeneous
# data (all nans or all finite)
data = []
for i in range(n_samples):
n_obs = 20 if paired else 20 + i # observations per axis-slice
shape = [n_repetitions] + [1]*n_samples + [n_obs]
shape[1 + i] = 2
x = np.ones(shape) * np.nan if all_nans else rng.random(shape)
x = np.moveaxis(x, -1, axis)
data.append(x)
return data
def nan_policy_1d(hypotest, data1d, unpacker, *args,
                  nan_policy='raise', paired=False, _no_deco=True, **kwds):
    # Reference implementation for how `nan_policy` should work for 1d samples
    contains_nan = [np.isnan(sample).any() for sample in data1d]
    if nan_policy == 'raise':
        if any(contains_nan):
            raise ValueError("The input contains nan values")
    elif nan_policy == 'propagate':
        # For all hypothesis tests tested, returning nans is the right thing.
        # But many hypothesis tests don't propagate correctly (e.g. they treat
        # np.nan the same as np.inf, which doesn't make sense when ranks are
        # involved) so override that behavior here.
        if any(contains_nan):
            return np.nan, np.nan
    elif nan_policy == 'omit':
        # manually omit nans (or pairs in which at least one element is nan)
        if paired:
            bad = np.isnan(data1d[0])
            for sample in data1d[1:]:
                bad |= np.isnan(sample)
            data1d = [sample[~bad] for sample in data1d]
        else:
            data1d = [sample[~np.isnan(sample)] for sample in data1d]
    return unpacker(hypotest(*data1d, *args, _no_deco=_no_deco, **kwds))
# Quick smoke subset: a single axis and only the "mixed" nan pattern.
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "paired",
                          "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), (1,))
@pytest.mark.parametrize(("data_generator"), ("mixed",))
def test_axis_nan_policy_fast(hypotest, args, kwds, n_samples, paired,
                              unpacker, nan_policy, axis,
                              data_generator):
    _axis_nan_policy_test(hypotest, args, kwds, n_samples, paired,
                          unpacker, nan_policy, axis, data_generator)
# Exhaustive variant: every axis and every data pattern (marked slow).
@pytest.mark.slow
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "paired",
                          "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), range(-3, 3))
@pytest.mark.parametrize(("data_generator"),
                         ("all_nans", "all_finite", "mixed"))
def test_axis_nan_policy_full(hypotest, args, kwds, n_samples, paired,
                              unpacker, nan_policy, axis,
                              data_generator):
    _axis_nan_policy_test(hypotest, args, kwds, n_samples, paired,
                          unpacker, nan_policy, axis, data_generator)
def _axis_nan_policy_test(hypotest, args, kwds, n_samples, paired,
                          unpacker, nan_policy, axis, data_generator):
    # Tests the 1D and vectorized behavior of hypothesis tests against a
    # reference implementation (nan_policy_1d with np.ndenumerate)
    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:
        def unpacker(res):
            return res
    if NumpyVersion(np.__version__) < '1.18.0':
        pytest.xfail("Generator `permutation` method doesn't support `axis`")
    rng = np.random.default_rng(0)
    # Generate multi-dimensional test data with all important combinations
    # of patterns of nans along `axis`
    n_repetitions = 3  # number of repetitions of each pattern
    data_gen_kwds = {'n_samples': n_samples, 'n_repetitions': n_repetitions,
                     'axis': axis, 'rng': rng, 'paired': paired}
    if data_generator == 'mixed':
        inherent_size = 6  # number of distinct types of patterns
        data = _mixed_data_generator(**data_gen_kwds)
    elif data_generator == 'all_nans':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = True
        data = _homogeneous_data_generator(**data_gen_kwds)
    elif data_generator == 'all_finite':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = False
        data = _homogeneous_data_generator(**data_gen_kwds)
    output_shape = [n_repetitions] + [inherent_size]*n_samples
    # To generate reference behavior to compare against, loop over the axis-
    # slices in data. Make indexing easier by moving `axis` to the end and
    # broadcasting all samples to the same shape.
    data_b = [np.moveaxis(sample, axis, -1) for sample in data]
    data_b = [np.broadcast_to(sample, output_shape + [sample.shape[-1]])
              for sample in data_b]
    statistics = np.zeros(output_shape)
    pvalues = np.zeros(output_shape)
    # Populate `statistics`/`pvalues` one axis-slice at a time using the
    # reference 1D implementation.
    for i, _ in np.ndenumerate(statistics):
        data1d = [sample[i] for sample in data_b]
        with np.errstate(divide='ignore', invalid='ignore'):
            try:
                res1d = nan_policy_1d(hypotest, data1d, unpacker, *args,
                                      nan_policy=nan_policy, paired=paired,
                                      _no_deco=True, **kwds)
                # Eventually we'll check the results of a single, vectorized
                # call of `hypotest` against the arrays `statistics` and
                # `pvalues` populated using the reference `nan_policy_1d`.
                # But while we're at it, check the results of a 1D call to
                # `hypotest` against the reference `nan_policy_1d`.
                res1db = unpacker(hypotest(*data1d, *args,
                                           nan_policy=nan_policy, **kwds))
                assert_equal(res1db[0], res1d[0])
                if len(res1db) == 2:
                    assert_equal(res1db[1], res1d[1])
            # When there is not enough data in 1D samples, many existing
            # hypothesis tests raise errors instead of returning nans .
            # For vectorized calls, we put nans in the corresponding elements
            # of the output.
            except (RuntimeWarning, ValueError, ZeroDivisionError) as e:
                # whatever it is, make sure same error is raised by both
                # `nan_policy_1d` and `hypotest`
                with pytest.raises(type(e), match=re.escape(str(e))):
                    nan_policy_1d(hypotest, data1d, unpacker, *args,
                                  nan_policy=nan_policy, paired=paired,
                                  _no_deco=True, **kwds)
                with pytest.raises(type(e), match=re.escape(str(e))):
                    hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)
                if any([str(e).startswith(message)
                        for message in too_small_messages]):
                    res1d = np.nan, np.nan
                else:
                    raise e
        statistics[i] = res1d[0]
        if len(res1d) == 2:
            pvalues[i] = res1d[1]
    # Perform a vectorized call to the hypothesis test.
    # If `nan_policy == 'raise'`, check that it raises the appropriate error.
    # If not, compare against the output against `statistics` and `pvalues`
    if nan_policy == 'raise' and not data_generator == "all_finite":
        message = 'The input contains nan values'
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)
    else:
        with np.errstate(divide='ignore', invalid='ignore'):
            res = unpacker(hypotest(*data, axis=axis, nan_policy=nan_policy,
                                    *args, **kwds))
        # Both values and dtypes must match the reference arrays.
        assert_equal(res[0], statistics)
        assert_equal(res[0].dtype, statistics.dtype)
        if len(res) == 2:
            assert_equal(res[1], pvalues)
            assert_equal(res[1].dtype, pvalues.dtype)
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "paired",
                          "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("data_generator"),
                         ("all_nans", "all_finite", "mixed", "empty"))
def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples, paired,
                                      unpacker, nan_policy, data_generator):
    # check for correct behavior when `axis=None`
    if not unpacker:
        def unpacker(res):
            return res
    if NumpyVersion(np.__version__) < '1.18.0':
        pytest.xfail("Generator `permutation` method doesn't support `axis`")
    rng = np.random.default_rng(0)
    if data_generator == "empty":
        data = [rng.random((2, 0)) for i in range(n_samples)]
    else:
        data = [rng.random((2, 20)) for i in range(n_samples)]
    if data_generator == "mixed":
        # sprinkle nans into roughly 10% of the entries
        masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)]
        for sample, mask in zip(data, masks):
            sample[mask] = np.nan
    elif data_generator == "all_nans":
        data = [sample * np.nan for sample in data]
    data_raveled = [sample.ravel() for sample in data]
    if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}:
        message = 'The input contains nan values'
        # check for correct behavior whether or not data is 1d to begin with
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=None, nan_policy=nan_policy,
                     *args, **kwds)
        with pytest.raises(ValueError, match=message):
            hypotest(*data_raveled, axis=None, nan_policy=nan_policy,
                     *args, **kwds)
    else:
        # behavior of reference implementation with 1d input, hypotest with 1d
        # input, and hypotest with Nd input should match, whether that means
        # that outputs are equal or they raise the same exception
        ea_str, eb_str, ec_str = None, None, None
        with np.errstate(divide='ignore', invalid='ignore'):
            try:
                res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args,
                                       nan_policy=nan_policy, paired=paired,
                                       _no_deco=True, **kwds)
            except (RuntimeWarning, ValueError, ZeroDivisionError) as ea:
                ea_str = str(ea)
            try:
                res1db = unpacker(hypotest(*data_raveled, *args,
                                           nan_policy=nan_policy, **kwds))
            except (RuntimeWarning, ValueError, ZeroDivisionError) as eb:
                eb_str = str(eb)
            try:
                res1dc = unpacker(hypotest(*data, *args, axis=None,
                                           nan_policy=nan_policy, **kwds))
            except (RuntimeWarning, ValueError, ZeroDivisionError) as ec:
                ec_str = str(ec)
            if ea_str or eb_str or ec_str:
                # all three calls must have raised, with identical messages,
                # and the error must be one of the known "too small" messages
                assert any([str(ea_str).startswith(message)
                            for message in too_small_messages])
                assert ea_str == eb_str == ec_str
            else:
                assert_equal(res1db, res1da)
                assert_equal(res1dc, res1da)
@pytest.mark.parametrize(("axis"), (0, 1, 2))
def test_axis_nan_policy_decorated_positional_axis(axis):
    # Test for correct behavior of function decorated with
    # _axis_nan_policy_decorator whether `axis` is provided as positional or
    # keyword argument
    if NumpyVersion(np.__version__) < '1.18.0':
        pytest.xfail("Avoid test failures due to old version of NumPy")
    shape = (8, 9, 10)
    rng = np.random.default_rng(0)
    x = rng.random(shape)
    y = rng.random(shape)
    res1 = stats.mannwhitneyu(x, y, True, 'two-sided', axis)
    res2 = stats.mannwhitneyu(x, y, True, 'two-sided', axis=axis)
    assert_equal(res1, res2)
    # passing `axis` both positionally and by keyword must raise TypeError
    message = "mannwhitneyu() got multiple values for argument 'axis'"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.mannwhitneyu(x, y, True, 'two-sided', axis, axis=axis)
def test_axis_nan_policy_decorated_positional_args():
    # Test for correct behavior of function decorated with
    # _axis_nan_policy_decorator when function accepts *args
    if NumpyVersion(np.__version__) < '1.18.0':
        pytest.xfail("Avoid test failures due to old version of NumPy")
    shape = (3, 8, 9, 10)
    rng = np.random.default_rng(0)
    x = rng.random(shape)
    x[0, 0, 0, 0] = np.nan
    stats.kruskal(*x)
    # `args` must not be accepted as a keyword argument, with or without
    # positional samples
    message = "kruskal() got an unexpected keyword argument 'args'"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.kruskal(args=x)
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.kruskal(*x, args=x)
def test_axis_nan_policy_decorated_keyword_samples():
    # Test for correct behavior of function decorated with
    # _axis_nan_policy_decorator whether samples are provided as positional or
    # keyword arguments
    if NumpyVersion(np.__version__) < '1.18.0':
        pytest.xfail("Avoid test failures due to old version of NumPy")
    shape = (2, 8, 9, 10)
    rng = np.random.default_rng(0)
    x = rng.random(shape)
    x[0, 0, 0, 0] = np.nan
    res1 = stats.mannwhitneyu(*x)
    res2 = stats.mannwhitneyu(x=x[0], y=x[1])
    assert_equal(res1, res2)
    # supplying a sample both positionally and by keyword must raise
    message = "mannwhitneyu() got multiple values for argument"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.mannwhitneyu(*x, x=x[0], y=x[1])
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "paired",
                          "unpacker"), axis_nan_policy_cases)
def test_axis_nan_policy_decorated_pickled(hypotest, args, kwds, n_samples,
                                           paired, unpacker):
    # Decorated functions must survive a pickle round trip and produce the
    # same results as the original function.
    if NumpyVersion(np.__version__) < '1.18.0':
        rng = np.random.RandomState(0)
    else:
        rng = np.random.default_rng(0)
    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:
        def unpacker(res):
            return res
    data = rng.uniform(size=(n_samples, 2, 30))
    pickled_hypotest = pickle.dumps(hypotest)
    unpickled_hypotest = pickle.loads(pickled_hypotest)
    res1 = unpacker(hypotest(*data, *args, axis=-1, **kwds))
    res2 = unpacker(unpickled_hypotest(*data, *args, axis=-1, **kwds))
    assert_allclose(res1, res2, rtol=1e-12)
def test_check_empty_inputs():
    # Test that _check_empty_inputs is doing its job, at least for single-
    # sample inputs. (Multi-sample functionality is tested below.)
    # If the input sample is not empty, it should return None.
    # If the input sample is empty, it should return an array of NaNs or an
    # empty array of appropriate shape. np.mean is used as a reference for the
    # output because, like the statistics calculated by these functions,
    # it works along and "consumes" `axis` but preserves the other axes.
    # Try every shape of length 0-4 with dimension lengths drawn from {0,1,2}.
    for i in range(5):
        for combo in combinations_with_replacement([0, 1, 2], i):
            for axis in range(len(combo)):
                samples = (np.zeros(combo),)
                output = stats._axis_nan_policy._check_empty_inputs(samples,
                                                                    axis)
                if output is not None:
                    with np.testing.suppress_warnings() as sup:
                        sup.filter(RuntimeWarning, "Mean of empty slice.")
                        sup.filter(RuntimeWarning, "invalid value encountered")
                        reference = samples[0].mean(axis=axis)
                    np.testing.assert_equal(output, reference)
def _check_arrays_broadcastable(arrays, axis):
# https://numpy.org/doc/stable/user/basics.broadcasting.html
# "When operating on two arrays, NumPy compares their shapes element-wise.
# It starts with the trailing (i.e. rightmost) dimensions and works its
# way left.
# Two dimensions are compatible when
# 1. they are equal, or
# 2. one of them is 1
# ...
# Arrays do not need to have the same number of dimensions."
# (Clarification: if the arrays are compatible according to the criteria
# above and an array runs out of dimensions, it is still compatible.)
# Below, we follow the rules above except ignoring `axis`
n_dims = max([arr.ndim for arr in arrays])
if axis is not None:
# convert to negative axis
axis = (-n_dims + axis) if axis >= 0 else axis
for dim in range(1, n_dims+1): # we'll index from -1 to -n_dims, inclusive
if -dim == axis:
continue # ignore lengths along `axis`
dim_lengths = set()
for arr in arrays:
if dim <= arr.ndim and arr.shape[-dim] != 1:
dim_lengths.add(arr.shape[-dim])
if len(dim_lengths) > 1:
return False
return True
@pytest.mark.slow
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "paired",
                          "unpacker"), axis_nan_policy_cases)
def test_empty(hypotest, args, kwds, n_samples, paired, unpacker):
    # test for correct output shape when at least one input is empty
    # (empty inputs should yield NaN output of the broadcast shape)
    def small_data_generator(n_samples, n_dims):
        def small_sample_generator(n_dims):
            # return all possible "small" arrays in up to n_dim dimensions
            for i in n_dims:
                # "small" means with size along dimension either 0 or 1
                for combo in combinations_with_replacement([0, 1, 2], i):
                    yield np.zeros(combo)
        # yield all possible combinations of small samples
        gens = [small_sample_generator(n_dims) for i in range(n_samples)]
        for i in product(*gens):
            yield i
    n_dims = [2, 3]
    for samples in small_data_generator(n_samples, n_dims):
        # this test is only for arrays of zero size
        if not any((sample.size == 0 for sample in samples)):
            continue
        max_axis = max((sample.ndim for sample in samples))
        # need to test for all valid values of `axis` parameter, too
        for axis in range(-max_axis, max_axis):
            try:
                # After broadcasting, all arrays are the same shape, so
                # the shape of the output should be the same as a single-
                # sample statistic. Use np.mean as a reference.
                concat = stats._stats_py._broadcast_concatenate(samples, axis)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "Mean of empty slice.")
                    sup.filter(RuntimeWarning, "invalid value encountered")
                    expected = np.mean(concat, axis=axis) * np.nan
                res = hypotest(*samples, *args, axis=axis, **kwds)
                if hasattr(res, 'statistic'):
                    assert_equal(res.statistic, expected)
                    assert_equal(res.pvalue, expected)
                else:
                    assert_equal(res, expected)
            except ValueError:
                # confirm that the arrays truly are not broadcastable
                assert not _check_arrays_broadcastable(samples, axis)
                # confirm that _both_ `_broadcast_concatenate` and `hypotest`
                # produce this information.
                message = "Array shapes are incompatible for broadcasting."
                with pytest.raises(ValueError, match=message):
                    stats._stats_py._broadcast_concatenate(samples, axis)
                with pytest.raises(ValueError, match=message):
                    hypotest(*samples, *args, axis=axis, **kwds)
|
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from asynq import asynq, AsyncContext, scheduler
from asynq.tools import (
amap,
afilter,
afilterfalse,
amin,
amax,
asift,
asorted,
acached_per_instance,
alru_cache,
alazy_constant,
aretry,
call_with_context,
deduplicate,
AsyncTimer,
AsyncEventHook,
)
from qcore.asserts import (
assert_eq,
assert_gt,
assert_is,
AssertRaises,
assert_unordered_list_eq,
)
from qcore import get_original_fn
import inspect
import pickle
from unittest import mock
@asynq()
def inner_fn(x):
    """No-op async helper; exists only so filter_fn has something to yield."""
    pass
@asynq()
def filter_fn(elt):
    """Async predicate: True for non-None elements (yields inner_fn first)."""
    yield inner_fn.asynq(elt)
    return elt is not None
@asynq()
def alen(seq):
    """Async wrapper around len(); used as a key function in amin/amax tests."""
    return len(seq)
def gen():
    """Yield a truthy value (1) followed by None, in that order.

    Used by the filter tests to verify that generator inputs are consumed
    correctly.
    """
    for item in (1, None):
        yield item
def test_afilter():
    """afilter keeps elements passing the async predicate; None means truthiness."""
    assert_eq([], afilter.asynq(filter_fn, []).value())
    assert_eq([], afilter.asynq(filter_fn, [None]).value())
    assert_eq([1], afilter(filter_fn, [None, 1, None]))
    assert_eq([1], afilter.asynq(filter_fn, [None, 1, None]).value())
    assert_eq([1], afilter(None, [None, 1, None]))
    # generator inputs must be consumed correctly, too
    assert_eq([1], afilter(filter_fn, gen()))
    assert_eq([1], afilter(None, gen()))
def test_afilterfalse():
    """afilterfalse keeps only the elements for which the predicate is falsy."""
    assert_eq([], afilterfalse.asynq(filter_fn, []).value())
    assert_eq([None], afilterfalse.asynq(filter_fn, [None]).value())
    assert_eq([None, None], afilterfalse(filter_fn, [None, 1, None]))
    assert_eq([None, None], afilterfalse.asynq(filter_fn, [None, 1, None]).value())
    # generator inputs must be consumed correctly, too
    assert_eq([None], afilterfalse(filter_fn, gen()))
def test_asift():
    """asift splits a sequence into (matching, non-matching) lists."""
    assert_eq(([], []), asift.asynq(filter_fn, []).value())
    assert_eq(([], [None]), asift.asynq(filter_fn, [None]).value())
    assert_eq(([1], [None, None]), asift(filter_fn, [None, 1, None]))
    assert_eq(([1], [None, None]), asift.asynq(filter_fn, [None, 1, None]).value())
def test_amap():
    """amap applies an async function to each element, preserving order."""
    assert_eq([False], list(amap(filter_fn, [None])))
    assert_eq([True], list(amap(filter_fn, [4])))
    assert_eq([], list(amap(filter_fn, [])))
    assert_eq([False, True, False], list(amap(filter_fn, [None, "", None])))
def test_asorted():
    """asorted sorts with an optional async key function (default: natural order)."""
    assert_eq([], asorted([], key=filter_fn))
    assert_eq([None], asorted([None], key=filter_fn))
    assert_eq([None, True], asorted([True, None], key=filter_fn))
    assert_eq([1, 2], asorted([2, 1]))
def test_amax():
    """amax: maximum over varargs or a single iterable, with optional async key."""
    assert_eq(1, amax(1, None, key=filter_fn))
    assert_eq(1, amax([1, None], key=filter_fn))
    assert_eq(1, amax((elt for elt in (1, None)), key=filter_fn))
    assert_eq([1, 2, 3], amax([1], [1, 2, 3], [1, 2], key=alen))
    assert_eq([1, 2, 3], amax([[1], [1, 2, 3], [1, 2]], key=alen))
    assert_eq(4, amax(1, 2, 3, 4))
    # error cases: no arguments, empty iterable, unexpected keyword
    with AssertRaises(TypeError):
        amax(key=filter_fn)
    with AssertRaises(ValueError):
        amax([], key=filter_fn)
    with AssertRaises(TypeError):
        amax([], key=filter_fn, random_keyword_argument="raising a TypeError")
def test_amin():
    """amin: minimum over varargs or a single iterable, with optional async key."""
    assert_is(None, amin(1, None, key=filter_fn))
    assert_is(None, amin([1, None], key=filter_fn))
    assert_is(None, amin((elt for elt in (1, None)), key=filter_fn))
    assert_eq([1], amin([1], [1, 2, 3], [1, 2], key=alen))
    assert_eq([1], amin([[1], [1, 2, 3], [1, 2]], key=alen))
    assert_eq(1, amin(1, 2, 3, 4))
    # error cases: no arguments, empty iterable, unexpected keyword
    with AssertRaises(TypeError):
        amin(key=filter_fn)
    with AssertRaises(ValueError):
        amin([], key=filter_fn)
    with AssertRaises(TypeError):
        amin([], key=filter_fn, random_keyword_argument="raising a TypeError")
class AsyncObject(object):
    """Fixture class exercising acached_per_instance and deduplicate on methods."""
    cls_value = 0  # class-level counter bumped by deduplicated_static_method
    def __init__(self):
        self.value = 0
    @acached_per_instance()
    @asynq()
    def get_value(self, index):
        # bumps on every uncached call so tests can count cache misses
        self.value += 1
        return self.value
    @acached_per_instance()
    @asynq()
    def with_kwargs(self, x=1, y=2, z=3):
        # bumps value by the argument sum; cached per distinct kwargs
        self.value += x + y + z
        return self.value
    @acached_per_instance()
    @asynq()
    def raises_exception(self):
        # always fails; used to verify scheduler cleanup after exceptions
        assert False
    @acached_per_instance()
    @asynq()
    def with_kwonly_arg(self, *, arg=1):
        return arg
    @deduplicate()
    @asynq()
    def increment_value_method(self, val=1):
        self.value += val
    @deduplicate()
    @asynq()
    @staticmethod
    def deduplicated_static_method(val=1):
        AsyncObject.cls_value += val
class UnhashableAcached(AsyncObject):
    # Unhashable variant: per-instance caching must work without hashability.
    __hash__ = None  # type: ignore
def test_acached_per_instance():
    """Per-instance cache: one slot per instance; entries die with the instance."""
    for cls in (AsyncObject, UnhashableAcached):
        obj = cls()
        cache = type(obj).get_value.decorator.__acached_per_instance_cache__
        assert_eq(0, len(cache), extra=repr(cache))
        assert_eq(1, obj.get_value(0))
        assert_eq(1, obj.get_value(0))
        assert_eq(2, obj.get_value(1))
        assert_eq(1, obj.get_value(0))
        # positional and keyword invocations share a cache entry
        assert_eq(1, obj.get_value(index=0))
        assert_eq(1, obj.get_value.asynq(index=0).value())
        assert_eq(8, obj.with_kwargs())
        assert_eq(8, obj.with_kwargs(z=3))
        assert_eq(17, obj.with_kwargs(x=3, y=3))
        assert_eq(1, len(cache), extra=repr(cache))
        assert_eq(1, obj.with_kwonly_arg(arg=1))
        # the cache must not keep the instance alive
        del obj
        assert_eq(0, len(cache), extra=repr(cache))
def test_acached_per_instance_exception_handling():
    """An exception in a cached method must not corrupt scheduler state.

    Fix: the original wrapped the call in try/except and placed the assertion
    inside the except block, so if raises_exception() ever failed to raise,
    the test silently passed without checking anything.
    """
    obj = AsyncObject()
    # raises_exception() contains `assert False`, so it must raise
    with AssertRaises(AssertionError):
        obj.raises_exception()
    # the exception should not affect the internals of the scheduler, and the
    # active task should get cleaned up
    assert_is(None, scheduler.get_active_task())
def test_alru_cache():
    """Entry point: runs the async LRU-cache checks synchronously."""
    _check_alru_cache()
@asynq()
def _check_alru_cache():
    """With maxsize=1 and a parity key_fn, cache hits/misses follow parity."""
    # key_fn maps each call to args[0] % 2 == 0, so all odd (or all even)
    # arguments share one cache entry
    @alru_cache(maxsize=1, key_fn=lambda args, kwargs: args[0] % 2 == 0)
    @asynq()
    def cube(n):
        return n * n * n
    assert_eq(1, (yield cube.asynq(1)))
    # hit the cache
    assert_eq(1, (yield cube.asynq(3)))
    # cache miss
    assert_eq(8, (yield cube.asynq(2)))
    # now it's a cache miss
    assert_eq(27, (yield cube.asynq(3)))
def test_alazy_constant():
    """Entry point for the alazy_constant checks (no-TTL and TTL variants)."""
    _check_alazy_constant_no_ttl()
    _check_alazy_constant_ttl()
constant_call_count = 0  # module-level counter shared by the checks below
@asynq()
def _check_alazy_constant_no_ttl():
    """Without a TTL, alazy_constant recomputes only after an explicit dirty()."""
    global constant_call_count
    constant_call_count = 0
    @alazy_constant()
    @asynq()
    def constant():
        global constant_call_count
        constant_call_count += 1
        return constant_call_count
    # multiple calls in a short time should only call it once
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    # but after a dirty, it should be called again
    constant.dirty()
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
@asynq()
def _check_alazy_constant_ttl():
    """With a TTL, alazy_constant recomputes after expiry or an explicit dirty()."""
    global constant_call_count
    constant_call_count = 0
    @alazy_constant(ttl=100000)  # 100ms
    @asynq()
    def constant():
        global constant_call_count
        constant_call_count += 1
        return constant_call_count
    # multiple calls in a short time should only call it once
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    assert_eq(1, (yield constant.asynq()))
    # but after a long enough time, it should be called again
    time.sleep(0.1)
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
    assert_eq(2, (yield constant.asynq()))
    # or after a dirty
    constant.dirty()
    assert_eq(3, (yield constant.asynq()))
    assert_eq(3, (yield constant.asynq()))
    assert_eq(3, (yield constant.asynq()))
class AnyException(Exception):
    """Arbitrary exception type used to drive the aretry tests."""
    pass
class AnyOtherException(Exception):
    """Second, unrelated exception type for testing unspecified-exception paths."""
    pass
@aretry(Exception)
@asynq()
def retry_it():
    """Module-level retryable no-op; referenced by the pickling test."""
    pass
class TestRetry(object):
    """Tests for the @aretry decorator.

    Fix: test_retry_preserves_argspec used inspect.getargspec(), which was
    deprecated since Python 3.0 and removed in Python 3.11; it now uses
    inspect.getfullargspec(), which compares equal for these functions.
    """

    def create_function(self, exception_type, max_tries):
        """Return (decorated function, mock body) with the given retry policy."""
        fn_body = mock.Mock()
        fn_body.return_value = []

        @aretry(exception_type, max_tries=max_tries)
        @asynq()
        def function(*args, **kwargs):
            return fn_body(*args, **kwargs)

        return function, fn_body

    def test_pickling(self):
        # module-level decorated functions must pickle back to the same object
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            pickled = pickle.dumps(retry_it, protocol=protocol)
            assert_is(retry_it, pickle.loads(pickled))

    def test_retry_passes_all_arguments(self):
        function, fn_body = self.create_function(AnyException, max_tries=2)
        function(1, 2, foo=3)
        fn_body.assert_called_once_with(1, 2, foo=3)

    def test_retry_does_not_retry_on_no_exception(self):
        function, fn_body = self.create_function(AnyException, max_tries=3)
        function()
        fn_body.assert_called_once_with()

    def test_retry_does_not_retry_on_unspecified_exception(self):
        function, fn_body = self.create_function(AnyException, max_tries=3)
        fn_body.side_effect = AnyOtherException
        with AssertRaises(AnyOtherException):
            function()
        fn_body.assert_called_once_with()

    def test_retry_retries_on_provided_exception(self):
        max_tries = 4
        function, fn_body = self.create_function(AnyException, max_tries)
        fn_body.side_effect = AnyException
        with AssertRaises(AnyException):
            function()
        assert_eq(max_tries, fn_body.call_count)

    def test_retry_requires_max_try_at_least_one(self):
        with AssertRaises(Exception):
            self.create_function(AnyException, max_tries=0)
        # max_tries=1 is the smallest legal value
        self.create_function(AnyException, max_tries=1)

    def test_retry_can_take_multiple_exceptions(self):
        max_tries = 4
        expected_exceptions = (AnyException, AnyOtherException)
        function, fn_body = self.create_function(expected_exceptions, max_tries)
        fn_body.side_effect = AnyException
        with AssertRaises(AnyException):
            function()
        assert_eq(max_tries, fn_body.call_count)
        fn_body.reset_mock()
        fn_body.side_effect = AnyOtherException
        with AssertRaises(AnyOtherException):
            function()
        assert_eq(max_tries, fn_body.call_count)

    def test_retry_preserves_argspec(self):
        def fn(foo, bar, baz=None, **kwargs):
            pass

        decorated = aretry(Exception)(fn)
        # inspect.getargspec() was removed in Python 3.11;
        # getfullargspec() is the supported equivalent for this comparison.
        assert_eq(
            inspect.getfullargspec(fn),
            inspect.getfullargspec(get_original_fn(decorated)),
        )
class Ctx(AsyncContext):
    """Async context that records (on the class) whether it is currently active."""
    is_on = False
    def pause(self):
        Ctx.is_on = False
    def resume(self):
        Ctx.is_on = True
@asynq()
def assert_state(value):
    """Assert that Ctx is (in)active after yielding through the scheduler."""
    yield AsyncObject().get_value.asynq(value)
    assert_is(value, Ctx.is_on)
def test_call_with_context():
    """call_with_context activates the context only for the wrapped call."""
    assert_state(False)
    call_with_context(Ctx(), assert_state, True)
i = 0  # global counter mutated by the deduplicate test helpers below
@deduplicate()
@asynq()
def increment_value(val=1):
    """Deduplicated incrementer: concurrent identical calls run only once."""
    global i
    i += val
@deduplicate()
@asynq()
def recursive_incrementer(n):
    """Recurses down to 0, then delegates to increment_value(0)."""
    if n == 0:
        return (yield increment_value.asynq(n))
    return recursive_incrementer(n - 1)
@deduplicate()
@asynq()
def call_with_dirty():
    """Dirties its own dedup cache while executing; must not deadlock."""
    call_with_dirty.dirty()
@deduplicate()
@asynq()
def recursive_call_with_dirty():
    """Dirties itself and re-invokes asynchronously until `i` becomes positive."""
    global i
    if i > 0:
        return i
    i += 1
    recursive_call_with_dirty.dirty()
    yield recursive_call_with_dirty.asynq()
@asynq()
def dummy():
    """No-op async task used to force a scheduler yield point."""
    pass
@asynq()
def deduplicate_caller():
    """Indirection so deduplicated_recusive can re-enter itself via the scheduler."""
    yield deduplicated_recusive.asynq()
@deduplicate()
@asynq()
def deduplicated_recusive():
    # NOTE(review): "recusive" typo is preserved; the name is referenced
    # by deduplicate_caller above.
    global i
    existing = i
    i = 1
    yield dummy.asynq()
    # on the first pass (i was 0), re-enter through deduplicate_caller
    if existing == 0:
        deduplicate_caller()
@deduplicate()
@asynq()
def call_with_kwonly_arg(*, arg):
    """Deduplicated function with a required keyword-only argument."""
    return arg
def test_deduplicate():
    """Entry point: runs the deduplication checks synchronously."""
    _check_deduplicate()
@asynq()
def _check_deduplicate():
    """Concurrent identical calls must be collapsed into a single execution."""
    global i
    i = 0
    AsyncObject.cls_value = 0
    yield increment_value.asynq()
    assert_eq(1, i)
    # two equivalent calls batched together run only once (1 is the default)
    yield increment_value.asynq(), increment_value.asynq(1)
    assert_eq(2, i)
    obj = AsyncObject()
    yield obj.increment_value_method.asynq(), obj.increment_value_method.asynq(1)
    assert_eq(1, obj.value)
    yield AsyncObject.deduplicated_static_method.asynq(), AsyncObject.deduplicated_static_method.asynq(
        1
    )
    assert_eq(1, AsyncObject.cls_value)
    i = 0
    # dirtying inside a deduplicated call must not deadlock
    yield recursive_call_with_dirty.asynq()
    yield call_with_dirty.asynq()
    # keyword-only argument must be passed by keyword
    with AssertRaises(TypeError):
        yield call_with_kwonly_arg.asynq(1)
    assert_eq(1, (yield call_with_kwonly_arg.asynq(arg=1)))
    i = 0
    deduplicate_caller()
def test_deduplicate_recursion():
    """Entry point: deduplication must terminate on recursive batched calls."""
    _check_deduplicate_recursion()
@asynq()
def _check_deduplicate_recursion():
    # batches a deep recursive call with a plain one; must not deadlock
    yield recursive_incrementer.asynq(20), increment_value.asynq(0)
def test_async_timer():
    """Entry point for the AsyncTimer checks."""
    _check_async_timer()
@asynq()
def _slow_task(t):
    """Sleep for `t` seconds after a scheduler yield; returns 0."""
    yield None
    time.sleep(t)
    return 0
@asynq()
def _timed_slow_task(t):
    """Like _slow_task, but returns its own AsyncTimer measurement."""
    with AsyncTimer() as timer:
        yield None
        time.sleep(t)
    return timer.total_time
@asynq()
def _check_async_timer():
    """AsyncTimer totals should roughly equal wall time spent in each task.

    Fix: the third assertion previously re-checked results[0] instead of
    results[2], so the second _slow_task result was never verified.
    """
    with AsyncTimer() as t:
        results = yield [
            _slow_task.asynq(0.1),
            _timed_slow_task.asynq(0.1),
            _slow_task.asynq(0.1),
            _timed_slow_task.asynq(0.1),
        ]
    assert_eq(0, results[0])
    assert_eq(105000, results[1], tolerance=5000)
    assert_eq(0, results[2])  # was results[0]: the second _slow_task went unchecked
    assert_eq(105000, results[3], tolerance=5000)
    assert_eq(210000, sum(results), tolerance=10000)
    # the outer timer covers all four tasks, so it must exceed the inner sum
    assert_eq(410000, t.total_time, tolerance=10000)
    assert_gt(t.total_time, sum(results))
def test_async_event_hook():
    """AsyncEventHook invokes every handler on trigger/safe_trigger."""
    calls = []
    @asynq()
    def handler1(*args):
        # fails when called with no arguments (used by the safe_trigger checks)
        assert_gt(len(args), 0)
        calls.append("handler1%s" % str(args))
    def handler2(*args):
        calls.append("handler2%s" % str(args))
    hook = AsyncEventHook([handler1])
    hook.subscribe(handler2)
    # trigger
    hook.trigger(1, 2, "a")
    assert_unordered_list_eq(["handler1(1, 2, 'a')", "handler2(1, 2, 'a')"], calls)
    calls = []
    @asynq()
    def async_trigger():
        yield hook.trigger.asynq(2, 3)
    async_trigger()
    assert_unordered_list_eq(["handler1(2, 3)", "handler2(2, 3)"], calls)
    # safe_trigger
    calls = []
    hook2 = AsyncEventHook([handler1, handler2])
    # calling it with no args will raise AssertionError in handler1
    with AssertRaises(AssertionError):
        hook2.safe_trigger()
    assert_eq(["handler2()"], calls)
    # make sure that the order doesn't matter
    calls = []
    hook3 = AsyncEventHook([handler2, handler1])
    # calling it with no args will raise AssertionError in handler1
    with AssertRaises(AssertionError):
        hook3.safe_trigger()
    assert_eq(["handler2()"], calls)
class DeduplicateClassWrapper:
    """Two deduplicated methods on one class must get distinct cache keys."""
    @deduplicate()
    @asynq()
    def return_three(self):
        return 3
    @deduplicate()
    @asynq()
    def return_five(self):
        return 5
    @asynq()
    def return_three_and_five(self):
        # batches both deduplicated methods in a single yield
        return (yield (self.return_three.asynq(), self.return_five.asynq()))
def test_deduplicate_same_class():
    """Regression test: no dedup-cache mixup between methods of one class."""
    obj = DeduplicateClassWrapper()
    # make sure the five method has a separate key and therefore there was no cache mixup
    assert_eq((3, 5), obj.return_three_and_five())
|
|
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse  # NOTE(review): Python 2 only; use urllib.parse when porting to Python 3
from website import settings as osf_settings
from django.contrib import messages
from api.base.settings import *  # noqa
# TODO ALL SETTINGS FROM API WILL BE IMPORTED AND WILL NEED TO BE OVERRRIDEN
# TODO THIS IS A STEP TOWARD INTEGRATING ADMIN & API INTO ONE PROJECT
# import local  # Build own local.py (used with postgres)
# TODO - remove duplicated items, as this is now using settings from the API
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# Don't allow migrations
DATABASE_ROUTERS = ['admin.base.db.router.NoMigrationRouter']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'admin'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'admin-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
# set to False: prereg uses a SPA and ajax and grab the token to use it in the requests
CSRF_COOKIE_HTTPONLY = False
ALLOWED_HOSTS = [
    '.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        'OPTIONS': {
            'min_length': 5,
        }
    },
]
USE_L10N = False
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the > dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    # 3rd party
    'raven.contrib.django.raven_compat',
    'webpack_loader',
    'django_nose',
    'password_reset',
    'guardian',
    # OSF
    'osf',
    # Addons
    'addons.osfstorage',
    'addons.wiki',
    'addons.twofactor',
    # Internal apps
    'admin.common_auth',
    'admin.base',
    'admin.pre_reg',
    'admin.spam',
    'admin.metrics',
    'admin.nodes',
    'admin.users',
    'admin.desk',
    'admin.meetings',
    'admin.institutions',
    'admin.preprint_providers',
)
# Disable migrations for these apps (see DATABASE_ROUTERS above)
MIGRATION_MODULES = {
    'osf': None,
    'reviews': None,
    'addons_osfstorage': None,
    'addons_wiki': None,
    'addons_twofactor': None,
}
USE_TZ = True
TIME_ZONE = 'UTC'
# local development using https
if osf_settings.SECURE_MODE and osf_settings.DEBUG_MODE:
    INSTALLED_APPS += ('sslserver',)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'osf.OSFUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
    'tags': {'App': 'admin'},
    'dsn': osf_settings.SENTRY_DSN,
    'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
                         osf_settings.DOMAIN,
                         )
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE_CLASSES = (
    # TokuMX transaction support
    # Needs to go before CommonMiddleware, so that transactions are always started,
    # even in the event of a redirect. CommonMiddleware may cause other middlewares'
    # process_request to be skipped, e.g. when a trailing slash is omitted
    'api.base.middleware.DjangoGlobalMiddleware',
    'api.base.middleware.CeleryTaskMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
# Map Django message levels to Bootstrap text classes
MESSAGE_TAGS = {
    messages.SUCCESS: 'text-success',
    messages.ERROR: 'text-danger',
    messages.WARNING: 'text-warning',
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        }
    }]
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
    os.path.join(BASE_DIR, '../website/static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'public/js/',
        'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
    }
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
# Keen.io settings in local.py
KEEN_PROJECT_ID = osf_settings.KEEN['private']['project_id']
KEEN_READ_KEY = osf_settings.KEEN['private']['read_key']
KEEN_WRITE_KEY = osf_settings.KEEN['private']['write_key']
KEEN_CREDENTIALS = {
    'keen_ready': False
}
if KEEN_CREDENTIALS['keen_ready']:
    KEEN_CREDENTIALS.update({
        'keen_project_id': KEEN_PROJECT_ID,
        'keen_read_key': KEEN_READ_KEY,
        'keen_write_key': KEEN_WRITE_KEY
    })
ENTRY_POINTS = {'osf4m': 'osf4m', 'prereg_challenge_campaign': 'prereg',
                'institution_campaign': 'institution'}
# Set in local.py
DESK_KEY = ''
DESK_KEY_SECRET = ''
TINYMCE_APIKEY = ''
if DEBUG:
    INSTALLED_APPS += ('debug_toolbar', )
    MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )
    # NOTE(review): `lambda(_): True` is Python 2-only syntax (tuple
    # parameter); rewrite as `lambda _: True` when porting to Python 3.
    DEBUG_TOOLBAR_CONFIG = {
        'SHOW_TOOLBAR_CALLBACK': lambda(_): True,
        'DISABLE_PANELS': {
            'debug_toolbar.panels.templates.TemplatesPanel',
            'debug_toolbar.panels.redirects.RedirectsPanel'
        }
    }
|
|
"""Repository support for Bazaar."""
from __future__ import unicode_literals
import os
import dateutil.parser
from django.utils import six
from django.utils.encoding import force_text
from django.utils.timezone import utc
from reviewboard.scmtools.core import SCMClient, SCMTool, HEAD, PRE_CREATION
from reviewboard.scmtools.errors import (FileNotFoundError,
InvalidRevisionFormatError,
RepositoryNotFoundError, SCMError)
from reviewboard.ssh import utils as sshutils
# Python 2/3 compatibility: ``urlparse`` was merged into ``urllib.parse``
# in Python 3.  Either way we need the module-level ``uses_netloc`` list
# so the custom URI schemes below are parsed with a network location.
try:
    import urlparse
    uses_netloc = urlparse.uses_netloc
except ImportError:
    import urllib.parse
    uses_netloc = urllib.parse.uses_netloc

# Register these URI schemes so we can handle them properly.
sshutils.ssh_uri_schemes.append('bzr+ssh')
uses_netloc.extend(['bzr', 'bzr+ssh'])
class BZRTool(SCMTool):
    """Repository support for Canonical's Bazaar.

    Bazaar is one of the first distributed version control systems, often
    used with the `Launchpad <https://launchpad.net>`_ service.

    Bazaar can be downloaded at http://bazaar-vcs.org/.
    """

    scmtool_id = 'bazaar'
    name = 'Bazaar'

    # The "bzr" command-line client must be installed for this tool.
    dependencies = {
        'executables': ['bzr'],
    }

    # Timestamp format in bzr diffs.
    # This isn't totally accurate: there should be a %z at the end.
    # Unfortunately, strptime() doesn't support %z.
    DIFF_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'

    # "bzr diff" indicates that a file is new by setting the old
    # timestamp to the epoch time.
    PRE_CREATION_TIMESTAMP = '1970-01-01 00:00:00 +0000'

    # Revision specifier prefixes understood by Bazaar.  A revision string
    # starting with one of these is passed through to bzr unmodified
    # (see _revspec_from_revision).
    REVISION_SPEC_KEYWORDS = (
        'ancestor:',
        'annotate:',
        'before:',
        'branch:',
        'date:',
        'last:',
        'mainline:',
        'revid:',
        'revno:',
        'submit:',
        'tag:',
    )

    def __init__(self, repository):
        """Initialize the Bazaar tool.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository to communicate with.
        """
        super(BZRTool, self).__init__(repository)

        # The Local Site name (if any) controls which SSH keys rbssh uses.
        if repository.local_site:
            local_site_name = repository.local_site.name
        else:
            local_site_name = None

        self.client = BZRClient(path=repository.path,
                                local_site_name=local_site_name)

    def get_file(self, path, revision, **kwargs):
        """Return the contents from a file with the given path and revision.

        Args:
            path (unicode):
                The path of the file within the repository. This must not be
                a full Bazaar repository path.

            revision (unicode):
                The revision to fetch. If a Bazaar revision specifier keyword
                is provided, then it will be used to perform the lookup.
                Otherwise, this is assumed to be a date in the form of
                ``YYYY-MM-DD HH:MM:SS ZZZZ``, the format used in Bazaar diffs.

            **kwargs (dict, unused):
                Unused additional keyword arguments.

        Returns:
            bytes:
            The contents of the file from the repository.

        Raises:
            reviewboard.scmtools.errors.FileNotFoundError:
                The file at the given revision was not found in the
                repository.

            reviewboard.scmtools.errors.InvalidRevisionFormatError:
                The ``revision`` argument was in a format that's not
                supported.
        """
        # An epoch old-timestamp means the diff created this file; there is
        # no previous content to fetch.
        if revision == BZRTool.PRE_CREATION_TIMESTAMP:
            return b''

        revspec = self._revspec_from_revision(revision)

        if revspec is None:
            raise InvalidRevisionFormatError(path, revision)

        return self.client.get_file(path=path, revspec=revspec)

    def file_exists(self, path, revision, **kwargs):
        """Return whether a file exists with the given path and revision.

        Args:
            path (unicode):
                The path of the file within the repository. This must not be
                a full Bazaar repository path.

            revision (unicode):
                The revision to fetch. If a Bazaar revision specifier keyword
                is provided, then it will be used to perform the lookup.
                Otherwise, this is assumed to be a date in the form of
                ``YYYY-MM-DD HH:MM:SS ZZZZ``, the format used in Bazaar diffs.

            **kwargs (dict, unused):
                Unused additional keyword arguments.

        Returns:
            bool:
            ``True`` if the file exists. ``False`` if it does not.

        Raises:
            reviewboard.scmtools.errors.InvalidRevisionFormatError:
                The ``revision`` argument was in a format that's not
                supported.
        """
        # A pre-creation timestamp means the file did not exist yet.
        if revision == BZRTool.PRE_CREATION_TIMESTAMP:
            return False

        revspec = self._revspec_from_revision(revision)

        if revspec is None:
            raise InvalidRevisionFormatError(path, revision)

        return self.client.get_file_exists(path=path, revspec=revspec)

    def parse_diff_revision(self, filename, revision, *args, **kwargs):
        """Parse and return a filename and revision from a diff.

        If the revision identifier is a date indicating a new file, then
        this will return :py:data:`~reviewboard.scmtools.core.PRE_CREATION`.
        Otherwise, the revision identifier is returned directly.

        Args:
            filename (bytes):
                The filename in the diff.

            revision (bytes):
                The revision in the diff.

            **kwargs (dict, unused):
                Unused additional keyword arguments.

        Returns:
            tuple:
            A tuple containing two items:

            1. The normalized filename as a byte string.
            2. The normalized revision as a byte string or a
               :py:class:`~reviewboard.scmtools.core.Revision`.
        """
        # The diff revision arrives as bytes, so compare against the
        # encoded form of the epoch timestamp constant.
        if revision == BZRTool.PRE_CREATION_TIMESTAMP.encode('utf-8'):
            revision = PRE_CREATION

        return filename, revision

    def _revspec_from_revision(self, revision):
        """Return a Bazaar revision specification based on the given revision.

        If the revision starts with a Bazaar revision specifier keyword
        argument, then the revision will be used as-is (allowing for the `bzr
        diff-revid <https://launchpad.net/bzr-diff-revid>`_ plugin to be
        used). Otherwise, this will attempt to match a date in
        ``YYYY-MM-DD HH:MM:SS ZZZZ`` format (used by Bazaar diffs).

        Args:
            revision (unicode):
                The revision to parse.

        Returns:
            unicode:
            A revision specifier for the given revision. If a supported
            revision was not provided, this will return ``None``.
        """
        if revision == HEAD:
            # "last:1" is Bazaar's specifier for the most recent revision.
            revspec = 'last:1'
        elif revision.startswith(self.REVISION_SPEC_KEYWORDS):
            revspec = revision
        else:
            # Attempt to parse this as a timestamp into a Bazaar date revision
            # specifier.
            try:
                timestamp = dateutil.parser.parse(revision).astimezone(utc)
                revspec = 'date:%s' % timestamp.strftime('%Y-%m-%d,%H:%M:%S')
            except ValueError:
                # Not a parsable date; the caller treats None as an
                # unsupported revision format.
                revspec = None

        return revspec

    @classmethod
    def check_repository(cls, path, username=None, password=None,
                         local_site_name=None):
        """Check a repository to test its validity.

        This checks if a Bazaar repository exists and can be connected to. If
        the repository could not be found, an exception will be raised.

        Args:
            path (unicode):
                The repository path.

            username (unicode):
                The optional username used to connect to the repository.

            password (unicode):
                The optional password used to connect to the repository.

            local_site_name (unicode):
                The name of the Local Site that will own the repository.

        Raises:
            reviewboard.scmtools.errors.RepositoryNotFoundError:
                The repository could not be found, or there was an error
                communicating with it.
        """
        super(BZRTool, cls).check_repository(path, username, password,
                                             local_site_name)

        client = BZRClient(path=path,
                           local_site_name=local_site_name)

        if not client.is_valid_repository():
            raise RepositoryNotFoundError()
class BZRClient(SCMClient):
    """A client for performing Bazaar requests.

    This invokes the command line :command:`bzr` tool to perform file and
    repository lookups.
    """

    # Cached, lazily-computed value of the BZR_PLUGIN_PATH environment
    # variable, shared by all instances (see _run_bzr).
    _bzr_plugin_path = None

    def __init__(self, path, local_site_name):
        """Initialize the client.

        Args:
            path (unicode):
                The repository path provided by the user.

            local_site_name (unicode):
                The name of the Local Site owning the repository.
        """
        # Absolute local paths are converted to file:// URLs so bzr treats
        # them uniformly with remote repository URLs.
        if path.startswith('/'):
            self.path = 'file://%s' % path
        else:
            self.path = path

        self.local_site_name = local_site_name

    def is_valid_repository(self):
        """Return whether the provided repository information is valid.

        Returns:
            bool:
            ``True`` if information on the repository could be found.
            ``False`` if not.

        Raises:
            reviewboard.scmtools.errors.SCMError:
                There was an error talking to Bazaar.
        """
        p = self._run_bzr(['info', self._build_repo_path(self.path)])
        errmsg = force_text(p.stderr.read())
        ret_code = p.wait()

        # Raises SCMError for known internal bzr failures before the
        # exit-code check.
        self._check_error(errmsg)

        return ret_code == 0

    def get_file(self, path, revspec):
        """Return the contents of a file.

        This expects a path within the repository and a Bazaar revision
        specifier.

        Args:
            path (unicode):
                The path to the file within the repository.

            revspec (unicode):
                The Bazaar revision specifier used to look up the file.

        Returns:
            bytes:
            The contents of the file.

        Raises:
            reviewboard.scmtools.errors.FileNotFoundError:
                The file could not be found.
        """
        path = self._build_repo_path(path)

        # "bzr cat" writes the file contents at the given revision to stdout.
        p = self._run_bzr(['cat', '-r', revspec, path])
        contents = p.stdout.read()
        errmsg = force_text(p.stderr.read())
        failure = p.wait()

        self._check_error(errmsg)

        if failure:
            raise FileNotFoundError(path=path,
                                    revision=revspec,
                                    detail=errmsg)

        return contents

    def get_file_exists(self, path, revspec):
        """Return whether a file exists in the repository.

        This expects a path within the repository and a Bazaar revision
        specifier.

        Args:
            path (unicode):
                The path to the file within the repository.

            revspec (unicode):
                The Bazaar revision specifier used to look up the file.

        Returns:
            bool:
            ``True`` if the file exists in the repository. ``False`` if not.
        """
        path = self._build_repo_path(path)

        # A zero exit code from "bzr cat" means the file exists at that
        # revision; the contents themselves are discarded here.
        p = self._run_bzr(['cat', '-r', revspec, path])
        errmsg = force_text(p.stderr.read())
        ret_code = p.wait()

        self._check_error(errmsg)

        return ret_code == 0

    def _run_bzr(self, args):
        """Run a Bazaar command.

        This will run :command:`bzr` with the specified arguments, and sets
        up the environment to work with :command:`rbssh`.

        Args:
            args (list of unicode):
                The list of arguments to pass to :command:`bzr`.

        Returns:
            subprocess.Popen:
            The handle for the process.
        """
        if not BZRClient._bzr_plugin_path:
            # Prepend our bundled bzr plugins to any existing
            # BZR_PLUGIN_PATH.  Computed once and cached on the class.
            BZRClient._bzr_plugin_path = (
                '%s:%s' % (
                    os.path.join(os.path.dirname(__file__), 'plugins',
                                 'bzrlib', 'plugins'),
                    os.environ.get(str('BZR_PLUGIN_PATH'), str('')))
            ).encode('utf-8')

        return SCMTool.popen(
            ['bzr'] + args,
            local_site_name=self.local_site_name,
            env={
                'BZR_PLUGIN_PATH': BZRClient._bzr_plugin_path,
                # Route SSH access through Review Board's rbssh wrapper.
                'BZR_SSH': 'rbssh',
                # Keep timestamps deterministic regardless of server TZ.
                'TZ': 'UTC',
            })

    def _check_error(self, errmsg):
        """Check an error message from bzr and raise an exception if needed.

        If the message indicates a bzr internal error, an
        :py:class:`~reviewboard.scmtools.errors.SCMError` is raised.  For
        known failure modes a more helpful message is substituted first;
        otherwise the traceback portion is stripped from the message.

        Args:
            errmsg (unicode):
                The error message.

        Raises:
            reviewboard.scmtools.errors.SCMError:
                A suitable error message, if an internal error was hit.
        """
        if 'Bazaar has encountered an internal error' in errmsg:
            if 'prefetch() takes exactly 2 arguments (1 given)' in errmsg:
                # Known bzr/paramiko incompatibility; point at the upstream
                # bug instead of the raw traceback.
                errmsg = ('Installed bzr and paramiko modules are '
                          'incompatible. See '
                          'https://bugs.launchpad.net/bzr/+bug/1524066')
            else:
                errmsg = errmsg.split(
                    'Traceback (most recent call last):')[0].strip()

            raise SCMError(errmsg)

    def _build_repo_path(self, path):
        """Return a path for a repository or file within a repository.

        The returned path is based on the repository path and the provided
        path within the repository. The resulting path can be passed to
        :py:meth:`_run_bzr`.

        Args:
            path (unicode):
                The path within the repository.

        Returns:
            unicode:
            The resulting repository path.
        """
        return '%s/%s' % (self.path, path.lstrip('/'))
|
|
import unittest
import unittest.mock
import io
import re
from g1.bases import datetimes
from g1.operations.cores import alerts
class ConfigTest(unittest.TestCase):
    """Tests for loading alert destination configuration."""

    def test_destination_slack(self):
        # A SlackDestination without a webhook violates its invariant.
        with self.assertRaisesRegex(AssertionError, r'expect true'):
            alerts.Config.SlackDestination()

    def test_load_null(self):
        loaded = alerts.Config._load_data(b'{"destination": {"kind": "null"}}')
        expected = alerts.Config(destination=alerts.Config.NullDestination())
        self.assertEqual(loaded, expected)
        self.assertIsInstance(loaded.destination, alerts.Config.NullDestination)

    def test_load_slack(self):
        raw = b'{"destination": {"kind": "slack", "webhook": "x"}}'
        loaded = alerts.Config._load_data(raw)
        expected = alerts.Config(
            destination=alerts.Config.SlackDestination(webhook='x'),
        )
        self.assertEqual(loaded, expected)
        self.assertIsInstance(loaded.destination, alerts.Config.SlackDestination)
class SyslogTest(unittest.TestCase):
    """Tests for parsing syslog lines into alert messages."""

    @unittest.mock.patch.object(alerts, 'datetimes')
    def test_parse_syslog_entry(self, mock_datetimes):
        # Pin "now" so the resulting Message timestamp is predictable.
        mock_datetimes.utcnow.return_value = None
        rules = [
            # First rule never matches and must be skipped.
            alerts.Config.Rule(
                pattern=re.compile(r'does not match'),
                template=None,
            ),
            # Second rule captures the level and the message body.
            alerts.Config.Rule(
                pattern=re.compile(r'(?P<level>ERROR) (?P<raw_message>.*)'),
                template=alerts.Config.Rule.Template(
                    level='{level}',
                    title='{title}',
                    description='{raw_message}',
                ),
            ),
        ]
        expected = alerts.Message(
            host='foobar',
            level=alerts.Message.Levels.ERROR,
            title='syslog',
            description='this is an error message',
            timestamp=None,
        )
        parsed = alerts._parse_syslog_entry(
            rules,
            'some prefix ERROR this is an error message',
            'foobar',
        )
        self.assertEqual(parsed, expected)
class JournalTest(unittest.TestCase):
    """Tests for parsing systemd journal entries into alert messages."""

    # Dummy journal cursor id shared by all sub-cases.
    _ENTRY_ID = '01234567-89ab-cdef-0123-456789abcdef'

    def test_parse_journal_entry(self):
        # A rule with a template but a non-matching pattern yields nothing.
        templated_rule = alerts.Config.Rule(
            pattern=re.compile(r'something'),
            template=alerts.Config.Rule.Template(
                level='ERROR',
                title='{title}',
                description='{raw_message}',
            ),
        )
        self.assertIsNone(
            alerts._parse_journal_entry(
                [templated_rule],
                {'MESSAGE': 'no match'},
                'foobar',
                self._ENTRY_ID,
            )
        )

        # A matching rule without a template is a deliberate "ignore" rule.
        ignore_rule = alerts.Config.Rule(
            pattern=re.compile(r'something'),
            template=None,
        )
        self.assertIsNone(
            alerts._parse_journal_entry(
                [ignore_rule],
                {'MESSAGE': 'this has something'},
                'foobar',
                self._ENTRY_ID,
            )
        )

        # Both str and list-of-int MESSAGE payloads must be handled.
        matching_rule = alerts.Config.Rule(
            pattern=re.compile(
                r'(?P<level>INFO) '
                r'this (?P<raw_message>.* something)'
            ),
            template=alerts.Config.Rule.Template(
                level='{level}',
                title='{title}',
                description='{raw_message}',
            ),
        )
        expected = alerts.Message(
            host='foobar',
            level=alerts.Message.Levels.INFO,
            title='spam',
            description='has something',
            timestamp=datetimes.utcfromtimestamp(1001.2002),
        )
        for message in (
            'INFO this has something',
            list(b'INFO this has something'),
        ):
            with self.subTest(message):
                entry = {
                    'SYSLOG_IDENTIFIER': 'spam',
                    'MESSAGE': message,
                    '_SOURCE_REALTIME_TIMESTAMP': '1001200200',
                }
                self.assertEqual(
                    alerts._parse_journal_entry(
                        [matching_rule],
                        entry,
                        'foobar',
                        self._ENTRY_ID,
                    ),
                    expected,
                )
class CollectdTest(unittest.TestCase):
    """Tests for parsing collectd notifications into alert messages."""

    def test_parse_collectd_notification(self):
        # Full round-trip: headers plus multi-line body text.
        self.assertEqual(
            alerts.parse_collectd_notification(
                io.StringIO(
                    '''\
Severity: OKAY
Time: 1234.567
Host: foobar
Plugin: cpu
PluginInstance: 0
Type: cpu
TypeInstance: idle
DataSource: value
CurrentValue: 2.000000e+01
WarningMin: 1.000000e+01
WarningMax: nan
FailureMin: 5.000000e+00
FailureMax: nan
Some message.
Second line of message.
'''
                )
            ),
            alerts.Message(
                host='foobar',
                level=alerts.Message.Levels.GOOD,
                title='cpu/0/idle: 20.00 >= 10.00',
                description='Some message.\nSecond line of message.\n',
                timestamp=datetimes.utcfromtimestamp(1234.567),
            ),
        )

    def test_make_title_from_collectd_headers(self):
        # Missing value/instance fields fall back to the bare plugin name
        # or to "?" placeholders with a nan value.
        self.assertEqual(
            alerts._make_title_from_collectd_headers({
                'Plugin': 'foobar',
            }),
            'foobar',
        )
        self.assertEqual(
            alerts._make_title_from_collectd_headers({
                'Plugin': 'cpu',
            }),
            'cpu/?/?: nan',
        )
        # All four thresholds present: titles report which bound the
        # current value violates (or the in-range interval).
        for current_value, expect in [
            ('20', 'cpu/0/idle: 20.00 < 30.00'),
            ('30', 'cpu/0/idle: 30.00 < 40.00'),
            ('50', 'cpu/0/idle: 40.00 <= 50.00 <= 60.00'),
            ('70', 'cpu/0/idle: 70.00 > 60.00'),
            ('80', 'cpu/0/idle: 80.00 > 70.00'),
        ]:
            with self.subTest((current_value, expect)):
                self.assertEqual(
                    alerts._make_title_from_collectd_headers({
                        'Plugin':
                        'cpu',
                        'PluginInstance':
                        '0',
                        'TypeInstance':
                        'idle',
                        'CurrentValue':
                        current_value,
                        'FailureMin':
                        '30',
                        'WarningMin':
                        '40',
                        'WarningMax':
                        '60',
                        'FailureMax':
                        '70',
                    }),
                    expect,
                )
        # Only upper bounds defined (min thresholds are nan).
        for current_value, expect in [
            ('20', 'cpu/?/?: 20.00 <= 60.00'),
            ('30', 'cpu/?/?: 30.00 <= 60.00'),
            ('50', 'cpu/?/?: 50.00 <= 60.00'),
            ('70', 'cpu/?/?: 70.00 > 60.00'),
            ('80', 'cpu/?/?: 80.00 > 70.00'),
        ]:
            with self.subTest((current_value, expect)):
                self.assertEqual(
                    alerts._make_title_from_collectd_headers({
                        'Plugin':
                        'cpu',
                        'CurrentValue':
                        current_value,
                        'FailureMin':
                        'nan',
                        'WarningMin':
                        'nan',
                        'WarningMax':
                        '60',
                        'FailureMax':
                        '70',
                    }),
                    expect,
                )
        # Only lower bounds defined (max thresholds are nan).
        for current_value, expect in [
            ('20', 'cpu/?/?: 20.00 < 30.00'),
            ('30', 'cpu/?/?: 30.00 < 40.00'),
            ('50', 'cpu/?/?: 50.00 >= 40.00'),
            ('70', 'cpu/?/?: 70.00 >= 40.00'),
            ('80', 'cpu/?/?: 80.00 >= 40.00'),
        ]:
            with self.subTest((current_value, expect)):
                self.assertEqual(
                    alerts._make_title_from_collectd_headers({
                        'Plugin':
                        'cpu',
                        'CurrentValue':
                        current_value,
                        'FailureMin':
                        '30',
                        'WarningMin':
                        '40',
                        'WarningMax':
                        'nan',
                        'FailureMax':
                        'nan',
                    }),
                    expect,
                )
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple peak fitting utility with Lmfit
======================================
Current fitting backend: Lmfit_
.. _Lmfit: https://lmfit.github.io/lmfit-py/
"""
#: BASE
import numpy as np
from matplotlib.pyplot import cm
#: LMFIT IMPORTS
from lmfit.models import ConstantModel, VoigtModel
#: SLOTH
from sloth.utils.matplotlib import get_colors
from sloth.utils.logging import getLogger
_logger = getLogger("sloth.fit.peakfit_lmfit", level="INFO")
def fit_peak(
    x,
    y,
    num=1,
    positions=None,
    amplitudes=None,
    widths=None,
    expressions=None,
    bkgModel=None,
    peakModel=None,
):
    """peak fit with lmfit

    Description
    -----------
    This peak fitting model is built to fit one to three peaks (with prefixes:
    'p1_', 'p2_', 'p3_'). The main control parameter is the initial guess of the
    peaks positions.

    Notes
    -----
    For the Gaussian function, amplitude means weighting factor
    multiplying a unit-normalized Gaussian, so that the maximum height
    at the centroid is Amplitude/(sqrt(2pi)*sigma), and that the
    full-width at half maximum is ~2.355 sigma. In the fit, amplitude,
    center, and sigma can be varied, while height and fwhm are
    reported values, derived from these quantities.

    To guess the Gaussian amplitude (A) from the peak maximum (H) and
    a guess width (W), one could use the simple relation::

        A ~ 5.90 * H * W

    Parameters
    ----------
    num : int
        number of peaks to fit: currently between 1 and 3 [1]
    positions : None or list of floats
        initial peaks positions (None -> [None], i.e. guessed from data)
    amplitudes : None or list of floats
        initial peaks amplitudes (None -> [None], i.e. guessed from data)
    widths : None or list of floats
        initial peaks widths (None -> [None], i.e. guessed as 1)
    expressions : None or dict
        parameters expressions
    bkgModel : None or lmfit.Model (optional)
        if None: ConstantModel
    peakModel : None or lmfit.Model (optional)
        if None: VoigtModel

    Returns
    -------
    lmfit.fit object
    """
    # BUG FIX: the original signature used mutable defaults
    # (positions=[None], ...).  Because the guessed values are written
    # back into these lists below, the guesses from one call leaked into
    # the defaults of every subsequent call.  Fresh lists are created
    # per call instead; callers passing their own lists are unaffected.
    if positions is None:
        positions = [None]
    if amplitudes is None:
        amplitudes = [None]
    if widths is None:
        widths = [None]
    if num > 3:
        _logger.error("current model is limited to 3 peaks only!")
        return None
    if (len(positions) < num) or (len(amplitudes) < num) or (len(widths) < num):
        _logger.error("'positions'/'amplitudes'/'widths' < 'num'!")
        return None
    if bkgModel is None:
        bkgModel = ConstantModel
    if peakModel is None:
        peakModel = VoigtModel

    # Background model, initialized at the data minimum.
    bkg = bkgModel(prefix="bkg_")
    pars = bkg.guess(y, x=x)
    pars["bkg_c"].set(y.min())
    mod = bkg

    # Data maximum, used as the fallback guess for any missing
    # position/amplitude (loop-invariant, so computed once).
    xmax = x[np.argmax(y)]
    ymax = y.max()

    for ipk in range(num):
        pkPos = positions[ipk]
        pkAmp = amplitudes[ipk]
        pkW = widths[ipk]
        pfx = f"p{ipk+1}_"
        if pkPos is None:
            _logger.info(f"{pfx} center guess at x={xmax}")
            pkPos = xmax
            positions[ipk] = pkPos
        if pkAmp is None:
            _logger.info(f"{pfx} amplitude guess at y={ymax}")
            pkAmp = ymax
            amplitudes[ipk] = pkAmp
        if pkW is None:
            pkW = 1
            _logger.info(f"{pfx} width guess {pkW}")
            widths[ipk] = pkW
        pk = peakModel(prefix=pfx)
        pars.update(pk.make_params())
        pars[f"{pfx}center"].set(pkPos)
        pars[f"{pfx}amplitude"].set(pkAmp)
        pars[f"{pfx}sigma"].set(pkW)
        #: force side peaks to stay same side of the main peak
        if not (ipk == 0):
            if pkPos < positions[0]:
                pars[f"{pfx}center"].set(pkPos, max=positions[0])
            else:
                pars[f"{pfx}center"].set(pkPos, min=positions[0])
        mod += pk

    #: set mathematical constraints if given
    if expressions is not None:
        assert type(expressions) is dict, "Expressions should be a dictionary"
        for key, value in expressions.items():
            try:
                pars[key].set(expr=value)
            except KeyError:
                # BUG FIX: the original f-string had no braces, so it
                # always logged the literal text 'key':'value'.
                _logger.warning(
                    f"[fit_peak] cannot set expression '{key}':'{value}'"
                )

    _logger.info("Running fit...")
    fitobj = mod.fit(y, pars, x=x)
    return fitobj
def get_curves_fit(x, fitobj, components="p", with_initial_guess=False):
    """get a list of curves from the fit object

    Parameters
    ----------
    x : array
    fitobj : lmfit.model.fit object
    components : False or str (optional)
        if give, include components starting with 'components' string
        default is 'p' (=peaks only)

    Returns
    -------
    curves = [[x, y_best, {'legend': 'best fit', 'color': 'red'}]
              [x, y_initial, {'legend': 'initial guess', 'color': 'gray'}]
              [x, y_componentN], {'legend': 'component prefix N', 'color': 'pink'}]
             ]
    """
    def _mkstyle(label, color, width=1):
        # Plot-style dictionary shared by every returned curve.
        return {
            "legend": label,
            "label": label,
            "color": color,
            "linewidth": width,
            "linestyle": "-",
        }

    curves = [[x, fitobj.best_fit, _mkstyle("best fit", "red")]]

    if with_initial_guess:
        curves.append(
            [x, fitobj.init_fit, _mkstyle("initial guess", "gray", 0.5)]
        )

    if components:
        comps = fitobj.eval_components()
        _logger.debug(f"Available fit components are: {comps.keys()}")
        palette = get_colors(len(comps.keys()), colormap=cm.viridis)
        for idx, comp_name in enumerate(comps.keys()):
            if comp_name.startswith(components):
                curves.append(
                    [x, comps[comp_name],
                     _mkstyle(f"{comp_name}", palette[idx])]
                )

    return curves
def main_test():
    """Test and show example usage"""
    import matplotlib.pyplot as plt
    from lmfit.lineshapes import gaussian
    from lmfit.models import GaussianModel

    def _get_gauss(x, amp, cen, sigma, noise):
        # Unit-normalized Gaussian plus uniform noise in [0, noise).
        signal = gaussian(x, amplitude=amp, center=cen, sigma=sigma)
        signal += noise * np.random.random(size=signal.shape)
        return signal

    # Synthetic data: three overlapping peaks on a small linear background.
    x = np.linspace(-100, 100, 200)
    y1 = _get_gauss(x, 100, 0, 5, 0.2)
    y2 = _get_gauss(x, 60, -18, 10, 0.1)
    y3 = _get_gauss(x, 90, 10, 10, 0.2)
    y = 0.0015 * x + y1 + y2 + y3
    figname = "test_peakfit_lmfit"
    # Use the data maximum as the starting guess for the main peak and
    # offset guesses for the two side peaks.
    ymax = y.max()
    xmax = x[np.argmax(y)]
    fitobj = fit_peak(
        x,
        y,
        num=3,
        positions=[xmax, xmax - 20, xmax + 17],
        amplitudes=[ymax, ymax / 2.0, ymax / 3.0],
        widths=[1, 1, 1],
        peakModel=GaussianModel,
    )
    fit_curves = get_curves_fit(x, fitobj, with_initial_guess=True)
    #: plot
    plt.ion()
    plt.close(figname)
    fig, ax = plt.subplots(num=figname)
    ax.plot(x, y, label="data", color="black")
    for fc in fit_curves:
        ax.plot(fc[0], fc[1], label=fc[2]["legend"], color=fc[2]["color"])
    ax.legend(loc="best")
    plt.show()
    return fig, ax
# Run the interactive demo when executed as a script.
if __name__ == "__main__":
    fig, ax = main_test()
|
|
import os
import environ
# PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
# PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
# BASE_DIR = PACKAGE_ROOT
# PROJECT_ROOT = environ.Path(__file__) - 2

# Path anchors: ROOT_DIR is four directory levels above this file
# (the repository root); APP_DIR is the src/ tree inside it.
ROOT_DIR = environ.Path(__file__) - 4
APP_DIR = ROOT_DIR.path('src')
PROJECT_ROOT = APP_DIR.path('venv')

env = environ.Env()

# This section added from an update to standards in CookieCutter Django to ensure no errors are encountered at runserver/migrations
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
    # Opt-in loading of a .env file from the repository root.
    env_file = str(ROOT_DIR.path('.env'))
    print('Loading : {}'.format(env_file))
    env.read_env(env_file)
    print('The .env file has been loaded. See base.py for more information')
# BUG FIX: DEBUG was commented out, but TEMPLATES further down in this
# module references it ("debug": DEBUG), so importing these settings
# raised a NameError.  Define it from the environment, defaulting to
# False so production stays safe unless DJANGO_DEBUG is set.
DEBUG = env.bool('DJANGO_DEBUG', default=False)

# Previous sqlite fallback, kept for reference:
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": str(ROOT_DIR.path('db.sqlite3')),  # "dev.db",
#     }
# }
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'djangogirls',
        'USER': 'name',
        # NOTE(review): real credentials should come from the environment
        # or a local settings override, not be committed here.
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '',
    }
}
# Password strength checks applied on user creation / password change.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        'OPTIONS': {
            'min_length': 8,
        }
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

ALLOWED_HOSTS = [
    "localhost",
    "127.0.0.1",
    "lhventapp.pythonanywhere.com",
]

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"

SITE_ID = int(os.environ.get("SITE_ID", 1))

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = str(ROOT_DIR("site_media/media"))

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"

# Absolute path to the directory static files should be collected to.
# Don"t put anything in this directory yourself; store your static files
# in apps" "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = str(ROOT_DIR("site_media/static"))

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"

# Additional locations of static files
STATICFILES_DIRS = [
    str(APP_DIR.path("static/dist")),
]

# Hashed filenames for cache busting on collectstatic.
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
# Make this unique, and don't share it with anybody.
# SECRET_KEY = "hyv*5n&9v&1xql7i7()e+(w5b#07aogy_t6uh#evuhni%xi(@o"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [
            str(ROOT_DIR.path("templates")),
        ],
        "APP_DIRS": True,
        "OPTIONS": {
            # NOTE(review): requires DEBUG to be defined earlier in this
            # module before the settings are imported.
            "debug": DEBUG,
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.template.context_processors.request",
                "django.contrib.messages.context_processors.messages",
                "account.context_processors.account",
                "pinax_theme_bootstrap.context_processors.theme",
            ],
        },
    },
]

MIDDLEWARE = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.auth.middleware.SessionAuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "LHV_app.urls"

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "LHV_app.wsgi.application"

# Installed apps, grouped by origin for readability.
DJANGO_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    "django.contrib.sites",
    'django.contrib.staticfiles',
)

THIRD_PARTY_APPS = (
    # theme
    "bootstrapform",
    "pinax_theme_bootstrap",
    # external
    "account",
    "pinax.eventlog",
    "pinax.webanalytics",
    "versatileimagefield",
    "schedule",
    "datetimewidget",
    "phonenumber_field",
    "rest_framework",
)

LOCAL_APPS = (
    "LHV_app",
    "company",
    "customer_finance",
    "equipment",
    "finance",
    "hourly",
    "operation_finance",
    "stock",
    "service",
    "time_log",
    "work_order",
    "employee",
)

INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {
        "require_debug_false": {
            "()": "django.utils.log.RequireDebugFalse"
        }
    },
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "django.utils.log.AdminEmailHandler"
        }
    },
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
    }
}

FIXTURE_DIRS = [
    str(APP_DIR.path("fixtures")),
]

# Print emails to the console; swap for a real backend in production.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

# django-user-accounts configuration.
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False
ACCOUNT_LOGIN_REDIRECT_URL = "home"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
ACCOUNT_USE_AUTH_AUTHENTICATE = True
AUTHENTICATION_BACKENDS = [
    "account.auth_backends.UsernameAuthenticationBackend",
]

REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
    'PAGE_SIZE': 10
}
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For miscellaneous util methods used with volume."""
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import throttling
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class NotifyUsageTestCase(test.TestCase):
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_volume_usage(self, mock_rpc, mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_volume_usage(mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('volume', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'volume.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_volume_usage_with_kwargs(self, mock_rpc, mock_conf,
mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_volume_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix',
extra_usage_info={'a': 'b', 'c': 'd'},
host='host2')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume, a='b', c='d')
mock_rpc.get_notifier.assert_called_once_with('volume', 'host2')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'volume.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_usage(self, mock_rpc,
mock_conf, mock_usage):
mock_conf.host = 'host1'
output = volume_utils.notify_about_replication_usage(
mock.sentinel.context,
mock.sentinel.volume,
'test_suffix')
self.assertIsNone(output)
mock_usage.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volume)
mock_rpc.get_notifier.assert_called_once_with('replication', 'host1')
mock_rpc.get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.context,
'replication.test_suffix',
mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_usage_with_kwargs(self, mock_rpc,
                                                    mock_conf, mock_usage):
    """Extra usage info and host override flow through for replication."""
    ctxt = mock.sentinel.context
    vol = mock.sentinel.volume
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_replication_usage(
        ctxt, vol, 'test_suffix',
        extra_usage_info={'a': 'b', 'c': 'd'},
        host='host2')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, vol, a='b', c='d')
    # Explicit host wins over CONF.host.
    mock_rpc.get_notifier.assert_called_once_with('replication', 'host2')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt, 'replication.test_suffix',
                                          mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_error(self, mock_rpc,
                                        mock_conf, mock_usage):
    """Replication errors go out via the notifier's error() channel."""
    ctxt = mock.sentinel.context
    vol = mock.sentinel.volume
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_replication_error(ctxt, vol,
                                                         'test_suffix')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, vol)
    mock_rpc.get_notifier.assert_called_once_with('replication', 'host1')
    notifier = mock_rpc.get_notifier.return_value
    # Unlike the usage path, errors use notifier.error, not notifier.info.
    notifier.error.assert_called_once_with(ctxt, 'replication.test_suffix',
                                           mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_volume')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_replication_error_with_kwargs(self, mock_rpc,
                                                    mock_conf, mock_usage):
    """extra_error_info and host override flow through to the error path."""
    ctxt = mock.sentinel.context
    vol = mock.sentinel.volume
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_replication_error(
        ctxt, vol, 'test_suffix',
        extra_error_info={'a': 'b', 'c': 'd'},
        host='host2')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, vol, a='b', c='d')
    mock_rpc.get_notifier.assert_called_once_with('replication', 'host2')
    notifier = mock_rpc.get_notifier.return_value
    notifier.error.assert_called_once_with(ctxt, 'replication.test_suffix',
                                           mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_snapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_snapshot_usage(self, mock_rpc,
                                     mock_conf, mock_usage):
    """Snapshot usage is published on the 'snapshot' topic with CONF.host."""
    ctxt = mock.sentinel.context
    snap = mock.sentinel.snapshot
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_snapshot_usage(ctxt, snap,
                                                      'test_suffix')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, snap)
    mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host1')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt, 'snapshot.test_suffix',
                                          mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_snapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_snapshot_usage_with_kwargs(self, mock_rpc, mock_conf,
                                                 mock_usage):
    """Extra usage info and host override flow through for snapshots."""
    ctxt = mock.sentinel.context
    snap = mock.sentinel.snapshot
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_snapshot_usage(
        ctxt, snap, 'test_suffix',
        extra_usage_info={'a': 'b', 'c': 'd'},
        host='host2')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, snap, a='b', c='d')
    mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host2')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt, 'snapshot.test_suffix',
                                          mock_usage.return_value)
def test_usage_from_snapshot(self):
    """_usage_from_snapshot flattens a raw snapshot dict into usage info."""
    raw_snapshot = {
        'project_id': '12b0330ec2584a',
        'user_id': '158cba1b8c2bb6008e',
        'volume': {'availability_zone': 'nova'},
        'volume_id': '55614621',
        'volume_size': 1,
        'id': '343434a2',
        'display_name': '11',
        'created_at': '2014-12-11T10:10:00',
        'status': 'pause',
        'deleted': '',
    }
    usage_info = volume_utils._usage_from_snapshot(mock.sentinel.context,
                                                   raw_snapshot)
    # project_id maps to tenant_id, id to snapshot_id, and the availability
    # zone comes from the nested volume record; everything else carries over.
    expected = {
        'tenant_id': '12b0330ec2584a',
        'user_id': '158cba1b8c2bb6008e',
        'availability_zone': 'nova',
        'volume_id': '55614621',
        'volume_size': 1,
        'snapshot_id': '343434a2',
        'display_name': '11',
        'created_at': '2014-12-11T10:10:00',
        'status': 'pause',
        'deleted': '',
    }
    self.assertEqual(expected, usage_info)
@mock.patch('cinder.volume.utils._usage_from_consistencygroup')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_consistencygroup_usage(self, mock_rpc,
                                             mock_conf, mock_usage):
    """CG usage is published on the 'consistencygroup' topic."""
    ctxt = mock.sentinel.context
    group = mock.sentinel.consistencygroup
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_consistencygroup_usage(
        ctxt, group, 'test_suffix')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, group)
    mock_rpc.get_notifier.assert_called_once_with('consistencygroup',
                                                  'host1')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt,
                                          'consistencygroup.test_suffix',
                                          mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_consistencygroup')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_consistencygroup_usage_with_kwargs(self, mock_rpc,
                                                         mock_conf,
                                                         mock_usage):
    """Extra usage info and host override flow through for CGs."""
    ctxt = mock.sentinel.context
    group = mock.sentinel.consistencygroup
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_consistencygroup_usage(
        ctxt, group, 'test_suffix',
        extra_usage_info={'a': 'b', 'c': 'd'},
        host='host2')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, group, a='b', c='d')
    mock_rpc.get_notifier.assert_called_once_with('consistencygroup',
                                                  'host2')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt,
                                          'consistencygroup.test_suffix',
                                          mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_cgsnapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_cgsnapshot_usage(self, mock_rpc,
                                       mock_conf, mock_usage):
    """CG snapshot usage is published on the 'cgsnapshot' topic."""
    ctxt = mock.sentinel.context
    cgsnap = mock.sentinel.cgsnapshot
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_cgsnapshot_usage(ctxt, cgsnap,
                                                        'test_suffix')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, cgsnap)
    mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host1')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt, 'cgsnapshot.test_suffix',
                                          mock_usage.return_value)
@mock.patch('cinder.volume.utils._usage_from_cgsnapshot')
@mock.patch('cinder.volume.utils.CONF')
@mock.patch('cinder.volume.utils.rpc')
def test_notify_about_cgsnapshot_usage_with_kwargs(self, mock_rpc,
                                                   mock_conf, mock_usage):
    """Extra usage info and host override flow through for CG snapshots."""
    ctxt = mock.sentinel.context
    cgsnap = mock.sentinel.cgsnapshot
    mock_conf.host = 'host1'
    result = volume_utils.notify_about_cgsnapshot_usage(
        ctxt, cgsnap, 'test_suffix',
        extra_usage_info={'a': 'b', 'c': 'd'},
        host='host2')
    self.assertIsNone(result)
    mock_usage.assert_called_once_with(ctxt, cgsnap, a='b', c='d')
    mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host2')
    notifier = mock_rpc.get_notifier.return_value
    notifier.info.assert_called_once_with(ctxt, 'cgsnapshot.test_suffix',
                                          mock_usage.return_value)
class LVMVolumeDriverTestCase(test.TestCase):
    """Tests for the dd blocksize/count calculation helper."""

    def test_convert_blocksize_option(self):
        # A well-formed volume_dd_blocksize is used as-is; the count is
        # chosen so bs * count covers the 1024 MiB volume.
        bs, count = volume_utils._calculate_count(1024, '10M')
        self.assertEqual('10M', bs)
        self.assertEqual(103, count)
        # Malformed, fractional, zero-sized, negative and non-numeric
        # blocksizes all fall back to the 1M default with a full count.
        for bad_blocksize in ('1xBBB', '1.3M', '0M', '-1M', 'ABM'):
            bs, count = volume_utils._calculate_count(1024, bad_blocksize)
            self.assertEqual('1M', bs)
            self.assertEqual(1024, count)
class OdirectSupportTestCase(test.TestCase):
    """Tests for detection of dd O_DIRECT support via a zero-count probe."""

    @mock.patch('cinder.utils.execute')
    def test_check_for_odirect_support(self, mock_exec):
        # Default probe tests the output side with oflag=direct.
        self.assertTrue(
            volume_utils.check_for_odirect_support('/dev/abc', '/dev/def'))
        mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
                                          'of=/dev/def', 'oflag=direct',
                                          run_as_root=True)
        mock_exec.reset_mock()
        # An explicitly requested flag is passed through verbatim.
        self.assertTrue(
            volume_utils.check_for_odirect_support('/dev/abc', '/dev/def',
                                                   'iflag=direct'))
        mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
                                          'of=/dev/def', 'iflag=direct',
                                          run_as_root=True)

    @mock.patch('cinder.utils.execute',
                side_effect=processutils.ProcessExecutionError)
    def test_check_for_odirect_support_error(self, mock_exec):
        # A failing probe means O_DIRECT is not supported.
        self.assertFalse(
            volume_utils.check_for_odirect_support('/dev/abc', '/dev/def'))
        mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc',
                                          'of=/dev/def', 'oflag=direct',
                                          run_as_root=True)
class ClearVolumeTestCase(test.TestCase):
    """Tests for volume_utils.clear_volume (secure erase of volume data)."""

    @mock.patch('cinder.volume.utils.copy_volume', return_value=None)
    @mock.patch('cinder.volume.utils.CONF')
    def test_clear_volume_conf(self, mock_conf, mock_copy):
        # With volume_clear='zero' the volume is overwritten from /dev/zero
        # using the configured blocksize and ionice class.
        mock_conf.volume_clear = 'zero'
        mock_conf.volume_clear_size = 0
        mock_conf.volume_dd_blocksize = '1M'
        mock_conf.volume_clear_ionice = '-c3'
        output = volume_utils.clear_volume(1024, 'volume_path')
        self.assertIsNone(output)
        # volume_clear_size == 0 means "wipe the whole volume" (1024 MiB).
        mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1024,
                                          '1M', sync=True,
                                          execute=utils.execute, ionice='-c3',
                                          throttle=None)

    @mock.patch('cinder.volume.utils.copy_volume', return_value=None)
    @mock.patch('cinder.volume.utils.CONF')
    def test_clear_volume_args(self, mock_conf, mock_copy):
        # Explicit arguments override the CONF settings (method, size,
        # ionice class).
        mock_conf.volume_clear = 'shred'
        mock_conf.volume_clear_size = 0
        mock_conf.volume_dd_blocksize = '1M'
        mock_conf.volume_clear_ionice = '-c3'
        output = volume_utils.clear_volume(1024, 'volume_path', 'zero', 1,
                                           '-c0')
        self.assertIsNone(output)
        mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1,
                                          '1M', sync=True,
                                          execute=utils.execute, ionice='-c0',
                                          throttle=None)

    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.volume.utils.CONF')
    def test_clear_volume_shred(self, mock_conf, mock_exec):
        # 'shred' with a clear size limits the wipe to that many MiB (-s).
        mock_conf.volume_clear = 'shred'
        mock_conf.volume_clear_size = 1
        mock_conf.volume_clear_ionice = None
        output = volume_utils.clear_volume(1024, 'volume_path')
        self.assertIsNone(output)
        mock_exec.assert_called_once_with(
            'shred', '-n3', '-s1MiB', "volume_path", run_as_root=True)

    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.volume.utils.CONF')
    def test_clear_volume_shred_not_clear_size(self, mock_conf, mock_exec):
        # Without a clear size, shred runs over the full device (no -s flag).
        mock_conf.volume_clear = 'shred'
        mock_conf.volume_clear_size = None
        mock_conf.volume_clear_ionice = None
        output = volume_utils.clear_volume(1024, 'volume_path')
        self.assertIsNone(output)
        mock_exec.assert_called_once_with(
            'shred', '-n3', "volume_path", run_as_root=True)

    @mock.patch('cinder.volume.utils.CONF')
    def test_clear_volume_invalid_opt(self, mock_conf):
        # An unknown volume_clear strategy is rejected with a config error.
        mock_conf.volume_clear = 'non_existent_volume_clearer'
        mock_conf.volume_clear_size = 0
        mock_conf.volume_clear_ionice = None
        self.assertRaises(exception.InvalidConfigurationValue,
                          volume_utils.clear_volume,
                          1024, "volume_path")
class CopyVolumeTestCase(test.TestCase):
    """Tests for volume_utils.copy_volume (dd-based data copy)."""

    @mock.patch('cinder.volume.utils._calculate_count',
                return_value=(1234, 5678))
    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=True)
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.volume.utils.CONF')
    def test_copy_volume_dd_iflag_and_oflag(self, mock_conf, mock_exec,
                                            mock_support, mock_count):
        # When O_DIRECT is supported, dd gets both iflag=direct and
        # oflag=direct regardless of the sync flag.
        fake_throttle = throttling.Throttle(['fake_throttle'])
        output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
                                          sync=True, execute=utils.execute,
                                          ionice=None, throttle=fake_throttle)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('fake_throttle', 'dd',
                                          'if=/dev/zero',
                                          'of=/dev/null', 'count=5678',
                                          'bs=1234', 'iflag=direct',
                                          'oflag=direct', run_as_root=True)
        mock_exec.reset_mock()
        output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
                                          sync=False, execute=utils.execute,
                                          ionice=None, throttle=fake_throttle)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('fake_throttle', 'dd',
                                          'if=/dev/zero',
                                          'of=/dev/null', 'count=5678',
                                          'bs=1234', 'iflag=direct',
                                          'oflag=direct', run_as_root=True)

    @mock.patch('cinder.volume.utils._calculate_count',
                return_value=(1234, 5678))
    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=False)
    @mock.patch('cinder.utils.execute')
    def test_copy_volume_dd_no_iflag_or_oflag(self, mock_exec,
                                              mock_support, mock_count):
        # Without O_DIRECT support, sync=True falls back to conv=fdatasync
        # and sync=False adds no extra dd flags.
        fake_throttle = throttling.Throttle(['fake_throttle'])
        output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
                                          sync=True, execute=utils.execute,
                                          ionice=None, throttle=fake_throttle)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('fake_throttle', 'dd',
                                          'if=/dev/zero',
                                          'of=/dev/null', 'count=5678',
                                          'bs=1234', 'conv=fdatasync',
                                          run_as_root=True)
        mock_exec.reset_mock()
        output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
                                          sync=False, execute=utils.execute,
                                          ionice=None, throttle=fake_throttle)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('fake_throttle', 'dd',
                                          'if=/dev/zero',
                                          'of=/dev/null', 'count=5678',
                                          'bs=1234', run_as_root=True)

    @mock.patch('cinder.volume.utils._calculate_count',
                return_value=(1234, 5678))
    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=False)
    @mock.patch('cinder.utils.execute')
    def test_copy_volume_dd_no_throttle(self, mock_exec, mock_support,
                                        mock_count):
        # Without a throttle, dd is executed directly (no wrapper command).
        output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
                                          sync=True, execute=utils.execute,
                                          ionice=None)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null',
                                          'count=5678', 'bs=1234',
                                          'conv=fdatasync', run_as_root=True)

    @mock.patch('cinder.volume.utils._calculate_count',
                return_value=(1234, 5678))
    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=False)
    @mock.patch('cinder.utils.execute')
    def test_copy_volume_dd_with_ionice(self, mock_exec,
                                        mock_support, mock_count):
        # An ionice class prefixes the dd command with 'ionice <class>'.
        output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, 1,
                                          sync=True, execute=utils.execute,
                                          ionice='-c3')
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('ionice', '-c3', 'dd',
                                          'if=/dev/zero', 'of=/dev/null',
                                          'count=5678', 'bs=1234',
                                          'conv=fdatasync', run_as_root=True)
class VolumeUtilsTestCase(test.TestCase):
    """Tests for assorted small helpers in cinder.volume.utils."""

    def test_null_safe_str(self):
        # Falsy values of any type collapse to the empty string; truthy
        # values are stringified.
        self.assertEqual('', volume_utils.null_safe_str(None))
        self.assertEqual('', volume_utils.null_safe_str(False))
        self.assertEqual('', volume_utils.null_safe_str(0))
        self.assertEqual('', volume_utils.null_safe_str([]))
        self.assertEqual('', volume_utils.null_safe_str(()))
        self.assertEqual('', volume_utils.null_safe_str({}))
        self.assertEqual('', volume_utils.null_safe_str(set()))
        self.assertEqual('a', volume_utils.null_safe_str('a'))
        self.assertEqual('1', volume_utils.null_safe_str(1))
        self.assertEqual('True', volume_utils.null_safe_str(True))

    @mock.patch('cinder.utils.get_root_helper')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning')
    def test_supports_thin_provisioning(self, mock_supports_thin, mock_helper):
        # The helper proxies straight to the LVM class method.
        self.assertEqual(mock_supports_thin.return_value,
                         volume_utils.supports_thin_provisioning())
        mock_helper.assert_called_once_with()

    @mock.patch('cinder.utils.get_root_helper')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
    def test_get_all_physical_volumes(self, mock_get_vols, mock_helper):
        # Proxy to LVM.get_all_physical_volumes using the root helper.
        self.assertEqual(mock_get_vols.return_value,
                         volume_utils.get_all_physical_volumes())
        mock_helper.assert_called_once_with()

    @mock.patch('cinder.utils.get_root_helper')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_volume_groups')
    def test_get_all_volume_groups(self, mock_get_groups, mock_helper):
        # Proxy to LVM.get_all_volume_groups using the root helper.
        self.assertEqual(mock_get_groups.return_value,
                         volume_utils.get_all_volume_groups())
        mock_helper.assert_called_once_with()

    def test_generate_password(self):
        # Generated passwords contain at least one digit, one lowercase and
        # one uppercase character, drawn from alphabets that exclude
        # easily-confused characters (0/O, 1/l/I).
        password = volume_utils.generate_password()
        self.assertTrue(any(c for c in password if c in '23456789'))
        self.assertTrue(any(c for c in password
                            if c in 'abcdefghijkmnopqrstuvwxyz'))
        self.assertTrue(any(c for c in password
                            if c in 'ABCDEFGHJKLMNPQRSTUVWXYZ'))
        # Default length is 16; an explicit length is honored.
        self.assertEqual(16, len(password))
        self.assertEqual(10, len(volume_utils.generate_password(10)))

    @mock.patch('cinder.volume.utils.generate_password')
    def test_generate_username(self, mock_gen_pass):
        # Usernames are generated with the same machinery as passwords.
        output = volume_utils.generate_username()
        self.assertEqual(mock_gen_pass.return_value, output)

    def test_extract_host(self):
        # Host strings have the form Host[@Backend[#Pool]]; extract_host
        # returns the requested level, optionally substituting the default
        # pool name when no pool is present.
        host = 'Host'
        # default level is 'backend'
        self.assertEqual(
            volume_utils.extract_host(host), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'backend'), 'Host')
        # default_pool_name doesn't work for level other than 'pool'
        self.assertEqual(
            volume_utils.extract_host(host, 'host', True), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'host', False), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'backend', True), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'backend', False), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'pool'), None)
        self.assertEqual(
            volume_utils.extract_host(host, 'pool', True), '_pool0')
        host = 'Host@Backend'
        self.assertEqual(
            volume_utils.extract_host(host), 'Host@Backend')
        self.assertEqual(
            volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'backend'), 'Host@Backend')
        self.assertEqual(
            volume_utils.extract_host(host, 'pool'), None)
        self.assertEqual(
            volume_utils.extract_host(host, 'pool', True), '_pool0')
        host = 'Host@Backend#Pool'
        self.assertEqual(
            volume_utils.extract_host(host), 'Host@Backend')
        self.assertEqual(
            volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'backend'), 'Host@Backend')
        self.assertEqual(
            volume_utils.extract_host(host, 'pool'), 'Pool')
        self.assertEqual(
            volume_utils.extract_host(host, 'pool', True), 'Pool')
        host = 'Host#Pool'
        self.assertEqual(
            volume_utils.extract_host(host), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'host'), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'backend'), 'Host')
        self.assertEqual(
            volume_utils.extract_host(host, 'pool'), 'Pool')
        self.assertEqual(
            volume_utils.extract_host(host, 'pool', True), 'Pool')

    def test_append_host(self):
        # append_host joins host and pool with '#'; a missing pool returns
        # the host unchanged, and a missing host returns None.
        host = 'Host'
        pool = 'Pool'
        expected = 'Host#Pool'
        self.assertEqual(expected,
                         volume_utils.append_host(host, pool))
        pool = None
        expected = 'Host'
        self.assertEqual(expected,
                         volume_utils.append_host(host, pool))
        host = None
        pool = 'pool'
        expected = None
        self.assertEqual(expected,
                         volume_utils.append_host(host, pool))
        host = None
        pool = None
        expected = None
        self.assertEqual(expected,
                         volume_utils.append_host(host, pool))
|
|
# Copyright (c) 2017, John Skinner
import abc
import numpy as np
import logging
import database.entity
import core.image
import core.sequence_type
import core.image_source
import util.database_helpers as dh
import core.image_entity
class ImageCollection(core.image_source.ImageSource, database.entity.Entity, metaclass=abc.ABCMeta):
    """
    A collection of images stored in the database.
    This can be a sequential set of images like a video, or a random sampling of different pictures.
    """

    def __init__(self, images, type_, db_client_, id_=None, **kwargs):
        """
        Create the image collection.

        :param images: A map of timestamp -> image id for the images in this collection
        :param type_: The core.sequence_type.ImageSequenceType of this collection.
        Anything other than a non-interactive ImageSequenceType falls back to
        NON_SEQUENTIAL, since image collections cannot be interactive.
        :param db_client_: The database client used to query image documents
        :param id_: The database id of this entity, if it already exists
        """
        super().__init__(id_=id_, **kwargs)
        self._images = images
        if (isinstance(type_, core.sequence_type.ImageSequenceType) and
                type_ is not core.sequence_type.ImageSequenceType.INTERACTIVE):
            # image collections cannot be interactive
            self._sequence_type = type_
        else:
            self._sequence_type = core.sequence_type.ImageSequenceType.NON_SEQUENTIAL
        self._timestamps = sorted(self._images.keys())
        self._current_index = 0
        self._db_client = db_client_

        if len(images) > 0:
            image_ids = list(images.values())
            # A feature is "available" only if NO image in the collection
            # lacks it. Each query matches images missing both the mono and
            # stereo variant of the data, so count() <= 0 means every image
            # has at least one of them.
            self._is_depth_available = (db_client_.image_collection.find({
                '_id': {'$in': image_ids},
                'depth_data': None,
                'left_depth_data': None,
            }).count() <= 0)
            self._is_labels_image_available = (db_client_.image_collection.find({
                '_id': {'$in': image_ids},
                'labels_data': None,
                'left_labels_data': None
            }).count() <= 0)
            # Bounding boxes are available if no image has an empty
            # labelled_objects list in its metadata.
            self._is_bboxes_available = (db_client_.image_collection.find({
                '_id': {'$in': image_ids},
                'metadata.labelled_objects': []
            }).count() <= 0)
            self._is_normals_available = (db_client_.image_collection.find({
                '_id': {'$in': image_ids},
                'world_normals_data': None,
                'left_world_normals_data': None
            }).count() <= 0)
            self._is_stereo_available = (db_client_.image_collection.find({
                '_id': {'$in': image_ids},
                'left_data': None,
                'right_data': None
            }).count() <= 0)
        else:
            self._is_depth_available = False
            self._is_labels_image_available = False
            self._is_bboxes_available = False
            self._is_normals_available = False
            self._is_stereo_available = False

        self._camera_intrinsics = None
        self._stereo_baseline = None
        # Get some metadata from the first image in the collection.
        # BUGFIX: guard against an empty collection — the original indexed
        # self._timestamps[0] unconditionally, raising IndexError when no
        # images were provided despite the empty case being handled above.
        if len(self._timestamps) > 0:
            s_first_image = db_client_.image_collection.find_one({'_id': images[self._timestamps[0]]})
            first_image = db_client_.deserialize_entity(s_first_image)
            self._camera_intrinsics = first_image.metadata.camera_intrinsics
            if self.is_stereo_available:
                self._stereo_baseline = np.linalg.norm(first_image.left_camera_pose.location -
                                                       first_image.right_camera_pose.location)

    def __len__(self):
        """
        The length of the image collection
        :return:
        """
        return len(self._images)

    def __iter__(self):
        """
        Iterator for the image collection.
        Iterates over (timestamp, image id) pairs from the inner images dict.
        BUGFIX: dict_items is iterable but not an iterator; returning it
        directly from __iter__ made ``iter(collection)`` raise TypeError.
        :return: An iterator over (timestamp, image id) pairs
        """
        return iter(self._images.items())

    def __getitem__(self, item):
        """
        Allow index-based access. Why not.
        This is the same as get
        :param item:
        :return:
        """
        return self.get(item)

    @property
    def sequence_type(self):
        """
        Get the type of image sequence produced by this image source.
        This is determined when creating the image collection
        It is useful for determining which sources can run with which algorithms.
        Image collections can be NON_SEQUENTIAL or SEQUENTIAL, but not INTERACTIVE
        :return: The image sequence type enum
        :rtype core.image_sequence.ImageSequenceType:
        """
        return self._sequence_type

    @property
    def timestamps(self):
        """
        Get the list of timestamps/indexes in this collection, in order.
        They are the list of valid keys to get and __getitem__,
        all others return None
        :return:
        """
        return self._timestamps

    def begin(self):
        """
        Start producing images.
        Resets the current index to the start
        :return: True
        """
        self._current_index = 0
        return True

    def get(self, index):
        """
        A getter for random access, since we're storing a list
        :param index: A timestamp key; anything not in the collection returns None
        :return: The loaded image object, or None for an unknown key
        """
        if index in self._images:
            return dh.load_object(self._db_client, self._db_client.image_collection, self._images[index])
        return None

    def get_next_image(self):
        """
        Blocking get the next image from this source.
        Parallel versions of this may add a timeout parameter.
        Returning None indicates that this image source will produce no more images
        :return: An Image object (see core.image) or None, and a timestamp or None
        """
        if not self.is_complete():
            timestamp = self._timestamps[self._current_index]
            image = self.get(timestamp)
            self._current_index += 1
            return image, timestamp
        return None, None

    def is_complete(self):
        """
        Have we got all the images from this source?
        Some sources are infinite, some are not,
        and this method lets those that are not end the iteration.
        :return: True if there are more images to get, false otherwise.
        """
        return self._current_index >= len(self._timestamps)

    @property
    def supports_random_access(self):
        """
        Image collections support random access, they are a list of images
        :return:
        """
        return True

    @property
    def is_depth_available(self):
        """
        Do the images in this sequence include depth
        :return: True if depth is available for all images in this sequence
        """
        return self._is_depth_available

    @property
    def is_per_pixel_labels_available(self):
        """
        Do images from this image source include object labels
        :return: True if this image source can produce object labels for each image
        """
        return self._is_labels_image_available

    @property
    def is_labels_available(self):
        """
        Do images from this source include object bounding boxes in their metadata.
        :return: True iff the image metadata includes bounding boxes
        """
        return self._is_bboxes_available

    @property
    def is_normals_available(self):
        """
        Do images from this image source include world normals
        :return: True if images have world normals associated with them
        """
        return self._is_normals_available

    @property
    def is_stereo_available(self):
        """
        Can this image source produce stereo images.
        Some algorithms only run with stereo images
        :return:
        """
        return self._is_stereo_available

    @property
    def is_stored_in_database(self):
        """
        Do this images from this source come from the database.
        Image collections are always stored in the database
        :return:
        """
        return True

    def get_camera_intrinsics(self):
        """
        Get the camera intrinsics for this image collection.
        At the moment it assumes it is the same for all images,
        and just reads it from the first.
        When I have effective metadata aggregation, read it from that.
        :return: The intrinsics of the first image, or None for an empty collection
        """
        return self._camera_intrinsics

    def get_stereo_baseline(self):
        """
        Get the distance between the stereo cameras, or None if the images in this collection are not stereo.
        :return:
        """
        return self._stereo_baseline

    def validate(self):
        """
        The image sequence is valid iff all the contained images are valid
        Only count the images that have a validate method
        :return: True if all the images are valid, false if not
        """
        with self:
            while not self.is_complete():
                # BUGFIX: get_next_image returns an (image, timestamp) pair;
                # the original checked hasattr on the tuple itself, so no
                # image was ever actually validated.
                image, _ = self.get_next_image()
                if hasattr(image, 'validate'):
                    if not image.validate():
                        return False
            return True

    def serialize(self):
        serialized = super().serialize()
        # Only include the image IDs here, they'll get turned back into objects for us
        serialized['images'] = [(stamp, image) for stamp, image in self._images.items()]
        if self.sequence_type is core.sequence_type.ImageSequenceType.SEQUENTIAL:
            serialized['sequence_type'] = 'SEQ'
        else:
            serialized['sequence_type'] = 'NON'
        return serialized

    @classmethod
    def deserialize(cls, serialized_representation, db_client, **kwargs):
        """
        Load any collection of images.
        This handles the weird chicken-and-egg problem of deserializing
        the image collection and the individual images.
        :param serialized_representation:
        :param db_client: An instance of database.client, from which to load the image collection
        :param kwargs: Additional arguments passed to the entity constructor.
        These will be overridden by values in serialized representation
        :return: A deserialized
        """
        if 'images' in serialized_representation:
            kwargs['images'] = {stamp: img_id for stamp, img_id in serialized_representation['images']}
        if 'sequence_type' in serialized_representation and serialized_representation['sequence_type'] == 'SEQ':
            kwargs['type_'] = core.sequence_type.ImageSequenceType.SEQUENTIAL
        else:
            kwargs['type_'] = core.sequence_type.ImageSequenceType.NON_SEQUENTIAL
        kwargs['db_client_'] = db_client
        return super().deserialize(serialized_representation, db_client, **kwargs)

    @classmethod
    def create_and_save(cls, db_client, image_map, sequence_type):
        """
        Make an already serialized image collection.
        Since, sometimes we have the image ids, but we don't want to have to load the objects to make the collection.
        WARNING: This can create invalid serialized image collections, since it can't check the validity of the ids.
        :param db_client: The database client, used to check image ids and for saving
        :param image_map: A map of timestamp to bson.objectid.ObjectId that refer to image objects in the database
        :param sequence_type: core.sequence_type.ImageSequenceType
        :return: The id of the newly created image collection, or None if there is an error
        """
        found_images = db_client.image_collection.find({
            '_id': {'$in': list(image_map.values())}
        }, {'_id': True}).count()
        if not found_images == len(image_map):
            logging.getLogger(__name__).warning(
                "Tried to create image collection with {0} missing ids".format(len(image_map) - found_images))
            return None
        s_images_list = [(stamp, image_id) for stamp, image_id in image_map.items()]
        s_seq_type = 'SEQ' if sequence_type is core.sequence_type.ImageSequenceType.SEQUENTIAL else 'NON'
        # Reuse an identical existing collection rather than inserting a duplicate.
        existing = db_client.image_source_collection.find_one({
            '_type': cls.__module__ + '.' + cls.__name__,
            'images': {'$all': s_images_list},
            'sequence_type': s_seq_type
        }, {'_id': True})
        if existing is not None:
            return existing['_id']
        else:
            return db_client.image_source_collection.insert({
                '_type': cls.__module__ + '.' + cls.__name__,
                'images': s_images_list,
                'sequence_type': s_seq_type
            })
def delete_image_collection(db_client, image_collection_id):
    """
    A helper to delete image collections and all the images contained therein.
    Images contained in more than one collection will be retained.
    :param db_client: The database client used to find and delete documents
    :param image_collection_id: The id of the collection to delete
    :return: None
    """
    # Load just the (timestamp, image id) list for the collection.
    s_collection = db_client.image_source_collection.find_one(
        {'_id': image_collection_id}, {'images': True})
    for _, image_id in s_collection['images']:
        # Count how many collections reference this image (capped at 2,
        # since we only care whether it is more than one).
        reference_count = db_client.image_source_collection.find(
            {'images': image_id}, limit=2).count()
        if reference_count <= 1:
            # Only this collection uses the image, so it can go too.
            core.image_entity.delete_image(db_client, image_id)
    db_client.image_source_collection.delete_one({'_id': image_collection_id})
|
|
import logging
import multiprocessing
import multiprocessing.pool
import os.path as osp
import shutil
import uuid
from smqtk.algorithms.relevancy_index import get_relevancy_index_impls
from smqtk.representation import DescriptorElementFactory
from smqtk.representation.descriptor_element.local_elements import DescriptorMemoryElement
from smqtk.representation.descriptor_index.memory import DescriptorMemoryIndex
from smqtk.utils import SmqtkObject
from smqtk.utils import plugin
from smqtk.utils import file_utils
# Default factory that produces in-memory descriptor elements (no persistence).
DFLT_MEMORY_DESCR_FACTORY = DescriptorElementFactory(DescriptorMemoryElement,
                                                     {})

# Default relevancy-index plugin configuration: an in-memory libSVM index
# using the histogram-intersection kernel, with no descriptor cache file.
DFLT_REL_INDEX_CONFIG = {
    "type": "LibSvmHikRelevancyIndex",
    "LibSvmHikRelevancyIndex": {
        "descr_cache_filepath": None,
    }
}
class IqrResultsDict (dict):
    """
    Dictionary subclass for containing DescriptorElement-to-float mapping.
    We expect keys to be DescriptorElement instances and the values to be floats
    between [0,1], inclusive.
    """

    def __init__(self, other=None, **kwds):
        """
        Construct the dict, float-converting any initial values.

        BUGFIX: ``dict.__init__`` does not call the overridden
        ``__setitem__``, so values passed to the constructor previously
        bypassed the float conversion. Route them through ``update`` instead.
        """
        super(IqrResultsDict, self).__init__()
        self.update(other, **kwds)

    def __setitem__(self, i, v):
        # Coerce every stored value to float so the mapping invariant holds.
        super(IqrResultsDict, self).__setitem__(i, float(v))

    def update(self, other=None, **kwds):
        """
        D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
        If E present and has a .keys() method, does: for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
        In either case, this is followed by: for k in F: D[k] = F[k]
        Reimplemented so as to use override __setitem__ method.
        """
        # __setitem__ performs the float conversion, so plain assignment
        # suffices here.
        if hasattr(other, 'keys'):
            for k in other:
                self[k] = other[k]
        elif other is not None:
            for k, v in other:
                self[k] = v
        for k in kwds:
            self[k] = kwds[k]
class IqrSession (SmqtkObject):
"""
Encapsulation of IQR Session related data structures with a centralized lock
for multi-thread access.
This object is compatible with the python with-statement, so when elements
are to be used or modified, it should be within a with-block so race
conditions do not occur across threads/sub-processes.
"""
@property
def _log(self):
    """Logger named after this class and tagged with the session UUID."""
    logger_name = '.'.join((self.__module__, self.__class__.__name__))
    return logging.getLogger('%s[%s]' % (logger_name, self.uuid))
def __init__(self, work_directory, descriptor, nn_index,
pos_seed_neighbors=500,
rel_index_config=DFLT_REL_INDEX_CONFIG,
descriptor_factory=DFLT_MEMORY_DESCR_FACTORY,
session_uid=None):
""" Initialize the IQR session
This does not initialize the working index for ranking as there are no
known positive descriptor examples at this time.
Adjudications
-------------
Adjudications are carried through between initializations. This allows
indexed material adjudicated through-out the lifetime of the session to
stay relevant.
:param work_directory: Directory assigned to this session for temporary
and working files.
:type work_directory: str
:param descriptor: Descriptor to use for this IQR session
:type descriptor:
smqtk.algorithms.descriptor_generator.DescriptorGenerator
:param nn_index: NearestNeighborIndex to draw from when initializing IQR
session.
:type nn_index: smqtk.algorithms.nn_index.NearestNeighborsIndex
:param pos_seed_neighbors: Number of neighbors to pull from the given
``nn_index`` for each positive exemplar when populating the working
index, i.e. this value determines the size of the working index for
IQR refinement. By default, we try to get 500 neighbors.
Since there may be partial to significant overlap of near neighbors
as a result of nn_index queries for positive exemplars, the working
index may contain anywhere from this value's number of entries, to
``N*P``, where ``N`` is this value and ``P`` is the number of
positive examples at the time of working index initialization.
:type pos_seed_neighbors: int
:param rel_index_config: Plugin configuration dictionary for the
RelevancyIndex to use for ranking user adjudications. By default we
we use an in-memory libSVM based index using the histogram
intersection metric.
:type rel_index_config: dict
:param descriptor_factory: DescriptorElementFactory instance to use to
produce new descriptors in output extension data. By default, we
use a factory that produces in-memory descriptors.
:type descriptor_factory: DescriptorElementFactory
:param session_uid: Optional manual specification of session UUID.
:type session_uid: str or uuid.UUID
"""
self.uuid = session_uid or uuid.uuid1()
self.lock = multiprocessing.RLock()
# Local descriptor index for ranking, populated by a query to the
# nn_index instance.
# Added external data/descriptors not added to this index.
self.working_index = DescriptorMemoryIndex()
# Initialize book-keeping set so we know what positive descriptors
# UUIDs we've used to query the neighbor index with already.
#: :type: set[collections.Hashable]
self._wi_init_seeds = set()
# Descriptor references from our index (above) that have been
# adjudicated.
#: :type: set[smqtk.representation.DescriptorElement]
self.positive_descriptors = set()
#: :type: set[smqtk.representation.DescriptorElement]
self.negative_descriptors = set()
# Example pos/neg data and descriptors added to this session
# (external to our working index).
# All maps keyed on UUID values (some kind of content checksum,
# i.e. SHA1).
#: :type: dict[collections.Hashable, smqtk.representation.DataElement]
self.ex_data = dict()
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self.ex_pos_descriptors = dict()
#: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
self.ex_neg_descriptors = dict()
self.pos_seed_neighbors = int(pos_seed_neighbors)
# Working directory assigned to this session
self._work_dir = work_directory
# Mapping of a DescriptorElement in our relevancy search index (not the
# index that the nn_index uses) to the relevancy score given the
# recorded positive and negative adjudications.
# This is None before any initialization or refinement occurs.
#: :type: None or dict of (collections.Hashable, float)
self.results = None
#
# Algorithm Instances [+Config]
#
# DescriptorGenerator instance assigned to this session.
self.descriptor = descriptor
# Factory for generating DescriptorElements of a configured impl type.
self.descriptor_factory = descriptor_factory
# NearestNeighborIndex instance assigned to this session.
self.nn_index = nn_index
# RelevancyIndex configuration and instance that is used for producing
# results.
# This is only [re]constructed when initializing the session.
self.rel_index_config = rel_index_config
# This is None until session initialization happens after pos/neg
# exemplar data has been added.
#: :type: None | smqtk.algorithms.relevancy_index.RelevancyIndex
self.rel_index = None
def __del__(self):
# Clean up working directory
if osp.isdir(self.work_dir):
shutil.rmtree(self.work_dir)
def __enter__(self):
"""
:rtype: IqrSession
"""
self.lock.acquire()
return self
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_val, exc_tb):
self.lock.release()
@property
def work_dir(self):
file_utils.safe_create_dir(self._work_dir)
return self._work_dir
def ordered_results(self):
"""
Return a tuple of the current (id, probability) result pairs in
order of probability score. If there are no results yet, None is
returned.
:rtype: None | tuple[(smqtk.representation.DescriptorElement, float)]
"""
with self.lock:
if self.results:
return tuple(sorted(self.results.iteritems(),
key=lambda p: p[1],
reverse=True))
return None
def add_positive_data(self, *data_elements):
"""
Add one or more data elements to this IQR session as positive examples.
This produces descriptors on the input data with our configured
descriptor generator.
:param data_elements: Iterable of data elements to add as positive
examples.
:type data_elements: collections.Iterable[smqtk.representation.DataElement]
"""
with self.lock:
r = self.descriptor.compute_descriptor_async(
data_elements, self.descriptor_factory
)
for da in r:
self.ex_pos_descriptors[da.uuid()] = r[da]
self.ex_data[da.uuid()] = da
def add_negative_data(self, *data_elements):
"""
Add one or more data elements to this IQR session as negative examples.
This produces descriptors on the input data with our configured
descriptor generator.
:param data_elements: Iterable of data elements to add as positive
examples.
:type data_elements: collections.Iterable[smqtk.representation.DataElement]
"""
with self.lock:
r = self.descriptor.compute_descriptor_async(
data_elements, self.descriptor_factory
)
for da in r:
self.ex_neg_descriptors[da.uuid()] = r[da]
self.ex_data[da.uuid()] = da
def initialize(self):
"""
Initialize working index based on currently set positive exemplar data.
This takes into account the currently set positive data descriptors as
well as positively adjudicated descriptors from the lifetime of this
session.
:raises RuntimeError: There are no positive example descriptors in this
session to use as a basis for querying.
"""
if len(self.ex_pos_descriptors) + \
len(self.positive_descriptors) <= 0:
raise RuntimeError("No positive descriptors to query the neighbor "
"index with.")
# Not clearing index because this step is intended to be additive
# build up new working index
# TODO: Only query using new positives since previous queries
for p in self.ex_pos_descriptors.itervalues():
if p.uuid() not in self._wi_init_seeds:
self._log.info("Querying neighbors to: %s", p)
self.working_index.add_many_descriptors(
self.nn_index.nn(p, n=self.pos_seed_neighbors)[0]
)
self._wi_init_seeds.add(p.uuid())
for p in self.positive_descriptors:
if p.uuid() not in self._wi_init_seeds:
self._log.info("Querying neighbors to: %s", p)
self.working_index.add_many_descriptors(
self.nn_index.nn(p, n=self.pos_seed_neighbors)[0]
)
self._wi_init_seeds.add(p.uuid())
# Make new relevancy index
self._log.info("Creating new relevancy index over working index.")
#: :type: smqtk.algorithms.relevancy_index.RelevancyIndex
self.rel_index = plugin.from_plugin_config(self.rel_index_config,
get_relevancy_index_impls)
self.rel_index.build_index(self.working_index.iterdescriptors())
def adjudicate(self, new_positives=(), new_negatives=(),
un_positives=(), un_negatives=()):
"""
Update current state of working index positive and negative
adjudications based on descriptor UUIDs.
:param new_positives: Descriptors of elements in our working index to
now be considered to be positively relevant.
:type new_positives: collections.Iterable[smqtk.representation.DescriptorElement]
:param new_negatives: Descriptors of elements in our working index to
now be considered to be negatively relevant.
:type new_negatives: collections.Iterable[smqtk.representation.DescriptorElement]
:param un_positives: Descriptors of elements in our working index to now
be considered not positive any more.
:type un_positives: collections.Iterable[smqtk.representation.DescriptorElement]
:param un_negatives: Descriptors of elements in our working index to now
be considered not negative any more.
:type un_negatives: collections.Iterable[smqtk.representation.DescriptorElement]
"""
with self.lock:
self.positive_descriptors.update(new_positives)
self.positive_descriptors.difference_update(un_positives)
self.positive_descriptors.difference_update(new_negatives)
self.negative_descriptors.update(new_negatives)
self.negative_descriptors.difference_update(un_negatives)
self.negative_descriptors.difference_update(new_positives)
def refine(self):
""" Refine current model results based on current adjudication state
:raises RuntimeError: There are no adjudications to run on. We must have
at least one positive adjudication.
"""
with self.lock:
if not self.rel_index:
raise RuntimeError("No relevancy index yet. Must not have "
"initialized session (no working index).")
# fuse pos/neg adjudications + added positive data descriptors
pos = self.ex_pos_descriptors.values() + list(self.positive_descriptors)
neg = self.ex_neg_descriptors.values() + list(self.negative_descriptors)
if not pos:
raise RuntimeError("Did not find at least one positive "
"adjudication.")
id_probability_map = self.rel_index.rank(pos, neg)
if self.results is None:
self.results = IqrResultsDict()
self.results.update(id_probability_map)
# Force adjudicated positives and negatives to be probability 1 and
# 0, respectively, since we want to control where they show up in
# our results view.
# - Not all pos/neg descriptors may be in our working index.
for d in pos:
if d in self.results:
self.results[d] = 1.0
for d in neg:
if d in self.results:
self.results[d] = 0.0
def reset(self):
""" Reset the IQR Search state
No positive adjudications, reload original feature data
"""
with self.lock:
self.working_index.clear()
self.positive_descriptors.clear()
self.negative_descriptors.clear()
self.ex_pos_descriptors.clear()
self.ex_neg_descriptors.clear()
self.ex_data.clear()
self.rel_index = None
self.results = None
# clear contents of working directory
shutil.rmtree(self.work_dir)
|
|
""" Orbivo S20. """
import binascii
import struct
import logging
import socket
import threading
import time
_LOGGER = logging.getLogger(__name__)
# S20 UDP port
PORT = 10000
# UDP best-effort: requests are re-sent up to RETRIES times, each with its
# own receive window.
RETRIES = 3
TIMEOUT = 1.0
DISCOVERY_TIMEOUT = 1.0
# Timeout after which to renew device subscriptions
SUBSCRIPTION_TIMEOUT = 60
# Packet constants.
# MAGIC is the b'hd' header that prefixes every packet in the protocol;
# the 4-byte command constants encode length + command id.
MAGIC = b'\x68\x64'
DISCOVERY = b'\x00\x06\x71\x61'
DISCOVERY_RESP = b'\x00\x2a\x71\x61'
SUBSCRIBE = b'\x00\x1e\x63\x6c'
SUBSCRIBE_RESP = b'\x00\x18\x63\x6c'
CONTROL = b'\x00\x17\x64\x63'
CONTROL_RESP = b'\x00\x17\x73\x66'
PADDING_1 = b'\x20\x20\x20\x20\x20\x20'
PADDING_2 = b'\x00\x00\x00\x00'
ON = b'\x01'
OFF = b'\x00'
# Socket shared by all S20 instances; configured and serviced by _setup().
_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Buffer mapping source IP -> most recent raw datagram from that host,
# filled by the _listen() background thread.
_BUFFER = {}
def _listen():
    """ Receive datagrams forever, stashing the latest one per source IP. """
    while True:
        payload, sender = _SOCKET.recvfrom(1024)
        _BUFFER[sender[0]] = payload
def _setup():
    """ Set up module.

    Enable broadcast on the shared UDP socket, bind it to the S20 port,
    and start the background listener thread.
    """
    _SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    _SOCKET.bind(('', PORT))
    listener = threading.Thread(target=_listen, daemon=True)
    listener.start()
def _device_time(tab):
ts = struct.unpack('<L', tab)[0] - 2208988800
return ts
def discover(timeout=DISCOVERY_TIMEOUT):
    """ Discover devices on the local network.

    Broadcasts a discovery request and collects responses (repeated
    RETRIES times, since UDP is best-effort).

    :param timeout: Optional per-attempt timeout in seconds.
    :returns: Dict mapping each discovered host address to a dict of
        device info (mac, reversed mac, state, device and server time).
        (The original docstring claimed a set was returned; it has always
        been a dict.)
    """
    hosts = {}
    payload = MAGIC + DISCOVERY
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
        start = time.time()
        while time.time() < start + timeout:
            # Iterate over a snapshot: the listener thread mutates _BUFFER.
            for host, data in _BUFFER.copy().items():
                if not _is_discovery_response(data):
                    continue
                if host not in hosts:
                    _LOGGER.debug("Discovered device at %s", host)
                    entry = {}
                    entry['mac'] = data[7:13]
                    entry['imac'] = data[19:25]
                    entry['next'] = 0
                    entry['st'] = int(data[-1])
                    entry['time'] = _device_time(data[37:41])
                    entry['serverTime'] = int(time.time())
                    hosts[host] = entry
            # Sleep briefly instead of busy-spinning while responses
            # trickle in via the listener thread.
            time.sleep(0.05)
    return hosts
def _is_discovery_response(data):
    """ Is this a discovery response?

    :param data: Payload.
    """
    # MAGIC (2 bytes) + DISCOVERY_RESP (4 bytes) == the 6-byte header.
    return data.startswith(MAGIC + DISCOVERY_RESP)
def _is_subscribe_response(data):
    """ Is this a subscribe response?

    :param data: Payload.
    """
    # MAGIC (2 bytes) + SUBSCRIBE_RESP (4 bytes) == the 6-byte header.
    return data.startswith(MAGIC + SUBSCRIBE_RESP)
def _is_control_response(data):
    """ Is this a control response?

    :param data: Payload.
    """
    # MAGIC (2 bytes) + CONTROL_RESP (4 bytes) == the 6-byte header.
    return data.startswith(MAGIC + CONTROL_RESP)
class S20Exception(Exception):
    """ Raised when communication with an S20 device fails. """
class S20(object):
    """ Controls an Orvibo S20 WiFi Smart Socket.

    http://www.orvibo.com/en_products_view.asp?mid=15&pid=4&id=234
    Protocol documentation: http://pastebin.com/LfUhsbcS
    """
    def __init__(self, host, mac=None):
        """ Initialize S20 object.

        :param host: IP or hostname of device.
        :param mac: Optional device MAC, either raw bytes or a
            colon-separated hex string ("aa:bb:cc:dd:ee:ff"). When omitted,
            the MAC is discovered via a (synchronous) UDP broadcast.
        """
        self.host = host
        # Time of the last successful subscription; 0 means "never", which
        # guarantees the first _control() call performs a subscription.
        # (Previously this attribute was only created inside _subscribe().)
        self.last_subscribed = 0
        if not mac:
            (self._mac, self._mac_reversed) = self._discover_mac()
        else:
            if isinstance(mac, str):
                self._mac = binascii.a2b_hex(''.join(mac.split(':')))
            else:
                self._mac = mac
            ba = bytearray(self._mac)
            ba.reverse()
            self._mac_reversed = bytes(ba)
        self._subscribe()

    @property
    def on(self):
        """ State property.

        :returns: State of device (on/off).
        """
        return self._subscribe()

    @on.setter
    def on(self, state):
        """ Change device state.

        :param state: True (on) or False (off).
        """
        if state:
            self._turn_on()
        else:
            self._turn_off()

    def _discover_mac(self):
        """ Discovers MAC address of device.

        Discovery is done by sending a UDP broadcast.
        All configured devices reply. The response contains
        the MAC address in both needed formats.

        Discovery of multiple switches must be done synchronously.

        :returns: Tuple of MAC address and reversed MAC address.
        :raises S20Exception: The device did not answer the broadcast.
        """
        mac = None
        mac_reversed = None
        cmd = MAGIC + DISCOVERY
        resp = self._udp_transact(cmd, self._discovery_resp,
                                  broadcast=True,
                                  timeout=DISCOVERY_TIMEOUT)
        if resp:
            (mac, mac_reversed) = resp
        if mac is None:
            raise S20Exception("Couldn't discover {}".format(self.host))
        return (mac, mac_reversed)

    def _subscribe(self):
        """ Subscribe to the device.

        A subscription serves two purposes:
        - Returns state (on/off).
        - Enables state changes on the device
          for a short period of time.

        :returns: True if the device reports ON, False otherwise.
        :raises S20Exception: No subscribe response was received.
        """
        cmd = MAGIC + SUBSCRIBE + self._mac \
            + PADDING_1 + self._mac_reversed + PADDING_1
        status = self._udp_transact(cmd, self._subscribe_resp)
        if status is not None:
            self.last_subscribed = time.time()
            return status == ON
        else:
            raise S20Exception(
                "No status could be found for {}".format(self.host))

    def _subscription_is_recent(self):
        """ Check if subscription occurred recently.

        :returns: Yes (True) or no (False)
        """
        return self.last_subscribed > time.time() - SUBSCRIPTION_TIMEOUT

    def _control(self, state):
        """ Control device state.

        Possible states are ON or OFF.

        :param state: Switch to this state.
        :raises S20Exception: Device did not acknowledge the request.
        """
        # Renew subscription if necessary -- the device only accepts
        # control packets for a short window after subscribing.
        if not self._subscription_is_recent():
            self._subscribe()
        cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
        _LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
        ack_state = self._udp_transact(cmd, self._control_resp, state)
        if ack_state is None:
            raise S20Exception(
                "Device didn't acknowledge control request: {}".format(
                    self.host))

    def _discovery_resp(self, data):
        """ Handle a discovery response.

        :param data: Payload.
        :returns: (MAC, reversed MAC) tuple, or None if ``data`` is not a
            discovery response.
        """
        if _is_discovery_response(data):
            _LOGGER.debug("Discovered MAC of %s: %s", self.host,
                          binascii.hexlify(data[7:13]).decode())
            return (data[7:13], data[19:25])

    def _subscribe_resp(self, data):
        """ Handle a subscribe response.

        :param data: Payload.
        :returns: State (ON/OFF), or None if not a subscribe response.
        """
        if _is_subscribe_response(data):
            status = bytes([data[23]])
            _LOGGER.debug("Successfully subscribed to %s, state: %s",
                          self.host, ord(status))
            return status

    def _control_resp(self, data, state):
        """ Handle a control response.

        :param data: Payload.
        :param state: Requested state.
        :returns: Acknowledged state, or None if it did not match.
        """
        if _is_control_response(data):
            ack_state = bytes([data[22]])
            if state == ack_state:
                _LOGGER.debug("Received state ack from %s, state: %s",
                              self.host, ord(ack_state))
                return ack_state

    def _udp_transact(self, payload, handler, *args,
                      broadcast=False, timeout=TIMEOUT):
        """ Complete a UDP transaction.

        UDP is stateless and not guaranteed, so we have to
        take some mitigation steps:
        - Send payload multiple times.
        - Wait for awhile to receive response.

        :param payload: Payload to send.
        :param handler: Response handler.
        :param args: Arguments to pass to response handler.
        :param broadcast: Send a broadcast instead.
        :param timeout: Timeout in seconds.
        :returns: Whatever the handler returned, or None on timeout.
        """
        if self.host in _BUFFER:
            del _BUFFER[self.host]
        host = self.host
        if broadcast:
            host = '255.255.255.255'
        retval = None
        for _ in range(RETRIES):
            _SOCKET.sendto(bytearray(payload), (host, PORT))
            start = time.time()
            while time.time() < start + timeout:
                data = _BUFFER.get(self.host, None)
                if data:
                    retval = handler(data, *args)
                    # Return as soon as a response is received
                    if retval:
                        return retval
                # Sleep briefly instead of busy-spinning at 100% CPU while
                # the listener thread waits for the device's reply.
                time.sleep(0.05)

    def _turn_on(self):
        """ Turn on the device. """
        self._control(ON)

    def _turn_off(self):
        """ Turn off the device. """
        self._control(OFF)
_setup()
|
|
import matplotlib.pyplot as plt
from matplotlib import dates
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import copy
import calendar
import mysql.connector
# Hour offset from UTC to PST, applied to the 6h-window midtimes below.
timezone = -8
#database connection
# SECURITY NOTE(review): database credentials are hard-coded here; consider
# loading them from a config file or environment variable instead.
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#select data (spikes and fire times already removed)
SP2_data_query = ('SELECT UNIX_UTC_6h_midtime, meas_mean_mass_conc, meas_rel_err, GC_v10_default, GC_default_rel_err, cluster,cluster_number FROM whi_gc_and_sp2_6h_mass_concs WHERE RH_threshold = 90 ORDER BY UNIX_UTC_6h_midtime')
cursor.execute(SP2_data_query)
raw_data = cursor.fetchall()
SP2_6h_NPac = []
SP2_6h_SPac = []
SP2_6h_Cont = []
SP2_6h_LRT = []
SP2_6h_BB = []
GC2009_BC_concs_d = {}
GC2010_BC_concs_d = {}
GC2012_BC_concs_d = {}

# Route each cluster label to the list collecting its records.
# 'GBPS' records are folded into the South Pacific series.
_cluster_lists = {
    'NPac': SP2_6h_NPac,
    'SPac': SP2_6h_SPac,
    'Cont': SP2_6h_Cont,
    'GBPS': SP2_6h_SPac,
    'LRT': SP2_6h_LRT,
    'BB': SP2_6h_BB,
}

for row in raw_data:
    UTC_ts = row[0]
    PST_date_time = datetime.utcfromtimestamp(UTC_ts) + timedelta(hours=timezone)
    meas_mass_conc = float(row[1])
    meas_rel_err = float(row[2])
    meas_abs_err = meas_rel_err * meas_mass_conc
    GC_mass_conc = row[3]
    GC_rel_err = 0  # row[4] -- GC relative error deliberately zeroed out
    GC_abs_err = GC_rel_err * GC_mass_conc
    cluster = row[5]
    ratio = GC_mass_conc / meas_mass_conc
    ratio_abs_err = (meas_rel_err + GC_rel_err) * ratio
    cluster_number = row[6]
    record = [PST_date_time, meas_mass_conc, meas_abs_err,
              ratio, ratio_abs_err, GC_mass_conc]
    target = _cluster_lists.get(cluster)
    if target is not None:
        target.append(record)
####################plotting
def _datenums(records):
    # Matplotlib date numbers from each record's PST datetime (column 0).
    return [dates.date2num(rec[0]) for rec in records]

def _column(records, idx):
    # Pull one column out of each record.
    return [rec[idx] for rec in records]

# Measured SP2 series: date, mass concentration (col 1), abs. error (col 2).
SP2_6h_NPac_date = _datenums(SP2_6h_NPac)
SP2_6h_NPac_mass_conc = _column(SP2_6h_NPac, 1)
SP2_6h_NPac_abs_err = _column(SP2_6h_NPac, 2)
SP2_6h_SPac_date = _datenums(SP2_6h_SPac)
SP2_6h_SPac_mass_conc = _column(SP2_6h_SPac, 1)
SP2_6h_SPac_abs_err = _column(SP2_6h_SPac, 2)
SP2_6h_Cont_date = _datenums(SP2_6h_Cont)
SP2_6h_Cont_mass_conc = _column(SP2_6h_Cont, 1)
SP2_6h_Cont_abs_err = _column(SP2_6h_Cont, 2)
SP2_6h_LRT_date = _datenums(SP2_6h_LRT)
SP2_6h_LRT_mass_conc = _column(SP2_6h_LRT, 1)
SP2_6h_LRT_abs_err = _column(SP2_6h_LRT, 2)
SP2_6h_BB_date = _datenums(SP2_6h_BB)
SP2_6h_BB_mass_conc = _column(SP2_6h_BB, 1)
SP2_6h_BB_abs_err = _column(SP2_6h_BB, 2)
# GEOS-Chem series: same dates, modeled mass concentration (col 5).
GC_6h_NPac_date = _datenums(SP2_6h_NPac)
GC_6h_NPac_mass_conc = _column(SP2_6h_NPac, 5)
GC_6h_SPac_date = _datenums(SP2_6h_SPac)
GC_6h_SPac_mass_conc = _column(SP2_6h_SPac, 5)
GC_6h_Cont_date = _datenums(SP2_6h_Cont)
GC_6h_Cont_mass_conc = _column(SP2_6h_Cont, 5)
GC_6h_LRT_date = _datenums(SP2_6h_LRT)
GC_6h_LRT_mass_conc = _column(SP2_6h_LRT, 5)
GC_6h_BB_date = _datenums(SP2_6h_BB)
GC_6h_BB_mass_conc = _column(SP2_6h_BB, 5)
# Model/measurement ratio series: ratio (col 3), ratio abs. error (col 4).
ratio_dates_NPac = _datenums(SP2_6h_NPac)
ratio_mass_conc_NPac = _column(SP2_6h_NPac, 3)
ratio_err_NPac = _column(SP2_6h_NPac, 4)
ratio_dates_SPac = _datenums(SP2_6h_SPac)
ratio_mass_conc_SPac = _column(SP2_6h_SPac, 3)
ratio_err_SPac = _column(SP2_6h_SPac, 4)
ratio_dates_Cont = _datenums(SP2_6h_Cont)
ratio_mass_conc_Cont = _column(SP2_6h_Cont, 3)
ratio_err_Cont = _column(SP2_6h_Cont, 4)
ratio_dates_LRT = _datenums(SP2_6h_LRT)
ratio_mass_conc_LRT = _column(SP2_6h_LRT, 3)
ratio_err_LRT = _column(SP2_6h_LRT, 4)
ratio_dates_BB = _datenums(SP2_6h_BB)
ratio_mass_conc_BB = _column(SP2_6h_BB, 3)
ratio_err_BB = _column(SP2_6h_BB, 4)
#fire times for plotting shaded areas
fire_span2_09s=datetime.strptime('2009/07/27', '%Y/%m/%d') #dates following Takahama et al (2011) doi:10.5194/acp-11-6367-2011
fire_span2_09f=datetime.strptime('2009/08/08', '%Y/%m/%d')
fire_span1_10s=datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M') #jason's BC clear report
fire_span1_10f=datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')
fire_alpha = 0.15
fire_color = '#990000'
###################plotting#####################
fig = plt.figure(figsize=(12,12))
hfmt = dates.DateFormatter('%b')
#hfmt = dates.DateFormatter('%m-%d')
display_month_interval = 1
# Upper y-limit for the 2010/2012 concentration panels (ng/m3).
max_display_conc = 301
# Campaign display windows, one column per year.
startdate_2009 = '2009/06/25'
enddate_2009 = '2009/08/20'
startdate_2010 = '2010/06/05'
enddate_2010 = '2010/08/04'
startdate_2012 = '2012/03/29'
enddate_2012 = '2012/06/05'
# One color per back-trajectory cluster.
NPac_color = 'b'
SPac_color = 'g'
NCan_color = 'r'
LRT_color = 'orange'
BB_color = 'grey'
# 3x3 panel grid: rows = measured / modeled / ratio, columns = 2009/2010/2012.
ax7 = plt.subplot2grid((6,3), (0,0), colspan=1,rowspan = 2)
ax8 = plt.subplot2grid((6,3), (0,1), colspan=1,rowspan = 2)
ax9 = plt.subplot2grid((6,3), (0,2), colspan=1,rowspan = 2)
ax13 = plt.subplot2grid((6,3), (2,0), colspan=1,rowspan = 2)
ax14 = plt.subplot2grid((6,3), (2,1), colspan=1,rowspan = 2)
ax15 = plt.subplot2grid((6,3), (2,2), colspan=1,rowspan = 2)
ax10 = plt.subplot2grid((6,3), (4,0), colspan=1,rowspan = 2)
ax11 = plt.subplot2grid((6,3), (4,1), colspan=1,rowspan = 2, sharey=ax10)
ax12 = plt.subplot2grid((6,3), (4,2), colspan=1,rowspan = 2, sharey=ax10)
#SP2
# Top row: measured SP2 rBC mass concentrations, one panel per year
# (ax7 = 2009, ax8 = 2010, ax9 = 2012), one marker per cluster.
ax7.errorbar(SP2_6h_NPac_date,SP2_6h_NPac_mass_conc,yerr = SP2_6h_NPac_abs_err, color=NPac_color, alpha = 1, fmt = '<')
ax7.errorbar(SP2_6h_SPac_date,SP2_6h_SPac_mass_conc,yerr = SP2_6h_SPac_abs_err, color=SPac_color, alpha = 1, fmt = 'o')
ax7.errorbar(SP2_6h_Cont_date,SP2_6h_Cont_mass_conc,yerr = SP2_6h_Cont_abs_err, color=NCan_color, alpha = 1, fmt = '>')
ax7.errorbar(SP2_6h_LRT_date,SP2_6h_LRT_mass_conc,yerr = SP2_6h_LRT_abs_err, color=LRT_color, alpha = 1, fmt = 's')
ax7.errorbar(SP2_6h_BB_date,SP2_6h_BB_mass_conc,yerr = SP2_6h_BB_abs_err, color=BB_color, alpha = 1, fmt = 'd')
ax7.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax7.xaxis.set_visible(False)
ax7.yaxis.set_visible(True)
ax7.set_ylabel('Measured\nrBC mass concentration\n(ng/m3 - STP)')
# 2009 panel gets a taller y-axis than the other years (fire influence).
ax7.set_ylim(0, 700)
ax7.set_xlim(dates.date2num(datetime.strptime(startdate_2009, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2009, '%Y/%m/%d')))
ax7.axvspan(dates.date2num(fire_span2_09s),dates.date2num(fire_span2_09f), facecolor=fire_color, alpha=fire_alpha)
ax7.text(0.1, 0.9,'2009', transform=ax7.transAxes)
ax8.errorbar(SP2_6h_NPac_date,SP2_6h_NPac_mass_conc,yerr = SP2_6h_NPac_abs_err, color=NPac_color, alpha = 1, fmt = '<', label = 'N. Pacific')
ax8.errorbar(SP2_6h_SPac_date,SP2_6h_SPac_mass_conc,yerr = SP2_6h_SPac_abs_err, color=SPac_color, alpha = 1, fmt = 'o', label = 'S. Pacific')
ax8.errorbar(SP2_6h_Cont_date,SP2_6h_Cont_mass_conc,yerr = SP2_6h_Cont_abs_err, color=NCan_color, alpha = 1, fmt = '>', label = 'N. Canada')
ax8.errorbar(SP2_6h_LRT_date,SP2_6h_LRT_mass_conc,yerr = SP2_6h_LRT_abs_err, color=LRT_color, alpha = 1, fmt = 's', label = 'W. Pacific/Asia')
ax8.errorbar(SP2_6h_BB_date,SP2_6h_BB_mass_conc,yerr = SP2_6h_BB_abs_err, color=BB_color, alpha = 1, fmt = 'd', label = 'local BB')
ax8.xaxis.set_major_formatter(hfmt)
ax8.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax8.xaxis.set_visible(False)
ax8.yaxis.set_visible(True)
ax8.yaxis.set_ticks(np.arange(0, max_display_conc, 100))
ax8.set_yticklabels([])
ax8.set_xlabel('month')
ax8.set_ylim(0, max_display_conc)
ax8.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax8.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax8.text(0.1, 0.9,'2010', transform=ax8.transAxes)
ax9.errorbar(SP2_6h_NPac_date,SP2_6h_NPac_mass_conc,yerr = SP2_6h_NPac_abs_err, color=NPac_color, alpha = 1, fmt = '<', label = 'NPac')
ax9.errorbar(SP2_6h_SPac_date,SP2_6h_SPac_mass_conc,yerr = SP2_6h_SPac_abs_err, color=SPac_color, alpha = 1, fmt = 'o', label = 'SPac')
ax9.errorbar(SP2_6h_Cont_date,SP2_6h_Cont_mass_conc,yerr = SP2_6h_Cont_abs_err, color=NCan_color, alpha = 1, fmt = '>', label = 'Cont')
ax9.errorbar(SP2_6h_LRT_date,SP2_6h_LRT_mass_conc,yerr = SP2_6h_LRT_abs_err, color=LRT_color, alpha = 1, fmt = 's', label = 'LRT')
ax9.errorbar(SP2_6h_BB_date,SP2_6h_BB_mass_conc,yerr = SP2_6h_BB_abs_err, color=BB_color, alpha = 1, fmt = 'd', label = 'BB')
ax9.xaxis.set_major_formatter(hfmt)
ax9.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax9.xaxis.set_visible(False)
ax9.yaxis.set_visible(True)
ax9.yaxis.set_ticks(np.arange(0, max_display_conc, 100))
ax9.yaxis.tick_right()
ax9.set_ylim(0, max_display_conc)
ax9.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax9.text(0.1, 0.9,'2012', transform=ax9.transAxes)
# Shared cluster legend above the middle column.
legend = ax8.legend(loc='upper center', bbox_to_anchor=(0.5, 1.275), ncol=3, numpoints=1)
#GC
# Middle row: GEOS-Chem modeled BC mass concentrations (column 5 of the
# records), same per-cluster colors/markers and x-windows as the top row.
ax13.scatter(GC_6h_NPac_date,GC_6h_NPac_mass_conc, color=NPac_color, alpha = 1, marker = '<')
ax13.scatter(GC_6h_SPac_date,GC_6h_SPac_mass_conc, color=SPac_color, alpha = 1, marker = 'o')
ax13.scatter(GC_6h_Cont_date,GC_6h_Cont_mass_conc, color=NCan_color, alpha = 1, marker = '>')
ax13.scatter(GC_6h_LRT_date, GC_6h_LRT_mass_conc, color=LRT_color, alpha = 1, marker = 's')
ax13.scatter(GC_6h_BB_date, GC_6h_BB_mass_conc, color=BB_color, alpha = 1, marker = 'd')
ax13.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax13.xaxis.set_visible(False)
ax13.yaxis.set_visible(True)
ax13.set_ylabel('GEOS-Chem\nBC mass concentration\n(ng/m3 - STP)')
ax13.set_ylim(0, 700)
ax13.set_xlim(dates.date2num(datetime.strptime(startdate_2009, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2009, '%Y/%m/%d')))
ax13.axvspan(dates.date2num(fire_span2_09s),dates.date2num(fire_span2_09f), facecolor=fire_color, alpha=fire_alpha)
ax14.scatter(GC_6h_NPac_date,GC_6h_NPac_mass_conc, color=NPac_color, alpha = 1, marker = '<')
ax14.scatter(GC_6h_SPac_date,GC_6h_SPac_mass_conc, color=SPac_color, alpha = 1, marker = 'o')
ax14.scatter(GC_6h_Cont_date,GC_6h_Cont_mass_conc, color=NCan_color, alpha = 1, marker = '>')
ax14.scatter(GC_6h_LRT_date, GC_6h_LRT_mass_conc, color=LRT_color, alpha = 1, marker = 's')
ax14.scatter(GC_6h_BB_date, GC_6h_BB_mass_conc, color=BB_color, alpha = 1, marker = 'd')
ax14.xaxis.set_major_formatter(hfmt)
ax14.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax14.xaxis.set_visible(False)
ax14.yaxis.set_visible(True)
ax14.yaxis.set_ticks(np.arange(0, max_display_conc, 100))
ax14.set_yticklabels([])
ax14.set_xlabel('month')
ax14.set_ylim(0, max_display_conc)
ax14.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax14.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax15.scatter(GC_6h_NPac_date,GC_6h_NPac_mass_conc, color=NPac_color, alpha = 1, marker = '<')
ax15.scatter(GC_6h_SPac_date,GC_6h_SPac_mass_conc, color=SPac_color, alpha = 1, marker = 'o')
ax15.scatter(GC_6h_Cont_date,GC_6h_Cont_mass_conc, color=NCan_color, alpha = 1, marker = '>')
ax15.scatter(GC_6h_LRT_date, GC_6h_LRT_mass_conc, color=LRT_color, alpha = 1, marker = 's')
ax15.scatter(GC_6h_BB_date, GC_6h_BB_mass_conc, color=BB_color, alpha = 1, marker = 'd')
ax15.xaxis.set_major_formatter(hfmt)
ax15.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax15.xaxis.set_visible(False)
ax15.yaxis.set_visible(True)
ax15.yaxis.set_ticks(np.arange(0, max_display_conc, 100))
ax15.yaxis.tick_right()
ax15.set_ylim(0, max_display_conc)
ax15.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
#ratios
# Bottom row: GEOS-Chem / measurement ratio (log scale), BB cluster
# deliberately excluded (commented out below).
ax10.errorbar(ratio_dates_SPac,ratio_mass_conc_SPac,yerr = ratio_err_SPac, color=SPac_color, alpha = 1, fmt = 'o')
ax10.errorbar(ratio_dates_NPac,ratio_mass_conc_NPac,yerr = ratio_err_NPac, color=NPac_color, alpha = 1, fmt = '<')
ax10.errorbar(ratio_dates_Cont,ratio_mass_conc_Cont,yerr = ratio_err_Cont, color=NCan_color, alpha = 1, fmt = '>')
ax10.errorbar(ratio_dates_LRT,ratio_mass_conc_LRT,yerr = ratio_err_LRT, color=LRT_color, alpha = 1, fmt = 's')
#ax10.errorbar(ratio_dates_BB,ratio_mass_conc_BB,yerr = ratio_err_BB, color=BB_color, alpha = 1, fmt = 'd')
#ax10.plot(all_dates,all_masses,color=BB_color)
ax10.xaxis.set_major_formatter(hfmt)
ax10.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax10.xaxis.set_minor_locator(dates.DayLocator(interval = 2))
ax10.xaxis.set_visible(True)
ax10.yaxis.set_visible(True)
ax10.set_ylabel('GEOS-Chem/Measurements')
#ax10.set_ylim(0, 70)
ax10.set_xlim(dates.date2num(datetime.strptime(startdate_2009, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2009, '%Y/%m/%d')))
# Horizontal guide at ratio = 1 (perfect model/measurement agreement).
ax10.axhline(y=1,color = BB_color, linestyle = '--')
ax10.axvspan(dates.date2num(fire_span2_09s),dates.date2num(fire_span2_09f), facecolor=fire_color, alpha=fire_alpha)
ax10.set_yscale('log')
ax11.errorbar(ratio_dates_SPac,ratio_mass_conc_SPac,yerr = ratio_err_SPac, color=SPac_color, alpha = 1, fmt = 'o')
ax11.errorbar(ratio_dates_NPac,ratio_mass_conc_NPac,yerr = ratio_err_NPac, color=NPac_color, alpha = 1, fmt = '<')
ax11.errorbar(ratio_dates_Cont,ratio_mass_conc_Cont,yerr = ratio_err_Cont, color=NCan_color, alpha = 1, fmt = '>')
ax11.errorbar(ratio_dates_LRT,ratio_mass_conc_LRT,yerr = ratio_err_LRT, color=LRT_color, alpha = 1, fmt = 's')
#ax11.errorbar(ratio_dates_BB,ratio_mass_conc_BB,yerr = ratio_err_BB, color=BB_color, alpha = 1, fmt = 'd')
#ax11.plot(all_dates,all_masses,color=BB_color)
ax11.xaxis.set_major_formatter(hfmt)
ax11.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax11.xaxis.set_minor_locator(dates.DayLocator(interval = 2))
ax11.xaxis.set_visible(True)
ax11.yaxis.set_visible(False)
ax11.set_xlabel('month')
ax11.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax11.axhline(y=1,color = BB_color, linestyle = '--')
ax11.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax11.set_yscale('log')
# FIX: the 2012 panel previously used fmt '<' for SPac and '*' for NPac,
# contradicting the markers used in ax10/ax11 and the ax8 legend
# (SPac = 'o', NPac = '<'); use the same markers as the other panels.
ax12.errorbar(ratio_dates_SPac,ratio_mass_conc_SPac,yerr = ratio_err_SPac, color=SPac_color, alpha = 1, fmt = 'o')
ax12.errorbar(ratio_dates_NPac,ratio_mass_conc_NPac,yerr = ratio_err_NPac, color=NPac_color, alpha = 1, fmt = '<')
ax12.errorbar(ratio_dates_Cont,ratio_mass_conc_Cont,yerr = ratio_err_Cont, color=NCan_color, alpha = 1, fmt = '>')
ax12.errorbar(ratio_dates_LRT,ratio_mass_conc_LRT,yerr = ratio_err_LRT, color=LRT_color, alpha = 1, fmt = 's')
#ax12.errorbar(ratio_dates_BB,ratio_mass_conc_BB,yerr = ratio_err_BB, color=BB_color, alpha = 1, fmt = 'd')
#ax12.plot(all_dates,all_masses,color=BB_color)
ax12.xaxis.set_major_formatter(hfmt)
ax12.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax12.xaxis.set_minor_locator(dates.DayLocator(interval = 2))
ax12.xaxis.set_visible(True)
ax12.yaxis.set_visible(True)
ax12.yaxis.tick_right()
#ax12.spines['top'].set_visible(False)
#ax12.xaxis.tick_bottom()
ax12.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax12.axhline(y=1,color = BB_color, linestyle = '--')
ax12.set_yscale('log')
#legend = ax12.legend(loc='upper right', shadow=False)
plt.subplots_adjust(hspace=0.08)
plt.subplots_adjust(wspace=0.05)
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
plt.savefig('timeseries - FT only GEOS-Chem v10 v measurements - db - default 6h - RH90 - three row.png', bbox_extra_artists=(legend,), bbox_inches='tight',dpi=600)
plt.show()
|
|
################################################################################
# Copyright (c) 2017-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""A (lazy) container for the triplet of visibilities, flags and weights."""
import itertools
import logging
import dask.array as da
import numba
import numpy as np
import toolz
from dask.array.rechunk import intersect_chunks
from dask.highlevelgraph import HighLevelGraph
from .chunkstore import PlaceholderChunk
from .flags import DATA_LOST
from .van_vleck import autocorr_lookup_table
logger = logging.getLogger(__name__)
class VisFlagsWeights:
    """Correlator data in the form of visibilities, flags and weights.

    This container stores the triplet of visibilities, flags and weights
    as dask arrays to provide lazy / deferred access to the data.

    Parameters
    ----------
    vis : :class:`dask.array.Array` of complex64, shape (*T*, *F*, *B*)
        Complex visibility data as a function of time, frequency and baseline
    flags : :class:`dask.array.Array` of uint8, shape (*T*, *F*, *B*)
        Flags as a function of time, frequency and baseline
    weights : :class:`dask.array.Array` of float32, shape (*T*, *F*, *B*)
        Visibility weights as a function of time, frequency and baseline
    unscaled_weights : :class:`dask.array.Array` of float32, shape (*T*, *F*, *B*)
        Weights that are not scaled by autocorrelations, thereby representing
        the number of voltage samples that constitutes each visibility (optional)

    Raises
    ------
    ValueError
        If the shapes of `vis`, `flags` and `weights` (and `unscaled_weights`,
        when provided) do not all match
    """

    # NOTE: an earlier docstring advertised a `name` parameter, but the
    # constructor has never accepted one; the stale entry has been removed.
    def __init__(self, vis, flags, weights, unscaled_weights=None):
        if not (vis.shape == flags.shape == weights.shape):
            raise ValueError(f'Shapes of vis {vis.shape}, flags {flags.shape} '
                             f'and weights {weights.shape} differ')
        if unscaled_weights is not None and (unscaled_weights.shape != vis.shape):
            raise ValueError(f'Shapes of unscaled weights {unscaled_weights.shape} '
                             f'and vis {vis.shape} differ')
        self.vis = vis
        self.flags = flags
        self.weights = weights
        self.unscaled_weights = unscaled_weights

    @property
    def shape(self):
        """Shape of the data as (*T*, *F*, *B*)."""
        return self.vis.shape
def _default_zero(array):
    """Return `array` unchanged, substituting zeros for placeholder chunks."""
    if not isinstance(array, PlaceholderChunk):
        return array
    return np.zeros(array.shape, array.dtype)
def _apply_data_lost(orig_flags, lost):
if not lost:
return orig_flags
flags = orig_flags
for chunk, slices in toolz.partition(2, lost):
if isinstance(chunk, PlaceholderChunk):
if flags is orig_flags:
flags = orig_flags.copy()
flags[slices] |= DATA_LOST
return flags
def _narrow(array):
"""Reduce an integer array to the narrowest type that can hold it.
It is specialised for unsigned types. It will not alter the dtype
if the array contains negative values.
If the type is not changed, a view is returned rather than a copy.
"""
if array.dtype.kind not in ['u', 'i']:
raise ValueError('Array is not integral')
if not array.size:
dtype = np.uint8
else:
low = np.min(array)
high = np.max(array)
if low < 0:
dtype = array.dtype
elif high <= 0xFF:
dtype = np.uint8
elif high <= 0xFFFF:
dtype = np.uint16
elif high <= 0xFFFFFFFF:
dtype = np.uint32
else:
dtype = array.dtype
return array.astype(dtype, copy=False)
def corrprod_to_autocorr(corrprods):
    """Find the autocorrelation indices of correlation products.

    Parameters
    ----------
    corrprods : sequence of 2-tuples or ndarray
        Input labels of the correlation products

    Returns
    -------
    auto_indices : np.ndarray
        The indices in corrprods that correspond to auto-correlations
    index1, index2 : np.ndarray
        Lists of the same length as corrprods, containing the indices within
        `auto_indices` referring to the first and second corresponding
        autocorrelations.

    Raises
    ------
    KeyError
        If any of the autocorrelations are missing
    """
    # First pass: note where each input's autocorrelation product lives.
    auto_indices = []
    auto_position = {}
    for position, baseline in enumerate(corrprods):
        if baseline[0] == baseline[1]:
            auto_position[baseline[0]] = len(auto_indices)
            auto_indices.append(position)
    # Second pass: map both inputs of every product to their autocorrelation
    # slots (an input without an autocorrelation raises KeyError here).
    index1 = [auto_position[inp_a] for (inp_a, inp_b) in corrprods]
    index2 = [auto_position[inp_b] for (inp_a, inp_b) in corrprods]
    return (_narrow(np.array(auto_indices)),
            _narrow(np.array(index1)),
            _narrow(np.array(index2)))
def correct_autocorr_quantisation(vis, corrprods, levels=None):
    """Correct autocorrelations for quantisation effects (Van Vleck correction).

    This is a first-order correction that only adjusts the mean autocorrelations,
    which in turn affects the autocorrelation and crosscorrelation weights.
    A complete correction would also adjust the mean crosscorrelations, and
    further improve the weight estimates based on Bayesian statistics.

    Parameters
    ----------
    vis : :class:`dask.array.Array` of complex64, shape (*T*, *F*, *B*)
        Complex visibility data as function of time, frequency, correlation product
    corrprods : sequence of 2-tuples or ndarray, containing str
        Input labels of the correlation products, used to find autocorrelations
    levels : sequence of float, optional
        Quantisation levels of real/imag components of complex digital signal
        entering correlator (defaults to MeerKAT F-engine output levels)

    Returns
    -------
    corrected_vis : :class:`dask.array.Array` of complex64, shape (*T*, *F*, *B*)
        Complex visibility data with autocorrelations corrected for quantisation
    """
    assert len(corrprods) == vis.shape[2]
    # Ensure that we have only a single chunk on the baseline axis.
    # (The per-chunk kernel below indexes all autocorrelations at once.)
    if len(vis.chunks[2]) > 1:
        vis = vis.rechunk({2: vis.shape[2]})
    auto_indices, _, _ = corrprod_to_autocorr(corrprods)
    if levels is None:
        # 255-level "8-bit" output of MeerKAT F-engine requantiser
        levels = np.arange(-127., 128.)
    # Lookup tables mapping measured (quantised) autocorrelation power to the
    # corresponding true (unquantised) power.
    quantised_autocorr_table, true_autocorr_table = autocorr_lookup_table(levels)

    def _correct_autocorr_quant(vis):
        # Per-chunk kernel: replace the real autocorrelation values with their
        # dequantised equivalents via linear interpolation of the lookup table.
        out = vis.copy()
        out[..., auto_indices] = np.interp(vis[..., auto_indices].real,
                                           quantised_autocorr_table, true_autocorr_table)
        return out

    # Tag the graph layer with the input's name so the correction is memoised
    # per underlying visibility array.
    return da.blockwise(_correct_autocorr_quant, 'ijk', vis, 'ijk', dtype=np.complex64,
                        name='van-vleck-autocorr-' + vis.name)
@numba.jit(nopython=True, nogil=True)
def weight_power_scale(vis, weights, auto_indices, index1, index2, out=None, divide=True):
    """Divide (or multiply) weights by autocorrelations (ndarray version).

    The weight associated with visibility (i,j) is divided (or multiplied) by
    the corresponding real visibilities (i,i) and (j,j).

    This function is designed to be usable with :func:`dask.array.blockwise`.

    Parameters
    ----------
    vis : np.ndarray
        Chunk of visibility data, with dimensions time, frequency, baseline
        (or any two dimensions then baseline). It must contain all the
        baselines of a stream, even though only the autocorrelations are used.
    weights : np.ndarray
        Chunk of weight data, with the same shape as `vis`
    auto_indices, index1, index2 : np.ndarray
        Arrays returned by :func:`corrprod_to_autocorr`
    out : np.ndarray, optional
        If specified, the output array, with same shape as `vis` and
        dtype ``np.float32``
    divide : bool, optional
        True if weights will be divided by autocorrelations, otherwise
        they will be multiplied

    Returns
    -------
    out : np.ndarray of float32
        The scaled weights (the `out` argument itself if it was provided)
    """
    # Per-autocorrelation scale factor, recomputed for every (time, freq) sample.
    auto_scale = np.empty(len(auto_indices), np.float32)
    out = np.empty(vis.shape, np.float32) if out is None else out
    # Tiny but non-zero sentinel used when the scale factor is not finite.
    bad_weight = np.float32(2.0**-32)
    for i in range(vis.shape[0]):
        for j in range(vis.shape[1]):
            # Gather the (reciprocal) autocorrelation powers for this sample.
            for k in range(len(auto_indices)):
                autocorr = vis[i, j, auto_indices[k]].real
                auto_scale[k] = np.reciprocal(autocorr) if divide else autocorr
            for k in range(vis.shape[2]):
                p = auto_scale[index1[k]] * auto_scale[index2[k]]
                # If either or both of the autocorrelations has zero power then
                # there is likely something wrong with the system. Set the
                # weight to very close to zero (not actually zero, since that
                # can cause divide-by-zero problems downstream).
                if not np.isfinite(p):
                    p = bad_weight
                out[i, j, k] = p * weights[i, j, k]
    return out
def _scale_weights(vis, weights, corrprods, divide):
    """Divide (or multiply) weights by autocorrelations (dask array version)."""
    assert len(corrprods) == vis.shape[2]

    def _single_baseline_chunk(arr):
        # The power-scaling kernel needs all baselines of the stream in one chunk.
        if len(arr.chunks[2]) > 1:
            arr = arr.rechunk({2: arr.shape[2]})
        return arr

    vis = _single_baseline_chunk(vis)
    weights = _single_baseline_chunk(weights)
    auto_indices, index1, index2 = corrprod_to_autocorr(corrprods)
    return da.blockwise(weight_power_scale, 'ijk', vis, 'ijk', weights, 'ijk',
                        dtype=np.float32, auto_indices=auto_indices,
                        index1=index1, index2=index2, divide=divide)
class ChunkStoreVisFlagsWeights(VisFlagsWeights):
    """Correlator data stored in a chunk store.

    Parameters
    ----------
    store : :class:`ChunkStore` object
        Chunk store
    chunk_info : dict mapping array name to info dict
        Dict specifying prefix, dtype, shape and chunks per array
    corrprods : sequence of 2-tuples of input labels, optional
        Correlation products. If given, compute both (scaled) `weights` and
        `unscaled_weights` by dividing or multiplying by the autocorrelations
        as needed. If `None`, the stored weights become `weights` and
        `unscaled_weights` is `None`, i.e. disabled (useful for testing).
    stored_weights_are_scaled : bool, optional
        True if the weights in the chunk store are already scaled by
        the autocorrelations. This determines how (scaled) `weights`
        and `unscaled_weights` are obtained from the stored weights.
        Should be True if `corrprods` is `None`.
    van_vleck : {'off', 'autocorr'}, optional
        Type of Van Vleck (quantisation) correction to perform
    index : tuple of slice, optional
        Slice expression to apply to each array before combining them. At the
        moment this can only have two elements (no slicing of baselines),
        because ``weights_channel`` only has time and frequency dimensions.

    Attributes
    ----------
    vis_prefix : string
        Prefix of correlator_data / visibility array, viz. its S3 bucket name
    """

    def __init__(self, store, chunk_info, corrprods=None,
                 stored_weights_are_scaled=True, van_vleck='off', index=()):
        self.store = store
        self.vis_prefix = chunk_info['correlator_data']['prefix']
        darray = {}
        # Build one lazy dask array per stored quantity.
        for array, info in chunk_info.items():
            array_name = store.join(info['prefix'], array)
            chunk_args = (array_name, info['chunks'], info['dtype'])
            # Missing flag chunks are filled with DATA_LOST directly; missing
            # chunks of other arrays become placeholders so the loss can be
            # folded into the flags below.
            errors = DATA_LOST if array == 'flags' else 'placeholder'
            darray[array] = store.get_dask_array(*chunk_args, index=index, errors=errors)
        flags_orig_name = darray['flags'].name
        flags_raw_name = store.join(chunk_info['flags']['prefix'], 'flags_raw')
        # Combine original flags with data_lost indicating where values were lost from
        # other arrays.
        # NOTE(review): the loop variables named `index` below shadow the
        # `index` parameter, which has already been consumed by
        # get_dask_array at this point — confusing but harmless.
        lost_map = np.empty([len(c) for c in darray['flags'].chunks], dtype="O")
        for index in np.ndindex(lost_map.shape):
            lost_map[index] = []
        for array_name, array in darray.items():
            if array_name == 'flags':
                continue
            # Source keys may appear multiple times in the array, so to save
            # memory we can pre-create the objects for the keys and reuse them
            # (idea borrowed from dask.array.rechunk).
            src_keys = np.empty([len(c) for c in array.chunks], dtype="O")
            for index in np.ndindex(src_keys.shape):
                src_keys[index] = (array.name,) + index
            # array may have fewer dimensions than flags
            # (specifically, for weights_channel).
            chunks = array.chunks
            if array.ndim < darray['flags'].ndim:
                chunks += tuple((x,) for x in darray['flags'].shape[array.ndim:])
            intersections = intersect_chunks(darray['flags'].chunks, chunks)
            for src_key, pieces in zip(src_keys.flat, intersections):
                for piece in pieces:
                    dst_index, slices = zip(*piece)
                    # if src_key is missing, then the parts of dst_index
                    # indicated by slices must be flagged.
                    # TODO: fast path for when slices covers the whole chunk?
                    lost_map[dst_index].extend([src_key, slices])
        # Graph layer that merges stored flags with the lost-data map above.
        dsk = {
            (flags_raw_name,) + key: (
                _apply_data_lost,
                (flags_orig_name,) + key,
                value
            ) for key, value in np.ndenumerate(lost_map)
        }
        dsk = HighLevelGraph.from_collections(
            flags_raw_name, dsk, dependencies=list(darray.values())
        )
        flags = da.Array(dsk, flags_raw_name,
                         chunks=darray['flags'].chunks,
                         shape=darray['flags'].shape,
                         dtype=darray['flags'].dtype)
        darray['flags'] = flags
        # Turn missing blocks in the other arrays into zeros to make them
        # valid dask arrays.
        for array_name, array in darray.items():
            if array_name == 'flags':
                continue
            new_name = 'filled-' + array.name
            indices = itertools.product(*(range(len(c)) for c in array.chunks))
            # (`shape` is unused; the zip merely pairs each index with its chunk.)
            dsk = {
                (new_name,) + index: (
                    _default_zero,
                    (array.name,) + index
                ) for index, shape in zip(indices, itertools.product(*array.chunks))
            }
            dsk = HighLevelGraph.from_collections(new_name, dsk, dependencies=[array])
            darray[array_name] = da.Array(dsk, new_name,
                                          chunks=array.chunks,
                                          shape=array.shape,
                                          dtype=array.dtype)
        # Optionally correct visibilities for quantisation effects
        vis = darray['correlator_data']
        if van_vleck == 'autocorr':
            vis = correct_autocorr_quantisation(vis, corrprods)
        elif van_vleck != 'off':
            raise ValueError("The van_vleck parameter should be one of ['off', 'autocorr'], "
                             f"got '{van_vleck}' instead")
        # Combine low-resolution weights and high-resolution weights_channel
        stored_weights = darray['weights'] * darray['weights_channel'][..., np.newaxis]
        # Scale weights according to power (or remove scaling if already applied)
        if corrprods is not None:
            if stored_weights_are_scaled:
                weights = stored_weights
                unscaled_weights = _scale_weights(vis, stored_weights, corrprods, divide=False)
            else:
                weights = _scale_weights(vis, stored_weights, corrprods, divide=True)
                unscaled_weights = stored_weights
        else:
            if not stored_weights_are_scaled:
                raise ValueError('Stored weights are unscaled but no corrprods are provided')
            weights = stored_weights
            # Don't bother with unscaled weights (it's optional)
            unscaled_weights = None
        VisFlagsWeights.__init__(self, vis, flags, weights, unscaled_weights)
|
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import time
# from system_model_dif import SystemModel_dif
from system_model import SystemModel
# from system_model_wrong import SystemModel_wrong
from game_ac_network import GameACLSTMNetwork
from constants import GAMMA
from constants import LOCAL_T_MAX
from constants import ENTROPY_BETA
from constants import ACTION_SIZE
from constants import MAX_TIME_STEP
# Thread 0 prints policy/value diagnostics every this many local steps.
LOG_INTERVAL = 1000
# Thread 0 prints steps-per-second performance stats at this local-step interval.
PERFORMANCE_LOG_INTERVAL = 1000
class A3CTrainingThread(object):
    """One asynchronous A3C worker.

    Owns a thread-local actor-critic LSTM network plus its own environment
    (`SystemModel`). Each call to :meth:`process` syncs the local network from
    the shared global network, rolls out up to ``LOCAL_T_MAX`` steps, computes
    discounted returns, and applies the resulting gradients to the *global*
    network's variables.
    """

    def __init__(self,
                 thread_index,
                 global_network,
                 initial_learning_rate,
                 learning_rate_input,
                 grad_applier,
                 max_global_time_step,
                 device,
                 ):
        print ("This is the thread", thread_index)
        self.thread_index = thread_index
        self.learning_rate_input = learning_rate_input
        self.max_global_time_step = max_global_time_step
        # Thread-local copy of the actor-critic network (synced from global).
        self.local_network = GameACLSTMNetwork(ACTION_SIZE, thread_index, device)
        self.local_network.prepare_loss(ENTROPY_BETA)
        self.actions_thread = []
        with tf.device(device):
            var_refs = [v._ref() for v in self.local_network.get_vars()]
            # Gradients of the local loss with respect to the local variables.
            self.gradients = tf.gradients(
                self.local_network.total_loss, var_refs,
                gate_gradients=False,
                aggregation_method=None,
                colocate_gradients_with_ops=False)
        # The A3C update: local gradients are applied to the global variables.
        self.apply_gradients = grad_applier.apply_gradients(
            global_network.get_vars(),
            self.gradients )
        # Op that copies the global weights into this thread's local network.
        self.sync = self.local_network.sync_from(global_network)
        # if self.thread_index >=2 :
        # self.model = SystemModel_dif()
        # # # elif self.thread_index == 5 :
        # # # self.model = SystemModel_wrong()
        # # else:
        self.model = SystemModel()
        self.local_t = 0
        self.episode_rate = 0
        self.episode_rate_ave = 0
        self.episode_count_local = 0
        self.initial_learning_rate = initial_learning_rate
        self.episode_reward = 0
        # variable controling log output
        self.prev_local_t = 0
        # get the initial state from the initial action
        # self.model.intialize_para()

    def _anneal_learning_rate(self, global_time_step):
        """Linearly decay the learning rate to zero over max_global_time_step."""
        learning_rate = self.initial_learning_rate * (self.max_global_time_step - global_time_step) / self.max_global_time_step
        if learning_rate < 0.0:
            learning_rate = 0.0
        return learning_rate

    def choose_action(self, pi_values):
        """Sample an action index from the policy distribution `pi_values`."""
        return np.random.choice(range(len(pi_values)), p=pi_values)

    def _record_score(self, sess, summary_writer, summary_op, score_input, score, global_t):# rate_input, rate, reward_handover_input, reward_handover,
        """Write `score` as a TensorBoard summary at global step `global_t`."""
        summary_str = sess.run(summary_op, feed_dict={
            score_input: score})
        # , rate_input:rate, reward_handover_input:reward_handover
        summary_writer.add_summary(summary_str, global_t)#
        summary_writer.flush()

    def set_start_time(self, start_time):
        # Wall-clock reference used by the steps-per-second report in process().
        self.start_time = start_time

    def process(self, sess, global_t, summary_writer, summary_op, score_input):#, rate_input, reward_handover_input):
        """Run up to LOCAL_T_MAX environment steps and do one training update.

        Returns the number of local steps advanced during this call.
        """
        states = []
        actions = []
        rewards = []
        values = []
        terminal_end = False
        # copy weights from shared to local
        sess.run( self.sync )
        start_local_t = self.local_t
        start_episode_count = self.episode_count_local
        # LSTM state at rollout start, replayed during the training pass below.
        start_lstm_state = self.local_network.lstm_state_out
        t = 1
        for i in range(LOCAL_T_MAX):
            pi_, value_ = self.local_network.run_policy_and_value(sess, self.model.s_t)
            action = self.choose_action(pi_)
            states.append(self.model.s_t)
            actions.append(action)
            values.append(value_)
            # self.actions_thread.append(action)
            if (self.thread_index == 0) and (self.local_t % LOG_INTERVAL == 0):#
                print("pi={}".format(pi_))
                print(" V={}".format(value_))
                print("thread", self.thread_index)
                # print("user", self.model.users)
            # process game
            # NOTE(review): at i == 0 this passes actions[-1] == actions[0],
            # i.e. "previous" action equals the current one — confirm intended.
            self.model.state_update(actions[i - 1], actions[i])
            # if (self.thread_index == 0):
            # print(self.model.rate, self.model.test_rates, self.thread_index)
            #receive game result
            reward = self.model.reward
            self.episode_reward += reward
            self.episode_rate += self.model.rate
            # if (self.thread_index == 0):
            # print("Thread", self.thread_index, "reward", reward, "episode_reward", self.episode_reward, "global_t", global_t, "local_t", self.local_t, "rate", self.model.rate, "handover_reward", self.model.reward_handover)
            rewards.append(reward)
            # print ('rewards', rewards)
            self.local_t += 1
            t += 1
            # s_t1 -> s_t
            self.model.update()
            # if self.local_t % 500 == 0 and self.local_t != 0:
            # print("score={}".format(self.episode_reward))
            terminal = self.model.terminal
            if self.model.terminal:
                break
        #self.episode_reward / t
        if terminal:
            terminal_end = True
            # self.episode_count_local +=1
            # self.handover_ratio = self.model.count_handover_total / (
            # self.model.count_no_handover + self.model.count_handover_total + 1)
            # self.episode_rate_ave = self.episode_rate / t
            # self.episode_reward_ave = self.episode_reward / t
            # Record the latest value estimate as the "score" summary.
            self._record_score(sess, summary_writer, summary_op, score_input,
                               value_, global_t)\
            # , rate_input, self.episode_rate_ave, reward_handover_input,
            # self.handover_ratio) #value_
            # if self.local_t % 100 == 0 and self.local_t != 0:
            # self.model.count_no_handover = 0
            # self.model.count_handover_total = 0
            # self.episode_rate = 0
            # self.episode_reward = 0
            # self.local_network.reset_state()
            self.model.init_users()
        else:
            # self.handover_ratio = self.model.count_handover_total / (
            # self.model.count_no_handover + self.model.count_handover_total +1)
            # self.episode_rate_ave = self.episode_rate / t
            # self.episode_reward_ave = self.episode_reward / t
            self._record_score(sess, summary_writer, summary_op, score_input,
                               value_, global_t)
            # , rate_input,self.episode_rate_ave, reward_handover_input,
            # self.handover_ratio)
            # if self.local_t %100 == 0 and self.local_t !=0:
            # self.model.count_no_handover = 0
            # self.model.count_handover_total = 0
            # self.episode_rate = 0
            # self.episode_reward = 0
            # self.local_network.reset_state()
            # self.model.init_users()
        # Bootstrap the return from the value estimate unless the episode ended.
        R = 0.0
        if not terminal_end:
            R = self.local_network.run_value(sess, self.model.s_t)
        actions.reverse()
        states.reverse()
        rewards.reverse()
        values.reverse()
        batch_si = []
        batch_a = []
        batch_td = []
        batch_R = []
        # compute and accmulate gradients
        for(ai, ri, si, Vi) in zip(actions, rewards, states, values):
            # Discounted return R and advantage (temporal difference) per step.
            R = ri + GAMMA * R
            td = R - Vi
            # One-hot encoding of the chosen action.
            a = np.zeros([ACTION_SIZE])
            a[ai] = 1
            batch_si.append(si)
            batch_a.append(a)
            batch_td.append(td)
            batch_R.append(R)
        cur_learning_rate = self._anneal_learning_rate(global_t)
        # Restore chronological order so the LSTM replays the rollout correctly.
        batch_si.reverse()
        batch_a.reverse()
        batch_td.reverse()
        batch_R.reverse()
        _,loss_value = sess.run( [self.apply_gradients,self.local_network.total_loss],
                                 feed_dict = {
                                     self.local_network.s: batch_si,
                                     self.local_network.a: batch_a,
                                     self.local_network.td: batch_td,
                                     self.local_network.r: batch_R,
                                     self.local_network.initial_lstm_state: start_lstm_state,
                                     self.local_network.step_size : [len(batch_a)],
                                     self.learning_rate_input: cur_learning_rate } )
        # _, loss_value = sess.run([self.apply_gradients,self.local_network.total_loss],
        # feed_dict={
        # self.local_network.s: batch_si,
        # self.local_network.a: batch_a,
        # self.local_network.td: batch_td,
        # self.local_network.r: batch_R,
        # self.learning_rate_input: cur_learning_rate})
        # if ((self.thread_index == 0) ):
        # print(self.model.users)
        if ((self.thread_index == 0) ) and(self.local_t - self.prev_local_t >= PERFORMANCE_LOG_INTERVAL): #
            self.prev_local_t += PERFORMANCE_LOG_INTERVAL
            elapsed_time = time.time() - self.start_time
            steps_per_sec = global_t / elapsed_time
            # NOTE(review): the labels in this format string ("thread", "loss")
            # do not line up with the argument order (thread_index, loss_value,
            # global_t, ...) — the printed values appear shifted; confirm.
            print("### Performance :{} thread {} loss {} STEPS in {:.0f} sec. {:.0f} STEPS/sec. {:.2f}M STEPS/hour".format(
                self.thread_index,loss_value, global_t, elapsed_time, steps_per_sec, steps_per_sec * 3600 / 1000000.))
        # return advanced local step size
        diff_local_t = self.local_t - start_local_t
        diff_episode_count = self.episode_count_local - start_episode_count
        return diff_local_t
|
|
from collections import defaultdict
from django.template.base import (
Library, Node, TemplateSyntaxError, TextNode, Variable, token_kwargs,
)
from django.utils import six
from django.utils.safestring import mark_safe
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
class ExtendsError(Exception):
    """Error raised while handling the {% extends %} template tag."""
    pass
class BlockContext(object):
    """Tracks, per block name, the chain of overriding BlockNodes built up
    while walking a template inheritance hierarchy. The last entry in each
    queue is the most-derived (currently active) block."""

    def __init__(self):
        # Dictionary of FIFO queues.
        self.blocks = defaultdict(list)

    def add_blocks(self, blocks):
        # Parents are added after children, so prepend each parent's blocks
        # to keep the child override at the active (last) position.
        for name, block in six.iteritems(blocks):
            self.blocks[name].insert(0, block)

    def pop(self, name):
        queue = self.blocks[name]
        return queue.pop() if queue else None

    def push(self, name, block):
        self.blocks[name].append(block)

    def get_block(self, name):
        queue = self.blocks[name]
        return queue[-1] if queue else None
class BlockNode(Node):
    """A {% block %} node: renders its own contents or, when taking part in
    template inheritance, the most-derived override registered for its name."""

    def __init__(self, name, nodelist, parent=None):
        self.name, self.nodelist, self.parent = name, nodelist, parent

    def __repr__(self):
        return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)

    def render(self, context):
        block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
        with context.push():
            if block_context is None:
                # Not part of an inheritance chain: render our own contents.
                context['block'] = self
                result = self.nodelist.render(context)
            else:
                # Temporarily pop the most-derived override for this name;
                # it is pushed back below so sibling renders see the same state.
                push = block = block_context.pop(self.name)
                if block is None:
                    block = self
                # Create new block so we can store context without thread-safety issues.
                block = type(self)(block.name, block.nodelist)
                block.context = context
                context['block'] = block
                result = block.nodelist.render(context)
                if push is not None:
                    block_context.push(self.name, push)
        return result

    def super(self):
        """Render the parent template's version of this block
        (the {{ block.super }} template variable)."""
        if not hasattr(self, 'context'):
            raise TemplateSyntaxError(
                "'%s' object has no attribute 'context'. Did you use "
                "{{ block.super }} in a base template?" % self.__class__.__name__
            )
        render_context = self.context.render_context
        if (BLOCK_CONTEXT_KEY in render_context and
                render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
            return mark_safe(self.render(self.context))
        return ''
class ExtendsNode(Node):
    """An {% extends %} node: registers this template's blocks with the block
    context, then delegates rendering to the parent template."""
    # Enforced by the parser: {% extends %} must be the template's first tag.
    must_be_first = True

    def __init__(self, nodelist, parent_name, template_dirs=None):
        self.nodelist = nodelist
        self.parent_name = parent_name
        self.template_dirs = template_dirs
        # Map of block name -> BlockNode defined in this (child) template.
        self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}

    def __repr__(self):
        return '<ExtendsNode: extends %s>' % self.parent_name.token

    def get_parent(self, context):
        """Resolve and return the parent template as a Template object."""
        parent = self.parent_name.resolve(context)
        if not parent:
            error_msg = "Invalid template name in 'extends' tag: %r." % parent
            if self.parent_name.filters or\
                    isinstance(self.parent_name.var, Variable):
                error_msg += " Got this from the '%s' variable." %\
                    self.parent_name.token
            raise TemplateSyntaxError(error_msg)
        if hasattr(parent, 'render'):
            return parent  # parent is a Template object
        # Otherwise treat the resolved value as a template name to look up.
        return context.engine.get_template(parent)

    def render(self, context):
        compiled_parent = self.get_parent(context)
        if BLOCK_CONTEXT_KEY not in context.render_context:
            context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
        block_context = context.render_context[BLOCK_CONTEXT_KEY]
        # Add the block nodes from this node to the block context
        block_context.add_blocks(self.blocks)
        # If this block's parent doesn't have an extends node it is the root,
        # and its block nodes also need to be added to the block context.
        for node in compiled_parent.nodelist:
            # The ExtendsNode has to be the first non-text node.
            if not isinstance(node, TextNode):
                if not isinstance(node, ExtendsNode):
                    blocks = {n.name: n for n in
                              compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
                    block_context.add_blocks(blocks)
                break
        # Call Template._render explicitly so the parser context stays
        # the same.
        return compiled_parent._render(context)
class IncludeNode(Node):
    """An {% include %} node: renders another template with the current (or an
    isolated) context, optionally extended with extra variables."""

    def __init__(self, template, *args, **kwargs):
        self.template = template
        # Extra variables supplied via "with", and the "only" isolation flag.
        self.extra_context = kwargs.pop('extra_context', {})
        self.isolated_context = kwargs.pop('isolated_context', False)
        super(IncludeNode, self).__init__(*args, **kwargs)

    def render(self, context):
        try:
            template = self.template.resolve(context)
            # Does this quack like a Template? If not, look it up by name.
            if not callable(getattr(template, 'render', None)):
                template = context.engine.get_template(template)
            extra = {
                name: var.resolve(context)
                for name, var in six.iteritems(self.extra_context)
            }
            if self.isolated_context:
                # "only": render against a fresh context holding just `extra`.
                return template.render(context.new(extra))
            with context.push(**extra):
                return template.render(context)
        except Exception:
            # Rendering failures are swallowed in production and only
            # re-raised when the engine is in debug mode.
            if context.engine.debug:
                raise
            return ''
@register.tag('block')
def do_block(parser, token):
    """
    Define a block that can be overridden by child templates.
    """
    # token.split_contents() isn't useful here because this tag doesn't
    # accept variables as arguments.
    tag_args = token.contents.split()
    if len(tag_args) != 2:
        raise TemplateSyntaxError("'%s' tag takes only one argument" % tag_args[0])
    block_name = tag_args[1]
    # Keep track of the names of BlockNodes found in this template, so we can
    # check for duplication.
    try:
        if block_name in parser.__loaded_blocks:
            raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (tag_args[0], block_name))
        parser.__loaded_blocks.append(block_name)
    except AttributeError:  # parser.__loaded_blocks isn't a list yet
        parser.__loaded_blocks = [block_name]
    nodelist = parser.parse(('endblock',))
    # This check is kept for backwards-compatibility. See #3100.
    closing_token = parser.next_token()
    acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
    if closing_token.contents not in acceptable_endblocks:
        parser.invalid_block_tag(closing_token, 'endblock', acceptable_endblocks)
    return BlockNode(block_name, nodelist)
@register.tag('extends')
def do_extends(parser, token):
    """
    Signal that this template extends a parent template.

    This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
    uses the literal value "base" as the name of the parent template to extend,
    or ``{% extends variable %}`` uses the value of ``variable`` as either the
    name of the parent template to extend (if it evaluates to a string) or as
    the parent template itself (if it evaluates to a Template object).
    """
    tag_args = token.split_contents()
    if len(tag_args) != 2:
        raise TemplateSyntaxError("'%s' takes one argument" % tag_args[0])
    parent_name = parser.compile_filter(tag_args[1])
    # Consume the rest of the template; a second {% extends %} is illegal.
    nodelist = parser.parse()
    if nodelist.get_nodes_by_type(ExtendsNode):
        raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % tag_args[0])
    return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
    """
    Loads a template and renders it with the current context. You can pass
    additional context using keyword arguments.

    Example::

        {% include "foo/some_include" %}
        {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}

    Use the ``only`` argument to exclude the current context when rendering
    the included template::

        {% include "foo/some_include" only %}
        {% include "foo/some_include" with bar="1" only %}
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "%r tag takes at least one argument: the name of the template to "
            "be included." % bits[0]
        )
    parsed = {}
    rest = bits[2:]
    while rest:
        opt = rest.pop(0)
        if opt in parsed:
            raise TemplateSyntaxError('The %r option was specified more '
                                      'than once.' % opt)
        if opt == 'with':
            # token_kwargs consumes the keyword arguments from `rest` in place.
            value = token_kwargs(rest, parser, support_legacy=False)
            if not value:
                raise TemplateSyntaxError('"with" in %r tag needs at least '
                                          'one keyword argument.' % bits[0])
        elif opt == 'only':
            value = True
        else:
            raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
                                      (bits[0], opt))
        parsed[opt] = value
    return IncludeNode(parser.compile_filter(bits[1]),
                       extra_context=parsed.get('with', {}),
                       isolated_context=parsed.get('only', False))
|
|
"""
Support for Honeywell Round Connected and Honeywell Evohome thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.honeywell/
"""
import logging
import socket
import datetime
import requests
import voluptuous as vol
from homeassistant.components.climate import (
ClimateDevice, PLATFORM_SCHEMA, ATTR_FAN_MODE, ATTR_FAN_LIST,
ATTR_OPERATION_MODE, ATTR_OPERATION_LIST)
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, TEMP_CELSIUS, TEMP_FAHRENHEIT,
ATTR_TEMPERATURE)
import homeassistant.helpers.config_validation as cv
# Pinned third-party client libraries this platform needs installed.
REQUIREMENTS = ['evohomeclient==0.2.5',
                'somecomfort==0.4.1']
_LOGGER = logging.getLogger(__name__)
# Extra state-attribute keys exposed by the US thermostat.
ATTR_FAN = 'fan'
ATTR_SYSTEM_MODE = 'system_mode'
ATTR_CURRENT_OPERATION = 'equipment_output_status'
# Configuration keys accepted in the platform config.
CONF_AWAY_TEMPERATURE = 'away_temperature'
CONF_COOL_AWAY_TEMPERATURE = 'away_cool_temperature'
CONF_HEAT_AWAY_TEMPERATURE = 'away_heat_temperature'
CONF_REGION = 'region'
# Defaults applied when the options above are omitted.
DEFAULT_AWAY_TEMPERATURE = 16
DEFAULT_COOL_AWAY_TEMPERATURE = 30
DEFAULT_HEAT_AWAY_TEMPERATURE = 16
DEFAULT_REGION = 'eu'
REGIONS = ['eu', 'us']
# Extend the shared climate-platform schema with this platform's options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_AWAY_TEMPERATURE,
                 default=DEFAULT_AWAY_TEMPERATURE): vol.Coerce(float),
    vol.Optional(CONF_COOL_AWAY_TEMPERATURE,
                 default=DEFAULT_COOL_AWAY_TEMPERATURE): vol.Coerce(float),
    vol.Optional(CONF_HEAT_AWAY_TEMPERATURE,
                 default=DEFAULT_HEAT_AWAY_TEMPERATURE): vol.Coerce(float),
    vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(REGIONS),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Honeywell thermostat."""
    user = config.get(CONF_USERNAME)
    passwd = config.get(CONF_PASSWORD)
    # Dispatch on the configured region: 'us' talks to the somecomfort
    # API, anything else (i.e. 'eu') talks to the evohome API.
    if config.get(CONF_REGION) == 'us':
        return _setup_us(user, passwd, config, add_devices)
    return _setup_round(user, passwd, config, add_devices)
def _setup_round(username, password, config, add_devices):
    """Set up Honeywell Round Connected thermostats via the evohome API."""
    from evohomeclient import EvohomeClient

    away_temp = config.get(CONF_AWAY_TEMPERATURE)
    evo_api = EvohomeClient(username, password)
    try:
        for index, zone in enumerate(
                evo_api.temperatures(force_refresh=True)):
            # Only the first zone acts as "master" and forces refreshes;
            # the rest read the cached data.
            add_devices(
                [RoundThermostat(evo_api, zone['id'], index == 0, away_temp)],
                True
            )
    except socket.error:
        _LOGGER.error(
            "Connection error logging into the honeywell evohome web service")
        return False
    return True
def _setup_us(username, password, config, add_devices):
    """Set up Honeywell thermostats that use the US somecomfort API."""
    import somecomfort

    try:
        client = somecomfort.SomeComfort(username, password)
    except somecomfort.AuthError:
        _LOGGER.error("Failed to login to honeywell account %s", username)
        return False
    except somecomfort.SomeComfortError as ex:
        _LOGGER.error("Failed to initialize honeywell client: %s", str(ex))
        return False

    # Optional filters: restrict to one location and/or one thermostat.
    dev_id = config.get('thermostat')
    loc_id = config.get('location')
    cool_away_temp = config.get(CONF_COOL_AWAY_TEMPERATURE)
    heat_away_temp = config.get(CONF_HEAT_AWAY_TEMPERATURE)

    entities = []
    for location in client.locations_by_id.values():
        if loc_id and location.locationid != loc_id:
            continue
        for device in location.devices_by_id.values():
            if dev_id and device.deviceid != dev_id:
                continue
            entities.append(
                HoneywellUSThermostat(client, device, cool_away_temp,
                                      heat_away_temp, username, password))
    add_devices(entities)
    return True
class RoundThermostat(ClimateDevice):
    """Representation of a Honeywell Round Connected thermostat."""
    def __init__(self, client, zone_id, master, away_temp):
        """Initialize the thermostat.

        client: shared evohomeclient API instance.
        zone_id: id of the zone this entity represents.
        master: True only for the first zone; the master forces a data
            refresh while the others read the shared cache.
        away_temp: setpoint applied when away mode is turned on.
        """
        self.client = client
        self._current_temperature = None
        self._target_temperature = None
        self._name = 'round connected'
        self._id = zone_id
        self._master = master
        self._is_dhw = False
        self._away_temp = away_temp
        self._away = False
    @property
    def name(self):
        """Return the name of the honeywell, if any."""
        return self._name
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        if self._is_dhw:
            # Hot-water zones have no adjustable setpoint.
            return None
        return self._target_temperature
    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        self.client.set_temperature(self._name, temperature)
    @property
    def current_operation(self: ClimateDevice) -> str:
        """Get the current operation of the system."""
        return getattr(self.client, ATTR_SYSTEM_MODE, None)
    @property
    def is_away_mode_on(self):
        """Return true if away mode is on."""
        return self._away
    def set_operation_mode(self: ClimateDevice, operation_mode: str) -> None:
        """Set the HVAC mode for the thermostat."""
        if hasattr(self.client, ATTR_SYSTEM_MODE):
            self.client.system_mode = operation_mode
    def turn_away_mode_on(self):
        """Turn away on.
        Honeywell does have a proprietary away mode, but it doesn't really work
        the way it should. For example: If you set a temperature manually
        it doesn't get overwritten when away mode is switched on.
        """
        self._away = True
        self.client.set_temperature(self._name, self._away_temp)
    def turn_away_mode_off(self):
        """Turn away off."""
        self._away = False
        self.client.cancel_temp_override(self._name)
    def update(self):
        """Get the latest data for this zone."""
        # Bug fix: previously `data` was only bound inside the loop, so an
        # unmatched zone id crashed with UnboundLocalError below (the
        # `except StopIteration` never fires -- a for loop consumes
        # StopIteration internally).  Initialize and check explicitly.
        data = None
        try:
            # Only refresh if this is the "master" device,
            # others will pick up the cache
            for val in self.client.temperatures(force_refresh=self._master):
                if val['id'] == self._id:
                    data = val
        except StopIteration:
            _LOGGER.error("Did not receive any temperature data from the "
                          "evohomeclient API")
            return
        if data is None:
            _LOGGER.error("Did not receive any temperature data for zone %s",
                          self._id)
            return
        self._current_temperature = data['temp']
        self._target_temperature = data['setpoint']
        if data['thermostat'] == 'DOMESTIC_HOT_WATER':
            self._name = 'Hot Water'
            self._is_dhw = True
        else:
            self._name = data['name']
            self._is_dhw = False
        # The underlying library doesn't expose the thermostat's mode
        # but we can pull it out of the big dictionary of information.
        device = self.client.devices[self._id]
        self.client.system_mode = device[
            'thermostat']['changeableValues']['mode']
class HoneywellUSThermostat(ClimateDevice):
    """Representation of a Honeywell US Thermostat."""
    def __init__(self, client, device, cool_away_temp,
                 heat_away_temp, username, password):
        """Initialize the thermostat."""
        # somecomfort client; may be recreated by _retry() after failures.
        self._client = client
        self._device = device
        # Setpoints applied when away mode is switched on.
        self._cool_away_temp = cool_away_temp
        self._heat_away_temp = heat_away_temp
        self._away = False
        # Credentials kept so _retry() can build a fresh client.
        self._username = username
        self._password = password
    @property
    def is_fan_on(self):
        """Return true if fan is on."""
        return self._device.fan_running
    @property
    def name(self):
        """Return the name of the honeywell, if any."""
        return self._device.name
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return (TEMP_CELSIUS if self._device.temperature_unit == 'C'
                else TEMP_FAHRENHEIT)
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._device.current_temperature
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        # The device carries separate cool/heat setpoints; pick the one
        # matching the active system mode.
        if self._device.system_mode == 'cool':
            return self._device.setpoint_cool
        return self._device.setpoint_heat
    @property
    def current_operation(self: ClimateDevice) -> str:
        """Return current operation ie. heat, cool, idle."""
        oper = getattr(self._device, ATTR_CURRENT_OPERATION, None)
        if oper == "off":
            oper = "idle"
        return oper
    def set_temperature(self, **kwargs):
        """Set target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        import somecomfort
        try:
            # Get current mode
            mode = self._device.system_mode
            # Set hold if this is not the case
            if getattr(self._device, "hold_{}".format(mode)) is False:
                # Get next period key
                next_period_key = '{}NextPeriod'.format(mode.capitalize())
                # Get next period raw value
                next_period = self._device.raw_ui_data.get(next_period_key)
                # Get next period time
                # NOTE(review): next_period appears to be a schedule slot
                # index in 15-minute increments -- confirm against the
                # somecomfort raw_ui_data format.
                hour, minute = divmod(next_period * 15, 60)
                # Set hold time
                setattr(self._device,
                        "hold_{}".format(mode),
                        datetime.time(hour, minute))
            # Set temperature
            setattr(self._device,
                    "setpoint_{}".format(mode),
                    temperature)
        except somecomfort.SomeComfortError:
            _LOGGER.error("Temperature %.1f out of range", temperature)
    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        import somecomfort
        data = {
            ATTR_FAN: (self.is_fan_on and 'running' or 'idle'),
            ATTR_FAN_MODE: self._device.fan_mode,
            ATTR_OPERATION_MODE: self._device.system_mode,
        }
        data[ATTR_FAN_LIST] = somecomfort.FAN_MODES
        data[ATTR_OPERATION_LIST] = somecomfort.SYSTEM_MODES
        return data
    @property
    def is_away_mode_on(self):
        """Return true if away mode is on."""
        return self._away
    def turn_away_mode_on(self):
        """Turn away on.
        Somecomfort does have a proprietary away mode, but it doesn't really
        work the way it should. For example: If you set a temperature manually
        it doesn't get overwritten when away mode is switched on.
        """
        self._away = True
        import somecomfort
        try:
            # Get current mode
            mode = self._device.system_mode
        except somecomfort.SomeComfortError:
            _LOGGER.error('Can not get system mode')
            return
        try:
            # Set permanent hold
            setattr(self._device,
                    "hold_{}".format(mode),
                    True)
            # Set temperature
            # Picks _cool_away_temp or _heat_away_temp based on the mode.
            setattr(self._device,
                    "setpoint_{}".format(mode),
                    getattr(self, "_{}_away_temp".format(mode)))
        except somecomfort.SomeComfortError:
            _LOGGER.error('Temperature %.1f out of range',
                          getattr(self, "_{}_away_temp".format(mode)))
    def turn_away_mode_off(self):
        """Turn away off."""
        self._away = False
        import somecomfort
        try:
            # Disabling all hold modes
            self._device.hold_cool = False
            self._device.hold_heat = False
        except somecomfort.SomeComfortError:
            _LOGGER.error('Can not stop hold mode')
    def set_operation_mode(self: ClimateDevice, operation_mode: str) -> None:
        """Set the system mode (Cool, Heat, etc)."""
        if hasattr(self._device, ATTR_SYSTEM_MODE):
            self._device.system_mode = operation_mode
    def update(self):
        """Update the state."""
        import somecomfort
        # Refresh with up to 3 attempts; between attempts a fresh client
        # is created via _retry().  The last failure is re-raised.
        retries = 3
        while retries > 0:
            try:
                self._device.refresh()
                break
            except (somecomfort.client.APIRateLimited, OSError,
                    requests.exceptions.ReadTimeout) as exp:
                retries -= 1
                if retries == 0:
                    raise exp
                if not self._retry():
                    raise exp
                _LOGGER.error(
                    "SomeComfort update failed, Retrying - Error: %s", exp)
    def _retry(self):
        """Recreate a new somecomfort client.
        When we got an error, the best way to be sure that the next query
        will succeed, is to recreate a new somecomfort client.
        """
        import somecomfort
        try:
            self._client = somecomfort.SomeComfort(
                self._username, self._password)
        except somecomfort.AuthError:
            _LOGGER.error("Failed to login to honeywell account %s",
                          self._username)
            return False
        except somecomfort.SomeComfortError as ex:
            _LOGGER.error("Failed to initialize honeywell client: %s",
                          str(ex))
            return False
        # Re-locate our device in the fresh client by name; bail out if it
        # is missing or ambiguous.
        devices = [device
                   for location in self._client.locations_by_id.values()
                   for device in location.devices_by_id.values()
                   if device.name == self._device.name]
        if len(devices) != 1:
            _LOGGER.error("Failed to find device %s", self._device.name)
            return False
        self._device = devices[0]
        return True
return True
|
|
# Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from sqlalchemy import and_
from neutron._i18n import _, _LE
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import models_v2
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
# Module-level logger for the pluggable IPAM backend.
LOG = logging.getLogger(__name__)
class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
    """IPAM backend that delegates address management to a pluggable driver.

    Allocation/deallocation calls go through the configured IPAM driver;
    on partial failure the already-applied operations are reverted so the
    external IPAM system stays consistent with Neutron's database.
    """

    def _get_failed_ips(self, all_ips, success_ips):
        """Yield addresses from all_ips that are absent from success_ips."""
        # Bug fix: this used to hold a *generator* of successful addresses,
        # which was exhausted by the first membership test below, so every
        # subsequent address was wrongly reported as failed.  A set gives
        # correct (and O(1)) membership checks.
        succeeded = set(ip_dict['ip_address'] for ip_dict in success_ips)
        return (ip_dict['ip_address'] for ip_dict in all_ips
                if ip_dict['ip_address'] not in succeeded)

    def _ipam_deallocate_ips(self, context, ipam_driver, port, ips,
                             revert_on_fail=True):
        """Deallocate set of ips over IPAM.
        If any single ip deallocation fails, tries to allocate deallocated
        ip addresses with fixed ip request
        """
        deallocated = []
        try:
            for ip in ips:
                try:
                    ipam_subnet = ipam_driver.get_subnet(ip['subnet_id'])
                    ipam_subnet.deallocate(ip['ip_address'])
                    deallocated.append(ip)
                except n_exc.SubnetNotFound:
                    LOG.debug("Subnet was not found on ip deallocation: %s",
                              ip)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug("An exception occurred during IP deallocation.")
                if revert_on_fail and deallocated:
                    LOG.debug("Reverting deallocation")
                    self._ipam_allocate_ips(context, ipam_driver, port,
                                            deallocated, revert_on_fail=False)
                elif not revert_on_fail and ips:
                    addresses = ', '.join(self._get_failed_ips(ips,
                                                               deallocated))
                    LOG.error(_LE("IP deallocation failed on "
                                  "external system for %s"), addresses)
        return deallocated

    def _ipam_try_allocate_ip(self, context, ipam_driver, port, ip_dict):
        """Build an address request for ip_dict and allocate it via IPAM."""
        factory = ipam_driver.get_address_request_factory()
        ip_request = factory.get_request(context, port, ip_dict)
        ipam_subnet = ipam_driver.get_subnet(ip_dict['subnet_id'])
        return ipam_subnet.allocate(ip_request)

    def _ipam_allocate_single_ip(self, context, ipam_driver, port, subnets):
        """Allocates single ip from set of subnets
        Raises n_exc.IpAddressGenerationFailure if allocation failed for
        all subnets.
        """
        for subnet in subnets:
            try:
                return [self._ipam_try_allocate_ip(context, ipam_driver,
                                                   port, subnet),
                        subnet]
            except ipam_exc.IpAddressGenerationFailure:
                continue
        raise n_exc.IpAddressGenerationFailure(
            net_id=port['network_id'])

    def _ipam_allocate_ips(self, context, ipam_driver, port, ips,
                           revert_on_fail=True):
        """Allocate set of ips over IPAM.
        If any single ip allocation fails, tries to deallocate all
        allocated ip addresses.
        """
        allocated = []
        # we need to start with entries that asked for a specific IP in case
        # those IPs happen to be next in the line for allocation for ones that
        # didn't ask for a specific IP
        ips.sort(key=lambda x: 'ip_address' not in x)
        try:
            for ip in ips:
                # By default IP info is dict, used to allocate single ip
                # from single subnet.
                # IP info can be list, used to allocate single ip from
                # multiple subnets (i.e. first successful ip allocation
                # is returned)
                ip_list = [ip] if isinstance(ip, dict) else ip
                ip_address, ip_subnet = self._ipam_allocate_single_ip(
                    context, ipam_driver, port, ip_list)
                allocated.append({'ip_address': ip_address,
                                  'subnet_id': ip_subnet['subnet_id']})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug("An exception occurred during IP allocation.")
                if revert_on_fail and allocated:
                    LOG.debug("Reverting allocation")
                    self._ipam_deallocate_ips(context, ipam_driver, port,
                                              allocated, revert_on_fail=False)
                elif not revert_on_fail and ips:
                    addresses = ', '.join(self._get_failed_ips(ips,
                                                               allocated))
                    LOG.error(_LE("IP allocation failed on "
                                  "external system for %s"), addresses)
        return allocated

    def _ipam_update_allocation_pools(self, context, ipam_driver, subnet):
        """Validate new allocation pools and push them to the IPAM driver."""
        self.validate_allocation_pools(subnet['allocation_pools'],
                                       subnet['cidr'])
        factory = ipam_driver.get_subnet_request_factory()
        subnet_request = factory.get_request(context, subnet, None)
        ipam_driver.update_subnet(subnet_request)

    def delete_subnet(self, context, subnet_id):
        """Remove the subnet from the IPAM driver."""
        ipam_driver = driver.Pool.get_instance(None, context)
        ipam_driver.remove_subnet(subnet_id)

    def allocate_ips_for_port_and_store(self, context, port, port_id):
        """Allocate IPs for a port via IPAM and persist the allocations.

        On any failure the external allocations are reverted before the
        exception is re-raised.
        """
        # Make a copy of port dict to prevent changing
        # incoming dict by adding 'id' to it.
        # Deepcopy doesn't work correctly in this case, because copy of
        # ATTR_NOT_SPECIFIED object happens. Address of copied object doesn't
        # match original object, so 'is' check fails
        port_copy = {'port': port['port'].copy()}
        port_copy['port']['id'] = port_id
        network_id = port_copy['port']['network_id']
        ips = []
        try:
            ips = self._allocate_ips_for_port(context, port_copy)
            for ip in ips:
                ip_address = ip['ip_address']
                subnet_id = ip['subnet_id']
                IpamPluggableBackend._store_ip_allocation(
                    context, ip_address, network_id,
                    subnet_id, port_id)
            return ips
        except Exception:
            with excutils.save_and_reraise_exception():
                if ips:
                    LOG.debug("An exception occurred during port creation. "
                              "Reverting IP allocation")
                    ipam_driver = driver.Pool.get_instance(None, context)
                    self._ipam_deallocate_ips(context, ipam_driver,
                                              port_copy['port'], ips,
                                              revert_on_fail=False)

    def _allocate_ips_for_port(self, context, port):
        """Allocate IP addresses for the port. IPAM version.
        If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
        addresses for the port. If port['fixed_ips'] contains an IP address or
        a subnet_id then allocate an IP address accordingly.
        """
        p = port['port']
        ips = []
        v6_stateless = []
        net_id_filter = {'network_id': [p['network_id']]}
        subnets = self._get_subnets(context, filters=net_id_filter)
        is_router_port = (
            p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT)
        fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
        if fixed_configured:
            ips = self._test_fixed_ips_for_port(context,
                                                p["network_id"],
                                                p['fixed_ips'],
                                                p['device_owner'])
            # For ports that are not router ports, implicitly include all
            # auto-address subnets for address association.
            if not is_router_port:
                v6_stateless += [subnet for subnet in subnets
                                 if ipv6_utils.is_auto_address_subnet(subnet)]
        else:
            # Split into v4, v6 stateless and v6 stateful subnets
            v4 = []
            v6_stateful = []
            for subnet in subnets:
                if subnet['ip_version'] == 4:
                    v4.append(subnet)
                else:
                    if ipv6_utils.is_auto_address_subnet(subnet):
                        if not is_router_port:
                            v6_stateless.append(subnet)
                    else:
                        v6_stateful.append(subnet)
            version_subnets = [v4, v6_stateful]
            for subnets in version_subnets:
                if subnets:
                    ips.append([{'subnet_id': s['id']}
                                for s in subnets])
        for subnet in v6_stateless:
            # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
            # are implicitly included.
            ips.append({'subnet_id': subnet['id'],
                        'subnet_cidr': subnet['cidr'],
                        'eui64_address': True,
                        'mac': p['mac_address']})
        ipam_driver = driver.Pool.get_instance(None, context)
        return self._ipam_allocate_ips(context, ipam_driver, p, ips)

    def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
                                 device_owner):
        """Test fixed IPs for port.
        Check that configured subnets are valid prior to allocating any
        IPs. Include the subnet_id in the result if only an IP address is
        configured.
        :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
                 InvalidIpForSubnet
        """
        fixed_ip_list = []
        for fixed in fixed_ips:
            subnet = self._get_subnet_for_fixed_ip(context, fixed, network_id)
            is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
            if 'ip_address' in fixed:
                if (is_auto_addr_subnet and device_owner not in
                        constants.ROUTER_INTERFACE_OWNERS):
                    msg = (_("IPv6 address %(address)s can not be directly "
                             "assigned to a port on subnet %(id)s since the "
                             "subnet is configured for automatic addresses") %
                           {'address': fixed['ip_address'],
                            'id': subnet['id']})
                    raise n_exc.InvalidInput(error_message=msg)
                fixed_ip_list.append({'subnet_id': subnet['id'],
                                      'ip_address': fixed['ip_address']})
            else:
                # A scan for auto-address subnets on the network is done
                # separately so that all such subnets (not just those
                # listed explicitly here by subnet ID) are associated
                # with the port.
                if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
                        not is_auto_addr_subnet):
                    fixed_ip_list.append({'subnet_id': subnet['id']})
        self._validate_max_ips_per_port(fixed_ip_list, device_owner)
        return fixed_ip_list

    def _update_ips_for_port(self, context, port,
                             original_ips, new_ips, mac):
        """Add or remove IPs from the port. IPAM version"""
        added = []
        removed = []
        changes = self._get_changed_ips_for_port(
            context, original_ips, new_ips, port['device_owner'])
        # Check if the IP's to add are OK
        to_add = self._test_fixed_ips_for_port(
            context, port['network_id'], changes.add,
            port['device_owner'])
        ipam_driver = driver.Pool.get_instance(None, context)
        if changes.remove:
            removed = self._ipam_deallocate_ips(context, ipam_driver, port,
                                                changes.remove)
        if to_add:
            added = self._ipam_allocate_ips(context, ipam_driver,
                                            port, to_add)
        return self.Changes(add=added,
                            original=changes.original,
                            remove=removed)

    def save_allocation_pools(self, context, subnet, allocation_pools):
        """Persist the subnet's allocation pools in the Neutron DB."""
        for pool in allocation_pools:
            first_ip = str(netaddr.IPAddress(pool.first, pool.version))
            last_ip = str(netaddr.IPAddress(pool.last, pool.version))
            ip_pool = models_v2.IPAllocationPool(subnet=subnet,
                                                 first_ip=first_ip,
                                                 last_ip=last_ip)
            context.session.add(ip_pool)

    def update_port_with_ips(self, context, db_port, new_port, new_mac):
        """Update a port's fixed IPs, reverting IPAM changes on failure."""
        changes = self.Changes(add=[], original=[], remove=[])
        if 'fixed_ips' in new_port:
            original = self._make_port_dict(db_port,
                                            process_extensions=False)
            changes = self._update_ips_for_port(context,
                                                db_port,
                                                original["fixed_ips"],
                                                new_port['fixed_ips'],
                                                new_mac)
        try:
            # Check if the IPs need to be updated
            network_id = db_port['network_id']
            for ip in changes.add:
                self._store_ip_allocation(
                    context, ip['ip_address'], network_id,
                    ip['subnet_id'], db_port.id)
            for ip in changes.remove:
                self._delete_ip_allocation(context, network_id,
                                           ip['subnet_id'], ip['ip_address'])
            self._update_db_port(context, db_port, new_port, network_id,
                                 new_mac)
        except Exception:
            with excutils.save_and_reraise_exception():
                if 'fixed_ips' in new_port:
                    LOG.debug("An exception occurred during port update.")
                    ipam_driver = driver.Pool.get_instance(None, context)
                    if changes.add:
                        LOG.debug("Reverting IP allocation.")
                        self._ipam_deallocate_ips(context, ipam_driver,
                                                  db_port, changes.add,
                                                  revert_on_fail=False)
                    if changes.remove:
                        LOG.debug("Reverting IP deallocation.")
                        self._ipam_allocate_ips(context, ipam_driver,
                                                db_port, changes.remove,
                                                revert_on_fail=False)
        return changes

    def delete_port(self, context, id):
        """Delete the port locally, then release its IPs via IPAM."""
        # Get fixed_ips list before port deletion
        port = self._get_port(context, id)
        ipam_driver = driver.Pool.get_instance(None, context)
        super(IpamPluggableBackend, self).delete_port(context, id)
        # Deallocating ips via IPAM after port is deleted locally.
        # So no need to do rollback actions on remote server
        # in case of fail to delete port locally
        self._ipam_deallocate_ips(context, ipam_driver, port,
                                  port['fixed_ips'])

    def update_db_subnet(self, context, id, s, old_pools):
        """Update subnet in DB and IPAM, restoring old pools on failure."""
        ipam_driver = driver.Pool.get_instance(None, context)
        if "allocation_pools" in s:
            self._ipam_update_allocation_pools(context, ipam_driver, s)
        try:
            subnet, changes = super(IpamPluggableBackend,
                                    self).update_db_subnet(context, id,
                                                           s, old_pools)
        except Exception:
            with excutils.save_and_reraise_exception():
                if "allocation_pools" in s and old_pools:
                    LOG.error(
                        _LE("An exception occurred during subnet update. "
                            "Reverting allocation pool changes"))
                    s['allocation_pools'] = old_pools
                    self._ipam_update_allocation_pools(context, ipam_driver, s)
        return subnet, changes

    def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
        """For an auto-address subnet, add addrs for ports on the net."""
        with context.session.begin(subtransactions=True):
            network_id = subnet['network_id']
            port_qry = context.session.query(models_v2.Port)
            ports = port_qry.filter(
                and_(models_v2.Port.network_id == network_id,
                     ~models_v2.Port.device_owner.in_(
                         constants.ROUTER_INTERFACE_OWNERS_SNAT)))
            updated_ports = []
            for port in ports:
                ip_request = ipam_req.AutomaticAddressRequest(
                    prefix=subnet['cidr'],
                    mac=port['mac_address'])
                ip_address = ipam_subnet.allocate(ip_request)
                allocated = models_v2.IPAllocation(network_id=network_id,
                                                   port_id=port['id'],
                                                   ip_address=ip_address,
                                                   subnet_id=subnet['id'])
                try:
                    # Do the insertion of each IP allocation entry within
                    # the context of a nested transaction, so that the entry
                    # is rolled back independently of other entries whenever
                    # the corresponding port has been deleted.
                    with context.session.begin_nested():
                        context.session.add(allocated)
                    updated_ports.append(port['id'])
                except db_exc.DBReferenceError:
                    LOG.debug("Port %s was deleted while updating it with an "
                              "IPv6 auto-address. Ignoring.", port['id'])
                    LOG.debug("Reverting IP allocation for %s", ip_address)
                    # Do not fail if reverting allocation was unsuccessful
                    try:
                        ipam_subnet.deallocate(ip_address)
                    except Exception:
                        LOG.debug("Reverting IP allocation failed for %s",
                                  ip_address)
            return updated_ports

    def allocate_subnet(self, context, network, subnet, subnetpool_id):
        """Allocate a subnet through IPAM and persist it in the Neutron DB."""
        subnetpool = None
        if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID:
            subnetpool = self._get_subnetpool(context, subnetpool_id)
            self._validate_ip_version_with_subnetpool(subnet, subnetpool)
        # gateway_ip and allocation pools should be validated or generated
        # only for specific request
        if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED:
            subnet['gateway_ip'] = self._gateway_ip_str(subnet,
                                                        subnet['cidr'])
            subnet['allocation_pools'] = self._prepare_allocation_pools(
                subnet['allocation_pools'],
                subnet['cidr'],
                subnet['gateway_ip'])
        ipam_driver = driver.Pool.get_instance(subnetpool, context)
        subnet_factory = ipam_driver.get_subnet_request_factory()
        subnet_request = subnet_factory.get_request(context, subnet,
                                                    subnetpool)
        ipam_subnet = ipam_driver.allocate_subnet(subnet_request)
        # get updated details with actually allocated subnet
        subnet_request = ipam_subnet.get_details()
        try:
            subnet = self._save_subnet(context,
                                       network,
                                       self._make_subnet_args(
                                           subnet_request,
                                           subnet,
                                           subnetpool_id),
                                       subnet['dns_nameservers'],
                                       subnet['host_routes'],
                                       subnet_request)
        except Exception:
            # Note(pbondar): Third-party ipam servers can't rely
            # on transaction rollback, so explicit rollback call needed.
            # IPAM part rolled back in exception handling
            # and subnet part is rolled back by transaction rollback.
            with excutils.save_and_reraise_exception():
                LOG.debug("An exception occurred during subnet creation. "
                          "Reverting subnet allocation.")
                self.delete_subnet(context, subnet_request.subnet_id)
        return subnet, ipam_subnet
|
|
import string
import random
from django import forms
from django.conf import settings
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ValidationError
from django.utils.http import is_safe_url
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from oscar.apps.customer.utils import get_password_reset_url, normalise_email
from oscar.core.loading import get_profile_class, get_class, get_model
from oscar.core.compat import get_user_model, existing_user_fields
from oscar.core.validators import password_validators
from oscar.forms import widgets
# Late-bound Oscar classes/models (resolved through the dynamic loader so
# forks can override them).
Dispatcher = get_class('customer.utils', 'Dispatcher')
CommunicationEventType = get_model('customer', 'communicationeventtype')
ProductAlert = get_model('customer', 'ProductAlert')
# The active user model (supports a custom AUTH_USER_MODEL).
User = get_user_model()
def generate_username():
    """Return a random 30-character username not yet taken by any user."""
    # Python 3 uses ascii_letters. If not available, fallback to letters
    try:
        alphabet = string.ascii_letters
    except AttributeError:
        alphabet = string.letters
    pool = alphabet + string.digits + '_'
    # Draw candidates until one is free (iterative form of the original
    # recursive retry).
    while True:
        candidate = ''.join(random.choice(pool) for _ in range(30))
        try:
            User.objects.get(username=candidate)
        except User.DoesNotExist:
            return candidate
class PasswordResetForm(auth_forms.PasswordResetForm):
    """
    This form takes the same structure as its parent from django.contrib.auth
    """
    communication_type_code = "PASSWORD_RESET"

    def save(self, domain_override=None, use_https=False, request=None,
             **kwargs):
        """
        Generate a one-use-only password reset link and email it to every
        active user registered under the submitted address.
        """
        site = get_current_site(request)
        if domain_override is not None:
            site.domain = site.name = domain_override
        email = self.cleaned_data['email']
        matching_users = User._default_manager.filter(
            email__iexact=email, is_active=True)
        for user in matching_users:
            context = {
                'user': user,
                'site': site,
                'reset_url': self.get_reset_url(site, request, user,
                                                use_https),
            }
            messages = CommunicationEventType.objects.get_and_render(
                code=self.communication_type_code, context=context)
            Dispatcher().dispatch_user_messages(user, messages)

    def get_reset_url(self, site, request, user, use_https):
        # the request argument isn't used currently, but implementors might
        # need it to determine the correct subdomain
        scheme = 'https' if use_https else 'http'
        return ''.join([scheme, '://', site.domain,
                        get_password_reset_url(user)])
class SetPasswordForm(auth_forms.SetPasswordForm):
    """Set-password form that also applies Oscar's password validators."""

    def __init__(self, *args, **kwargs):
        super(SetPasswordForm, self).__init__(*args, **kwargs)
        # Enforce the project-wide password validations on the new password.
        self.fields['new_password1'].validators.extend(password_validators)
class PasswordChangeForm(auth_forms.PasswordChangeForm):
    """Change-password form that also applies Oscar's password validators."""

    def __init__(self, *args, **kwargs):
        super(PasswordChangeForm, self).__init__(*args, **kwargs)
        # Enforce the project-wide password validations on the new password.
        self.fields['new_password1'].validators.extend(password_validators)
class EmailAuthenticationForm(AuthenticationForm):
    """
    Extends the standard django AuthenticationForm, to support 75 character
    usernames. 75 character usernames are needed to support the EmailOrUsername
    auth backend.
    """
    username = forms.EmailField(label=_('Email address'))
    redirect_url = forms.CharField(
        widget=forms.HiddenInput, required=False)

    def __init__(self, host, *args, **kwargs):
        self.host = host
        super(EmailAuthenticationForm, self).__init__(*args, **kwargs)

    def clean_redirect_url(self):
        url = self.cleaned_data['redirect_url'].strip()
        if not url:
            return None
        if is_safe_url(url, self.host):
            return url
        # Unsafe redirect targets are silently dropped.
        return None
class ConfirmPasswordForm(forms.Form):
    """
    Asks the given user to re-enter their password, validating it against
    the stored credentials.
    """
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
        self.user = user

    def clean_password(self):
        password = self.cleaned_data['password']
        if self.user.check_password(password):
            return password
        raise forms.ValidationError(
            _("The entered password is not valid!"))
class EmailUserCreationForm(forms.ModelForm):
    """Registration form keyed on email, with double password entry."""
    email = forms.EmailField(label=_('Email address'))
    password1 = forms.CharField(
        label=_('Password'), widget=forms.PasswordInput,
        validators=password_validators)
    password2 = forms.CharField(
        label=_('Confirm password'), widget=forms.PasswordInput)
    redirect_url = forms.CharField(
        widget=forms.HiddenInput, required=False)

    class Meta:
        model = User
        fields = ('email',)

    def __init__(self, host=None, *args, **kwargs):
        self.host = host
        super(EmailUserCreationForm, self).__init__(*args, **kwargs)

    def clean_email(self):
        """
        Checks for existing users with the supplied email address.
        """
        email = normalise_email(self.cleaned_data['email'])
        is_taken = User._default_manager.filter(email__iexact=email).exists()
        if is_taken:
            raise forms.ValidationError(
                _("A user with that email address already exists"))
        return email

    def clean_password2(self):
        password1 = self.cleaned_data.get('password1', '')
        password2 = self.cleaned_data.get('password2', '')
        if password1 == password2:
            return password2
        raise forms.ValidationError(
            _("The two password fields didn't match."))

    def clean_redirect_url(self):
        url = self.cleaned_data['redirect_url'].strip()
        if url and is_safe_url(url, self.host):
            return url
        return settings.LOGIN_REDIRECT_URL

    def save(self, commit=True):
        user = super(EmailUserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data['password1'])
        # Some user models still carry a username column; fill it with a
        # random unique value so the row saves cleanly.
        if 'username' in (f.name for f in User._meta.fields):
            user.username = generate_username()
        if commit:
            user.save()
        return user
class OrderSearchForm(forms.Form):
    """Search form for filtering a customer's order history."""
    date_from = forms.DateField(
        required=False, label=pgettext_lazy("start date", "From"),
        widget=widgets.DatePickerInput())
    date_to = forms.DateField(
        required=False, label=pgettext_lazy("end date", "To"),
        widget=widgets.DatePickerInput())
    order_number = forms.CharField(required=False, label=_("Order number"))

    def clean(self):
        # Require at least one of the three criteria.
        if self.is_valid() and not any([self.cleaned_data['date_from'],
                                        self.cleaned_data['date_to'],
                                        self.cleaned_data['order_number']]):
            raise forms.ValidationError(_("At least one field is required."))
        return super(OrderSearchForm, self).clean()

    def description(self):
        """
        Uses the form's data to build a useful description of what orders
        are listed.
        """
        if not self.is_bound or not self.is_valid():
            return _('All orders')
        return self._orders_description(
            self.cleaned_data['date_from'],
            self.cleaned_data['date_to'],
            self.cleaned_data['order_number'])

    def _orders_description(self, date_from, date_to, order_number):
        # Dispatch on which criteria are present:
        # (has date_from, has date_to, has order_number) -> template.
        templates = {
            (True, True, True): _('Orders placed between %(date_from)s and '
                                  '%(date_to)s and order number containing '
                                  '%(order_number)s'),
            (True, True, False): _('Orders placed between %(date_from)s and '
                                   '%(date_to)s'),
            (True, False, True): _('Orders placed since %(date_from)s and '
                                   'order number containing '
                                   '%(order_number)s'),
            (True, False, False): _('Orders placed since %(date_from)s'),
            (False, True, True): _('Orders placed until %(date_to)s and '
                                   'order number containing '
                                   '%(order_number)s'),
            (False, True, False): _('Orders placed until %(date_to)s'),
            (False, False, True): _('Orders with order number containing '
                                    '%(order_number)s'),
        }
        key = (bool(date_from), bool(date_to), bool(order_number))
        if key == (False, False, False):
            return None
        return templates[key] % {
            'date_from': date_from,
            'date_to': date_to,
            'order_number': order_number,
        }

    def get_filters(self):
        date_from = self.cleaned_data['date_from']
        date_to = self.cleaned_data['date_to']
        order_number = self.cleaned_data['order_number']
        kwargs = {}
        if date_from:
            if date_to:
                kwargs['date_placed__range'] = [date_from, date_to]
            else:
                kwargs['date_placed__gt'] = date_from
        elif date_to:
            kwargs['date_placed__lt'] = date_to
        if order_number:
            kwargs['number__contains'] = order_number
        return kwargs
class UserForm(forms.ModelForm):
    """
    Profile form for editing the core user fields.
    """

    class Meta:
        model = User
        fields = existing_user_fields(['first_name', 'last_name', 'email'])

    def __init__(self, user, *args, **kwargs):
        self.user = user
        kwargs['instance'] = user
        super(UserForm, self).__init__(*args, **kwargs)
        # Email doubles as the login identifier, so it must not be blank.
        if 'email' in self.fields:
            self.fields['email'].required = True

    def clean_email(self):
        """
        Make sure that the email address is always unique as it is
        used instead of the username. This is necessary because the
        unique-ness of email addresses is *not* enforced on the model
        level in ``django.contrib.auth.models.User``.
        """
        email = normalise_email(self.cleaned_data['email'])
        clashes = User._default_manager.filter(
            email__iexact=email).exclude(id=self.user.id)
        if clashes.exists():
            raise ValidationError(
                _("A user with this email address already exists"))
        # Save the email unaltered
        return email
Profile = get_profile_class()
if Profile:  # noqa (too complex (12))

    class UserAndProfileForm(forms.ModelForm):
        """
        Combined form that edits a user's profile model together with the
        core user fields (email and, where present, first/last name).
        """

        def __init__(self, user, *args, **kwargs):
            try:
                instance = Profile.objects.get(user=user)
            except Profile.DoesNotExist:
                # User has no profile, try a blank one
                instance = Profile(user=user)
            kwargs['instance'] = instance
            super(UserAndProfileForm, self).__init__(*args, **kwargs)
            # Get profile field names to help with ordering later
            profile_field_names = list(self.fields.keys())
            # Get user field names (we look for core user fields first)
            core_field_names = set([f.name for f in User._meta.fields])
            user_field_names = ['email']
            for field_name in ('first_name', 'last_name'):
                if field_name in core_field_names:
                    user_field_names.append(field_name)
            user_field_names.extend(User._meta.additional_fields)
            # Store user fields so we know what to save later
            self.user_field_names = user_field_names
            # Add additional user form fields
            additional_fields = forms.fields_for_model(
                User, fields=user_field_names)
            self.fields.update(additional_fields)
            # Ensure email is required and initialised correctly
            self.fields['email'].required = True
            # Set initial values
            for field_name in user_field_names:
                self.fields[field_name].initial = getattr(user, field_name)
            # Ensure order of fields is email, user fields then profile fields
            self.fields.keyOrder = user_field_names + profile_field_names

        class Meta:
            model = Profile
            exclude = ('user',)

        def clean_email(self):
            # Email is the login identifier, so enforce uniqueness across
            # all other users' accounts.
            email = normalise_email(self.cleaned_data['email'])
            users_with_email = User._default_manager.filter(
                email__iexact=email).exclude(id=self.instance.user.id)
            if users_with_email.exists():
                raise ValidationError(
                    _("A user with this email address already exists"))
            return email

        def save(self, *args, **kwargs):
            user = self.instance.user
            # Save user also
            for field_name in self.user_field_names:
                setattr(user, field_name, self.cleaned_data[field_name])
            user.save()
            # Bug fix: refer to the defining class here.  The original
            # called ``super(ProfileForm, self)``, which only worked
            # because the module-level alias ``ProfileForm`` happens to be
            # bound to this class after the class body runs; it breaks as
            # soon as ``ProfileForm`` is rebound or this class subclassed.
            return super(UserAndProfileForm, self).save(*args, **kwargs)

    ProfileForm = UserAndProfileForm
else:
    ProfileForm = UserForm
class ProductAlertForm(forms.ModelForm):
    """
    Form for registering a stock alert for a product.

    Anonymous visitors supply an email address; for signed-in users the
    alert is attached to their account and the email field is hidden.
    """
    email = forms.EmailField(required=True, label=_(u'Send notification to'),
                             widget=forms.TextInput(attrs={
                                 'placeholder': _('Enter your email')
                             }))

    def __init__(self, user, product, *args, **kwargs):
        self.user = user
        self.product = product
        super(ProductAlertForm, self).__init__(*args, **kwargs)
        # Only show email field to unauthenticated users
        if user and user.is_authenticated():
            email_field = self.fields['email']
            email_field.widget = forms.HiddenInput()
            email_field.required = False

    def save(self, commit=True):
        alert = super(ProductAlertForm, self).save(commit=False)
        if self.user.is_authenticated():
            alert.user = self.user
        alert.product = self.product
        if commit:
            alert.save()
        return alert

    def clean(self):
        cleaned_data = self.cleaned_data
        email = cleaned_data.get('email')
        if email:
            # Anonymous signup: the same address may not hold two active
            # alerts for one product.
            try:
                ProductAlert.objects.get(
                    product=self.product, email__iexact=email,
                    status=ProductAlert.ACTIVE)
            except ProductAlert.DoesNotExist:
                pass
            else:
                raise forms.ValidationError(_(
                    "There is already an active stock alert for %s") % email)
        elif self.user.is_authenticated():
            # Authenticated users hold at most one active alert per product.
            try:
                ProductAlert.objects.get(product=self.product,
                                         user=self.user,
                                         status=ProductAlert.ACTIVE)
            except ProductAlert.DoesNotExist:
                pass
            else:
                raise forms.ValidationError(_(
                    "You already have an active alert for this product"))
        return cleaned_data

    class Meta:
        model = ProductAlert
        fields = ['email']
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
# Module-level logger used by the validators below for debug messages.
LOG = logging.getLogger(__name__)
# Unique sentinel meaning "attribute omitted from the request body";
# deliberately distinct from an explicit None value (see comments further
# down this module).
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
:param strict: Specifies whether additional keys are allowed to be present.
:return: True, if keys in the dictionary correspond to the specification.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed."
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
return msg
def is_attr_set(attribute):
    """Return True if the attribute was explicitly set to a real value."""
    if attribute is None:
        return False
    return attribute is not ATTR_NOT_SPECIFIED
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
    """Validate that data is a non-blank string (optionally length-bounded)."""
    err = _validate_string(data, max_len=max_len)
    if err:
        return err
    if not data.strip():
        return _("'%s' Blank strings are not permitted") % data
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
    """Return an error message unless data is a string of length <= max_len.

    Returns None when validation passes.
    """
    # ``basestring`` covers both str and unicode (this module is Python 2).
    if not isinstance(data, basestring):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg
    if max_len is not None and len(data) > max_len:
        msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
               {'data': data, 'max_len': max_len})
        LOG.debug(msg)
        return msg
def _validate_boolean(data, valid_values=None):
    """Return an error message unless data converts to a boolean."""
    try:
        convert_to_boolean(data)
        return None
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg
def _validate_range(data, valid_values=None):
    """Check that integer value is within a range provided.

    Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matter.
    It is expected that the limits provided are valid integers or
    the value None.
    """
    # valid_values is a two-element sequence: (min, max). Either bound may
    # be UNLIMITED (None) to disable that side of the check.
    min_value = valid_values[0]
    max_value = valid_values[1]
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if min_value is not UNLIMITED and data < min_value:
        msg = _("'%(data)s' is too small - must be at least "
                "'%(limit)d'") % {'data': data, 'limit': min_value}
        LOG.debug(msg)
        return msg
    if max_value is not UNLIMITED and data > max_value:
        msg = _("'%(data)s' is too large - must be no larger than "
                "'%(limit)d'") % {'data': data, 'limit': max_value}
        LOG.debug(msg)
        return msg
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if re.search(r'\s', data):
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
    """Return an error message unless data is a well-formed MAC address."""
    try:
        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
    except Exception:
        valid_mac = False
        # TODO(arosen): The code in this file should be refactored
        # so it catches the correct exceptions. _validate_no_whitespace
        # raises AttributeError if data is None.
    if not valid_mac:
        msg = _("'%s' is not a valid MAC address") % data
        LOG.debug(msg)
        return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is None:
return
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
    """Return an error message unless data parses as an IP address."""
    try:
        netaddr.IPAddress(_validate_no_whitespace(data))
    except Exception:
        msg = _("'%s' is not a valid IP address") % data
        LOG.debug(msg)
        return msg
def _validate_ip_pools(data, valid_values=None):
    """Validate that start and end IP addresses are present.

    In addition to this the IP addresses will also be validated
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for IP pool: '%s'") % data
        LOG.debug(msg)
        return msg
    expected_keys = ['start', 'end']
    for ip_pool in data:
        # Each pool must be a dict with exactly the 'start'/'end' keys.
        msg = _verify_dict_keys(expected_keys, ip_pool)
        if msg:
            LOG.debug(msg)
            return msg
        for k in expected_keys:
            msg = _validate_ip_address(ip_pool[k])
            if msg:
                LOG.debug(msg)
                return msg
def _validate_fixed_ips(data, valid_values=None):
    """Validate a list of fixed-IP dicts ('ip_address' and/or 'subnet_id').

    Returns an error message on failure, None on success.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for fixed IP: '%s'") % data
        LOG.debug(msg)
        return msg
    ips = []
    for fixed_ip in data:
        if not isinstance(fixed_ip, dict):
            msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
            LOG.debug(msg)
            return msg
        if 'ip_address' in fixed_ip:
            # Ensure that duplicate entries are not set - just checking IP
            # suffices. Duplicate subnet_id's are legitimate.
            fixed_ip_address = fixed_ip['ip_address']
            if fixed_ip_address in ips:
                msg = _("Duplicate IP address '%s'") % fixed_ip_address
            else:
                msg = _validate_ip_address(fixed_ip_address)
            if msg:
                LOG.debug(msg)
                return msg
            ips.append(fixed_ip_address)
        if 'subnet_id' in fixed_ip:
            msg = _validate_uuid(fixed_ip['subnet_id'])
            if msg:
                LOG.debug(msg)
                return msg
def _validate_ip_or_hostname(host):
    """Return an error message unless host is a valid IP or hostname."""
    ip_err = _validate_ip_address(host)
    if not ip_err:
        return None
    name_err = _validate_hostname(host)
    if not name_err:
        return None
    # Neither interpretation worked; report both failure reasons.
    return _("%(host)s is not a valid IP or hostname. Details: "
             "%(ip_err)s, %(name_err)s") % {'ip_err': ip_err, 'host': host,
                                            'name_err': name_err}
def _validate_nameservers(data, valid_values=None):
    """Validate an iterable of unique nameserver IPs or hostnames."""
    if not hasattr(data, '__iter__'):
        msg = _("Invalid data format for nameserver: '%s'") % data
        LOG.debug(msg)
        return msg
    hosts = []
    for host in data:
        # This may be an IP or a hostname
        msg = _validate_ip_or_hostname(host)
        if msg:
            msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
                'host': host, 'msg': msg}
            return msg
        if host in hosts:
            msg = _("Duplicate nameserver '%s'") % host
            LOG.debug(msg)
            return msg
        hosts.append(host)
def _validate_hostroutes(data, valid_values=None):
    """Validate a list of host-route dicts with 'destination' and 'nexthop'."""
    if not isinstance(data, list):
        msg = _("Invalid data format for hostroute: '%s'") % data
        LOG.debug(msg)
        return msg
    expected_keys = ['destination', 'nexthop']
    hostroutes = []
    for hostroute in data:
        msg = _verify_dict_keys(expected_keys, hostroute)
        if msg:
            LOG.debug(msg)
            return msg
        # The destination must be a CIDR; the nexthop an IP address.
        msg = _validate_subnet(hostroute['destination'])
        if msg:
            LOG.debug(msg)
            return msg
        msg = _validate_ip_address(hostroute['nexthop'])
        if msg:
            LOG.debug(msg)
            return msg
        if hostroute in hostroutes:
            msg = _("Duplicate hostroute '%s'") % hostroute
            LOG.debug(msg)
            return msg
        hostroutes.append(hostroute)
def _validate_ip_address_or_none(data, valid_values=None):
if data is None:
return None
return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
    """Return an error message unless data is a CIDR-style subnet string."""
    msg = None
    try:
        net = netaddr.IPNetwork(_validate_no_whitespace(data))
        # A bare address (no '/<prefix>') would parse, but is ambiguous;
        # insist on an explicit prefix and suggest the canonical CIDR.
        if '/' not in data:
            msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
                    " '%(cidr)s' is recommended") % {"data": data,
                                                     "cidr": net.cidr}
        else:
            return
    except Exception:
        msg = _("'%s' is not a valid IP subnet") % data
    if msg:
        LOG.debug(msg)
    return msg
def _validate_subnet_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
for item in data:
msg = _validate_subnet(item)
if msg:
return msg
def _validate_subnet_or_none(data, valid_values=None):
if data is None:
return
return _validate_subnet(data, valid_values)
def _validate_hostname(data):
    """Return an error message unless data is a valid RFC 1123 hostname."""
    # NOTE: An individual name regex instead of an entire FQDN was used
    # because it's easier to make correct. Feel free to replace with a
    # full regex solution. The logic should validate that the hostname
    # matches RFC 1123 (section 2.1) and RFC 952.
    hostname_pattern = "[a-zA-Z0-9-]{1,63}$"
    try:
        # Trailing periods are allowed to indicate that a name is fully
        # qualified per RFC 1034 (page 7).
        trimmed = data if data[-1] != '.' else data[:-1]
        if len(trimmed) > 255:
            raise TypeError(
                _("'%s' exceeds the 255 character hostname limit") % trimmed)
        names = trimmed.split('.')
        for name in names:
            if not name:
                raise TypeError(_("Encountered an empty component."))
            if name[-1] == '-' or name[0] == '-':
                raise TypeError(
                    _("Name '%s' must not start or end with a hyphen.") % name)
            if not re.match(hostname_pattern, name):
                raise TypeError(
                    _("Name '%s' must be 1-63 characters long, each of "
                      "which can only be alphanumeric or a hyphen.") % name)
        # RFC 1123 hints that a TLD can't be all numeric. last is a TLD if
        # it's an FQDN.
        if len(names) > 1 and re.match("^[0-9]+$", names[-1]):
            raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
    except TypeError as e:
        # NOTE(review): ``e.message`` is Python 2 only; a Python 3 port
        # would need ``str(e)`` here - confirm before porting.
        msg = _("'%(data)s' is not a valid hostname. Reason: %(reason)s") % {
            'data': data, 'reason': e.message}
        LOG.debug(msg)
        return msg
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is None:
return
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
    """Return an error message unless data looks like a UUID."""
    if not uuidutils.is_uuid_like(data):
        msg = _("'%s' is not a valid UUID") % data
        LOG.debug(msg)
        return msg
def _validate_uuid_or_none(data, valid_values=None):
if data is not None:
return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = _validate_uuid(item)
if msg:
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
def _validate_dict_item(key, key_validator, data):
    """Convert and validate one dict value according to its key spec.

    Returns an error message on failure, None on success.  May mutate
    ``data`` in place when the spec carries a 'convert_to' function.
    """
    # Find conversion function, if any, and apply it
    conv_func = key_validator.get('convert_to')
    if conv_func:
        data[key] = conv_func(data.get(key))
    # Find validator function
    # TODO(salv-orlando): Structure of dict attributes should be improved
    # to avoid iterating over items
    val_func = val_params = None
    for (k, v) in key_validator.iteritems():
        if k.startswith('type:'):
            # ask forgiveness, not permission
            try:
                val_func = validators[k]
            except KeyError:
                return _("Validator '%s' does not exist.") % k
            val_params = v
            break
    # Process validation
    if val_func:
        return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
    """Validate a dict against per-key specs (required/convert_to/type:*)."""
    if not isinstance(data, dict):
        msg = _("'%s' is not a dictionary") % data
        LOG.debug(msg)
        return msg
    # Do not perform any further validation, if no constraints are supplied
    if not key_specs:
        return
    # Check whether all required keys are present
    required_keys = [key for key, spec in key_specs.iteritems()
                     if spec.get('required')]
    if required_keys:
        # Non-strict check: extra keys beyond the required set are allowed.
        msg = _verify_dict_keys(required_keys, data, False)
        if msg:
            LOG.debug(msg)
            return msg
    # Perform validation and conversion of all values
    # according to the specifications.
    for key, key_validator in [(k, v) for k, v in key_specs.iteritems()
                               if k in data]:
        msg = _validate_dict_item(key, key_validator, data)
        if msg:
            LOG.debug(msg)
            return msg
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
    """Convert a string/bool/int representation to a bool.

    Accepts 'true'/'false' (case-insensitive), '1'/'0', real booleans
    and the integers 0/1; anything else raises InvalidInput.
    """
    # ``basestring`` covers str and unicode (this module is Python 2).
    if isinstance(data, basestring):
        val = data.lower()
        if val == "true" or val == "1":
            return True
        if val == "false" or val == "0":
            return False
    elif isinstance(data, bool):
        return data
    elif isinstance(data, int):
        if data == 0:
            return False
        elif data == 1:
            return True
    msg = _("'%s' cannot be converted to boolean") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_to_boolean_if_not_none(data):
    """Convert data to a bool, passing None through untouched."""
    if data is None:
        return None
    return convert_to_boolean(data)
def convert_to_int(data):
    """Convert data to an int.

    :raises n_exc.InvalidInput: if data cannot be converted.
    """
    try:
        return int(data)
    except (ValueError, TypeError):
        # Fixed grammar ("a integer" -> "an integer") so the message
        # matches the wording used by the other integer validators in
        # this module.
        msg = _("'%s' is not an integer") % data
        raise n_exc.InvalidInput(error_message=msg)
def convert_to_int_if_not_none(data):
    """Convert data to an int, passing None through untouched."""
    if data is None:
        return data
    return convert_to_int(data)
def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    parts = [piece.strip() for piece in data.split('=', 1)]
    if len(parts) == 2 and parts[0]:
        return parts
    msg = _("'%s' is not of the form <key>=[value]") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict.

    Repeated keys are merged: each key maps to the list of its distinct
    values.

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    values_by_key = {}
    for kvp_str in kvp_list:
        key, value = convert_kvp_str_to_list(kvp_str)
        values_by_key.setdefault(key, set()).add(value)
    return dict((k, list(v)) for k, v in values_by_key.iteritems())
def convert_none_to_empty_list(value):
    """Return [] when value is None, otherwise value unchanged."""
    if value is None:
        return []
    return value
def convert_none_to_empty_dict(value):
    """Return {} when value is None, otherwise value unchanged."""
    if value is None:
        return {}
    return value
def convert_to_list(data):
    """Wrap data in a list: None -> [], iterables -> list(data), else [data]."""
    if data is None:
        return []
    if hasattr(data, '__iter__'):
        return list(data)
    return [data]
HEX_ELEM = '[0-9A-Fa-f]'
# Standard 8-4-4-4-12 hex-digit UUID layout.
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{4}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
# (keyed by the 'type:*' strings used in attribute specs).
validators = {'type:dict': _validate_dict,
              'type:dict_or_none': _validate_dict_or_none,
              'type:dict_or_empty': _validate_dict_or_empty,
              'type:dict_or_nodata': _validate_dict_or_nodata,
              'type:fixed_ips': _validate_fixed_ips,
              'type:hostroutes': _validate_hostroutes,
              'type:ip_address': _validate_ip_address,
              'type:ip_address_or_none': _validate_ip_address_or_none,
              'type:ip_pools': _validate_ip_pools,
              'type:mac_address': _validate_mac_address,
              'type:mac_address_or_none': _validate_mac_address_or_none,
              'type:nameservers': _validate_nameservers,
              'type:non_negative': _validate_non_negative,
              'type:range': _validate_range,
              'type:regex': _validate_regex,
              'type:regex_or_none': _validate_regex_or_none,
              'type:string': _validate_string,
              'type:string_or_none': _validate_string_or_none,
              'type:not_empty_string': _validate_not_empty_string,
              'type:not_empty_string_or_none':
              _validate_not_empty_string_or_none,
              'type:subnet': _validate_subnet,
              'type:subnet_list': _validate_subnet_list,
              'type:subnet_or_none': _validate_subnet_or_none,
              'type:uuid': _validate_uuid,
              'type:uuid_or_none': _validate_uuid_or_none,
              'type:uuid_list': _validate_uuid_list,
              'type:values': _validate_values,
              'type:boolean': _validate_boolean}
# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
# becomes mandatory).
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
RESOURCE_ATTRIBUTE_MAP = {
NETWORKS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'subnets': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
},
PORTS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': None},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'mac_address': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address': None},
'enforce_policy': True,
'is_visible': True},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'convert_list_to': convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
'device_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SUBNETS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': None},
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'cidr': {'allow_post': True, 'allow_put': False,
'validate': {'type:subnet': None},
'is_visible': True},
'gateway_ip': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'allocation_pools': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_pools': None},
'is_visible': True},
'dns_nameservers': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:nameservers': None},
'is_visible': True},
'host_routes': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:hostroutes': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'enable_dhcp': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values': constants.IPV6_MODES},
'is_visible': True},
'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values':
constants.IPV6_MODES},
'is_visible': True},
SHARED: {'allow_post': False,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': False,
'required_by_policy': True,
'enforce_policy': True},
}
}
# Identify the attribute used by a resource to reference another resource
RESOURCE_FOREIGN_KEYS = {
NETWORKS: 'network_id'
}
PLURALS = {NETWORKS: NETWORK,
PORTS: PORT,
SUBNETS: SUBNET,
'dns_nameservers': 'dns_nameserver',
'host_routes': 'host_route',
'allocation_pools': 'allocation_pool',
'fixed_ips': 'fixed_ip',
'extensions': 'extension'}
|
|
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core import paginator
from django.test import TestCase, override_settings
from django.urls import reverse
from wagtail.admin.tests.pages.timestamps import local_datetime
from wagtail.core import hooks
from wagtail.core.models import GroupPagePermission, Locale, Page
from wagtail.tests.testapp.models import SimplePage, SingleEventPage, StandardIndex
from wagtail.tests.utils import WagtailTestUtils
class TestPageExplorer(TestCase, WagtailTestUtils):
    def setUp(self):
        """Build a three-page tree under the site root and log in."""
        # Find root page
        self.root_page = Page.objects.get(id=2)
        # Add child page
        # (no latest_revision_created_at, so it sorts last by revision date)
        self.child_page = SimplePage(
            title="Hello world!",
            slug="hello-world",
            content="hello",
        )
        self.root_page.add_child(instance=self.child_page)
        # more child pages to test ordering
        self.old_page = StandardIndex(
            title="Old page",
            slug="old-page",
            latest_revision_created_at=local_datetime(2010, 1, 1)
        )
        self.root_page.add_child(instance=self.old_page)
        self.new_page = SimplePage(
            title="New page",
            slug="new-page",
            content="hello",
            latest_revision_created_at=local_datetime(2016, 1, 1)
        )
        self.root_page.add_child(instance=self.new_page)
        # Login
        self.user = self.login()
def test_explore(self):
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(self.root_page, response.context['parent_page'])
# child pages should be most recent first
# (with null latest_revision_created_at at the end)
page_ids = [page.id for page in response.context['pages']]
self.assertEqual(page_ids, [self.new_page.id, self.old_page.id, self.child_page.id])
def test_explore_root(self):
response = self.client.get(reverse('wagtailadmin_explore_root'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(Page.objects.get(id=1), response.context['parent_page'])
self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.root_page.id).exists())
def test_explore_root_shows_icon(self):
response = self.client.get(reverse('wagtailadmin_explore_root'))
self.assertEqual(response.status_code, 200)
# Administrator (or user with add_site permission) should see the
# sites link with the icon-site icon
self.assertContains(
response,
("""<a href="/admin/sites/" class="icon icon-site" """
"""title="Sites menu"></a>""")
)
def test_ordering(self):
response = self.client.get(
reverse('wagtailadmin_explore', args=(self.root_page.id, )),
{'ordering': 'title'}
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], 'title')
# child pages should be ordered by title
page_ids = [page.id for page in response.context['pages']]
self.assertEqual(page_ids, [self.child_page.id, self.new_page.id, self.old_page.id])
def test_reverse_ordering(self):
response = self.client.get(
reverse('wagtailadmin_explore', args=(self.root_page.id, )),
{'ordering': '-title'}
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], '-title')
# child pages should be ordered by title
page_ids = [page.id for page in response.context['pages']]
self.assertEqual(page_ids, [self.old_page.id, self.new_page.id, self.child_page.id])
    def test_ordering_by_last_revision_forward(self):
        """?ordering=latest_revision_created_at sorts oldest revision first, nulls first."""
        response = self.client.get(
            reverse('wagtailadmin_explore', args=(self.root_page.id, )),
            {'ordering': 'latest_revision_created_at'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        self.assertEqual(response.context['ordering'], 'latest_revision_created_at')
        # child pages should be oldest revision first
        # (with null latest_revision_created_at at the start)
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [self.child_page.id, self.old_page.id, self.new_page.id])
    def test_invalid_ordering(self):
        """An unrecognised ?ordering value falls back to the default ordering."""
        response = self.client.get(
            reverse('wagtailadmin_explore', args=(self.root_page.id, )),
            {'ordering': 'invalid_order'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        # falls back to the default: most recent revision first
        self.assertEqual(response.context['ordering'], '-latest_revision_created_at')
    def test_reordering(self):
        """?ordering=ord shows pages in tree order and disables pagination for drag-reorder."""
        response = self.client.get(
            reverse('wagtailadmin_explore', args=(self.root_page.id, )),
            {'ordering': 'ord'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        self.assertEqual(response.context['ordering'], 'ord')
        # child pages should be ordered by native tree order (i.e. by creation time)
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [self.child_page.id, self.old_page.id, self.new_page.id])
        # Pages must not be paginated
        self.assertNotIsInstance(response.context['pages'], paginator.Page)
    def test_construct_explorer_page_queryset_hook(self):
        """The construct_explorer_page_queryset hook can filter the listing queryset."""
        # testapp implements a construct_explorer_page_queryset hook
        # that only returns pages with a slug starting with 'hello'
        # when the 'polite_pages_only' URL parameter is set
        response = self.client.get(
            reverse('wagtailadmin_explore', args=(self.root_page.id, )),
            {'polite_pages_only': 'yes_please'}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [self.child_page.id])
    def test_construct_explorer_page_queryset_hook_with_ordering(self):
        """An ordering applied by the hook takes effect on the rendered listing."""
        def set_custom_ordering(parent_page, pages, request):
            return pages.order_by('-title')
        with hooks.register_temporarily('construct_explorer_page_queryset', set_custom_ordering):
            response = self.client.get(
                reverse('wagtailadmin_explore', args=(self.root_page.id, ))
            )
        # child pages should be ordered by according to the hook preference
        page_ids = [page.id for page in response.context['pages']]
        self.assertEqual(page_ids, [self.old_page.id, self.new_page.id, self.child_page.id])
    def test_construct_page_listing_buttons_hook(self):
        """The construct_page_listing_buttons hook can add extra buttons to each row."""
        # testapp implements a construct_page_listing_buttons hook
        # that adds a dummy button with the label 'Dummy Button' which points
        # to '/dummy-button'
        response = self.client.get(
            reverse('wagtailadmin_explore', args=(self.root_page.id, )),
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        self.assertContains(response, 'Dummy Button')
        self.assertContains(response, '/dummy-button')
def make_pages(self):
for i in range(150):
self.root_page.add_child(instance=SimplePage(
title="Page " + str(i),
slug="page-" + str(i),
content="hello",
))
    def test_pagination(self):
        """The ?p parameter selects the requested results page."""
        self.make_pages()
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 2})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        # Check that we got the correct page
        self.assertEqual(response.context['pages'].number, 2)
    def test_pagination_invalid(self):
        """A non-numeric ?p parameter falls back to the first results page."""
        self.make_pages()
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 'Hello World!'})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        # Check that we got page one
        self.assertEqual(response.context['pages'].number, 1)
    def test_pagination_out_of_range(self):
        """A too-large ?p parameter clamps to the last results page."""
        self.make_pages()
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 99999})
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        # Check that we got the last page
        self.assertEqual(response.context['pages'].number, response.context['pages'].paginator.num_pages)
    def test_listing_uses_specific_models(self):
        """The listing resolves specific page classes so custom routing URLs appear."""
        # SingleEventPage has custom URL routing; the 'live' link in the listing
        # should show the custom URL, which requires us to use the specific version
        # of the class
        self.new_event = SingleEventPage(
            title="New event",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
            latest_revision_created_at=local_datetime(2016, 1, 1)
        )
        self.root_page.add_child(instance=self.new_event)
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '/new-event/pointless-suffix/')
def make_event_pages(self, count):
for i in range(count):
self.root_page.add_child(instance=SingleEventPage(
title="New event " + str(i),
location='the moon', audience='public',
cost='free', date_from='2001-01-01',
latest_revision_created_at=local_datetime(2016, 1, 1)
))
    def test_exploring_uses_specific_page_with_custom_display_title(self):
        """Both the listing and the page's own explorer view show the custom admin title."""
        # SingleEventPage has a custom get_admin_display_title method; explorer should
        # show the custom title rather than the basic database one
        self.make_event_pages(count=1)
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertContains(response, 'New event 0 (single event)')
        new_event = SingleEventPage.objects.latest('pk')
        response = self.client.get(reverse('wagtailadmin_explore', args=(new_event.id, )))
        self.assertContains(response, 'New event 0 (single event)')
    def test_parent_page_is_specific(self):
        """The parent_page in the context is the specific subclass, not the base Page."""
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.child_page.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context['parent_page'], SimplePage)
    def test_explorer_no_perms(self):
        """Users with admin access but no page permissions are redirected to the dashboard."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()
        admin = reverse('wagtailadmin_home')
        self.assertRedirects(
            self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, ))),
            admin)
        self.assertRedirects(
            self.client.get(reverse('wagtailadmin_explore_root')), admin)
    def test_explore_with_missing_page_model(self):
        """Listings still render when a page's content type has no model class."""
        # Create a ContentType that doesn't correspond to a real model
        missing_page_content_type = ContentType.objects.create(app_label='tests', model='missingpage')
        # Turn /home/old-page/ into this content type
        Page.objects.filter(id=self.old_page.id).update(content_type=missing_page_content_type)
        # try to browse the listing that contains the missing model
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
        # try to browse into the page itself
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.old_page.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
class TestBreadcrumb(TestCase, WagtailTestUtils):
    """Tests for the breadcrumb trail rendered above the page explorer listing."""
    fixtures = ['test.json']

    def test_breadcrumb_uses_specific_titles(self):
        """Breadcrumb entries use the specific page class's admin display title."""
        self.user = self.login()
        # get the explorer view for a subpage of a SimplePage
        page = Page.objects.get(url_path='/home/secret-plans/steal-underpants/')
        response = self.client.get(reverse('wagtailadmin_explore', args=(page.id, )))
        # The breadcrumb should pick up SimplePage's overridden get_admin_display_title method
        expected_url = reverse('wagtailadmin_explore', args=(Page.objects.get(url_path='/home/secret-plans/').id, ))
        expected = """
            <li class="breadcrumb-item">
                <a class="breadcrumb-link" href="%s"><span class="title">Secret plans (simple page)</span>
                    <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-arrow-right"></use>
                    </svg>
                </a>
            </li>
        """ % expected_url
        self.assertContains(response, expected, html=True)
class TestPageExplorerSignposting(TestCase, WagtailTestUtils):
    """Tests for the advisory messages the explorer shows about site configuration.

    The wording varies by the viewer's permissions: users with add_site
    permission are pointed at the site settings area, others get a plain
    warning that pages created here are unroutable.
    """
    fixtures = ['test.json']

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=1)
        # Find page with an associated site
        self.site_page = Page.objects.get(id=2)
        # Add another top-level page (which will have no corresponding site record)
        self.no_site_page = SimplePage(
            title="Hello world!",
            slug="hello-world",
            content="hello",
        )
        self.root_page.add_child(instance=self.no_site_page)

    # Tests for users that have both add-site permission, and explore permission at the given view;
    # warning messages should include advice re configuring sites

    def test_admin_at_root(self):
        """Admins at the root level get the full message about adding sites."""
        self.login(username='superuser', password='password')
        response = self.client.get(reverse('wagtailadmin_explore_root'))
        self.assertEqual(response.status_code, 200)
        # Administrator (or user with add_site permission) should get the full message
        # about configuring sites
        self.assertContains(
            response,
            (
                "The root level is where you can add new sites to your Wagtail installation. "
                "Pages created here will not be accessible at any URL until they are associated with a site."
            )
        )
        self.assertContains(response, """<a href="/admin/sites/">Configure a site now.</a>""")

    def test_admin_at_non_site_page(self):
        """Admins at an unroutable page get a warning plus a link to site config."""
        self.login(username='superuser', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.no_site_page.id, )))
        self.assertEqual(response.status_code, 200)
        # Administrator (or user with add_site permission) should get a warning about
        # unroutable pages, and be directed to the site config area
        self.assertContains(
            response,
            (
                "There is no site set up for this location. "
                "Pages created here will not be accessible at any URL until a site is associated with this location."
            )
        )
        self.assertContains(response, """<a href="/admin/sites/">Configure a site now.</a>""")

    def test_admin_at_site_page(self):
        """No warning is shown when the page is routable via an existing site."""
        self.login(username='superuser', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.site_page.id, )))
        self.assertEqual(response.status_code, 200)
        # There should be no warning message here
        self.assertNotContains(response, "Pages created here will not be accessible")

    # Tests for standard users that have explore permission at the given view;
    # warning messages should omit advice re configuring sites

    def test_nonadmin_at_root(self):
        """Non-admins at the root get a simplified prompt without site-config advice."""
        # Assign siteeditor permission over no_site_page, so that the deepest-common-ancestor
        # logic allows them to explore root
        GroupPagePermission.objects.create(
            group=Group.objects.get(name="Site-wide editors"),
            page=self.no_site_page, permission_type='add'
        )
        self.login(username='siteeditor', password='password')
        response = self.client.get(reverse('wagtailadmin_explore_root'))
        self.assertEqual(response.status_code, 200)
        # Non-admin should get a simple "create pages as children of the homepage" prompt
        self.assertContains(
            response,
            "Pages created here will not be accessible at any URL. "
            "To add pages to an existing site, create them as children of the homepage."
        )

    def test_nonadmin_at_non_site_page(self):
        """Non-admins at an unroutable page get the warning without a config link."""
        # Assign siteeditor permission over no_site_page
        GroupPagePermission.objects.create(
            group=Group.objects.get(name="Site-wide editors"),
            page=self.no_site_page, permission_type='add'
        )
        self.login(username='siteeditor', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.no_site_page.id, )))
        self.assertEqual(response.status_code, 200)
        # Non-admin should get a warning about unroutable pages
        self.assertContains(
            response,
            (
                "There is no site record for this location. "
                "Pages created here will not be accessible at any URL."
            )
        )

    def test_nonadmin_at_site_page(self):
        """No warning is shown to non-admins at a routable page."""
        self.login(username='siteeditor', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.site_page.id, )))
        self.assertEqual(response.status_code, 200)
        # There should be no warning message here
        self.assertNotContains(response, "Pages created here will not be accessible")

    # Tests for users that have explore permission *somewhere*, but not at the view being tested;
    # in all cases, they should be redirected to their explorable root

    def test_bad_permissions_at_root(self):
        """Users lacking permission at the root are redirected to their explorable root."""
        # 'siteeditor' does not have permission to explore the root
        self.login(username='siteeditor', password='password')
        response = self.client.get(reverse('wagtailadmin_explore_root'))
        # Users without permission to explore here should be redirected to their explorable root.
        self.assertEqual(
            (response.status_code, response['Location']),
            (302, reverse('wagtailadmin_explore', args=(self.site_page.pk, )))
        )

    def test_bad_permissions_at_non_site_page(self):
        """Users lacking permission at an unroutable page are redirected likewise."""
        # 'siteeditor' does not have permission to explore no_site_page
        self.login(username='siteeditor', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.no_site_page.id, )))
        # Users without permission to explore here should be redirected to their explorable root.
        self.assertEqual(
            (response.status_code, response['Location']),
            (302, reverse('wagtailadmin_explore', args=(self.site_page.pk, )))
        )

    def test_bad_permissions_at_site_page(self):
        """Users lacking permission at a site page are redirected to the page they may explore."""
        # Adjust siteeditor's permission so that they have permission over no_site_page
        # instead of site_page
        Group.objects.get(name="Site-wide editors").page_permissions.update(page_id=self.no_site_page.id)
        self.login(username='siteeditor', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=(self.site_page.id, )))
        # Users without permission to explore here should be redirected to their explorable root.
        self.assertEqual(
            (response.status_code, response['Location']),
            (302, reverse('wagtailadmin_explore', args=(self.no_site_page.pk, )))
        )
class TestExplorablePageVisibility(TestCase, WagtailTestUtils):
    """
    Test the way that the Explorable Pages functionality manifests within the Explorer.
    This is isolated in its own test case because it requires a custom page tree and custom set of
    users and groups.
    The fixture sets up this page tree:
    ========================================================
    ID Site          Path
    ========================================================
    1              /
    2  testserver  /home/
    3  testserver  /home/about-us/
    4  example.com /example-home/
    5  example.com /example-home/content/
    6  example.com /example-home/content/page-1/
    7  example.com /example-home/content/page-2/
    9  example.com /example-home/content/page-2/child-1
    8  example.com /example-home/other-content/
    10 example2.com /home-2/
    ========================================================
    Group 1 has explore and choose permissions rooted at testserver's homepage.
    Group 2 has explore and choose permissions rooted at example.com's page-1.
    Group 3 has explore and choose permissions rooted at example.com's other-content.
    User "jane" is in Group 1.
    User "bob" is in Group 2.
    User "sam" is in Groups 1 and 2.
    User "josh" is in Groups 2 and 3.
    User "mary" is in no Groups, but she has the "access wagtail admin" permission.
    User "superman" is an admin.
    """
    fixtures = ['test_explorable_pages.json']

    # Integration tests adapted from @coredumperror

    def test_admin_can_explore_every_page(self):
        """A superuser can open the explorer view on every page in the tree."""
        self.login(username='superman', password='password')
        for page in Page.objects.all():
            response = self.client.get(reverse('wagtailadmin_explore', args=[page.pk]))
            self.assertEqual(response.status_code, 200)

    def test_admin_sees_root_page_as_explorer_root(self):
        """A superuser's explorer root lists every site homepage."""
        self.login(username='superman', password='password')
        response = self.client.get(reverse('wagtailadmin_explore_root'))
        self.assertEqual(response.status_code, 200)
        # Administrator should see the full list of children of the Root page.
        self.assertContains(response, "Welcome to testserver!")
        self.assertContains(response, "Welcome to example.com!")

    def test_admin_sees_breadcrumbs_up_to_root_page(self):
        """A superuser's breadcrumb trail reaches all the way to the tree root."""
        self.login(username='superman', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=[6]))
        self.assertEqual(response.status_code, 200)
        expected = """
            <li class="home breadcrumb-item">
                <a class="breadcrumb-link" href="/admin/pages/">
                    <svg class="icon icon-site home_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-site"></use>
                    </svg>
                    <span class="visuallyhidden">Root</span>
                    <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-arrow-right"></use>
                    </svg>
                </a>
            </li>
        """
        self.assertContains(response, expected, html=True)
        expected = """
            <li class="breadcrumb-item">
                <a class="breadcrumb-link" href="/admin/pages/4/">
                    <span class="title">Welcome to example.com!</span>
                    <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-arrow-right"></use>
                    </svg>
                </a>
            </li>
        """
        self.assertContains(response, expected, html=True)
        expected = """
            <li class="breadcrumb-item">
                <a class="breadcrumb-link" href="/admin/pages/5/">
                    <span class="title">Content</span>
                    <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-arrow-right"></use>
                    </svg>
                </a>
            </li>
        """
        self.assertContains(response, expected, html=True)

    def test_nonadmin_sees_breadcrumbs_up_to_cca(self):
        """A restricted user's breadcrumbs stop at their closest common ancestor."""
        self.login(username='josh', password='password')
        response = self.client.get(reverse('wagtailadmin_explore', args=[6]))
        self.assertEqual(response.status_code, 200)
        # While at "Page 1", Josh should see the breadcrumbs leading only as far back as the example.com homepage,
        # since it's his Closest Common Ancestor.
        expected = """
            <li class="home breadcrumb-item">
                <a class="breadcrumb-link" href="/admin/pages/4/">
                    <svg class="icon icon-site home_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-site"></use>
                    </svg>
                    <span class="visuallyhidden">Home</span>
                    <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-arrow-right"></use>
                    </svg>
                </a>
            </li>
        """
        self.assertContains(response, expected, html=True)
        expected = """
            <li class="breadcrumb-item">
                <a class="breadcrumb-link" href="/admin/pages/5/">
                    <span class="title">Content</span>
                    <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true" focusable="false">
                        <use href="#icon-arrow-right"></use>
                    </svg>
                </a>
            </li>
        """
        self.assertContains(response, expected, html=True)
        # The page title shouldn't appear because it's the "home" breadcrumb.
        self.assertNotContains(response, "Welcome to example.com!")

    def test_admin_home_page_changes_with_permissions(self):
        """The dashboard summary reflects only sites the user can explore."""
        self.login(username='bob', password='password')
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertEqual(response.status_code, 200)
        # Bob should only see the welcome for example.com, not testserver
        self.assertContains(response, "Welcome to the example.com Wagtail CMS")
        self.assertNotContains(response, "testserver")

    def test_breadcrumb_with_no_user_permissions(self):
        """Users with no page permissions see no breadcrumb at all."""
        self.login(username='mary', password='password')
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertEqual(response.status_code, 200)
        # Since Mary has no page permissions, she should not see the breadcrumb
        self.assertNotContains(response, """<li class="home breadcrumb-item"><a class="breadcrumb-link" href="/admin/pages/4/" class="icon icon-home text-replace">Home</a></li>""")
@override_settings(WAGTAIL_I18N_ENABLED=True)
class TestLocaleSelector(TestCase, WagtailTestUtils):
    """Tests for the locale switcher shown in the explorer header when i18n is enabled."""
    fixtures = ['test.json']

    def setUp(self):
        self.events_page = Page.objects.get(url_path='/home/events/')
        self.fr_locale = Locale.objects.create(language_code='fr')
        self.translated_events_page = self.events_page.copy_for_translation(self.fr_locale, copy_parents=True)
        self.user = self.login()

    def test_locale_selector(self):
        """With i18n enabled, the header links to the French translation's explorer."""
        response = self.client.get(
            reverse('wagtailadmin_explore', args=[self.events_page.id])
        )
        self.assertContains(response, '<li class="header-meta--locale">')
        add_translation_url = reverse('wagtailadmin_explore', args=[self.translated_events_page.id])
        self.assertContains(response, f'<a href="{add_translation_url}" aria-label="French" class="u-link is-live">')

    @override_settings(WAGTAIL_I18N_ENABLED=False)
    def test_locale_selector_not_present_when_i18n_disabled(self):
        """With i18n disabled, no locale switcher markup is rendered."""
        response = self.client.get(
            reverse('wagtailadmin_explore', args=[self.events_page.id])
        )
        self.assertNotContains(response, '<li class="header-meta--locale">')
        add_translation_url = reverse('wagtailadmin_explore', args=[self.translated_events_page.id])
        self.assertNotContains(response, f'<a href="{add_translation_url}" aria-label="French" class="u-link is-live">')
|
|
"""Support for Template alarm control panels."""
import logging
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
ENTITY_ID_FORMAT,
FORMAT_NUMBER,
PLATFORM_SCHEMA,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_CODE,
CONF_NAME,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_START,
MATCH_ALL,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_UNAVAILABLE,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.script import Script
_LOGGER = logging.getLogger(__name__)

# States a panel's value template is allowed to render; anything else is
# rejected in AlarmControlPanelTemplate.async_update and the state is cleared.
_VALID_STATES = [
    STATE_ALARM_ARMED_AWAY,
    STATE_ALARM_ARMED_HOME,
    STATE_ALARM_ARMED_NIGHT,
    STATE_ALARM_DISARMED,
    STATE_ALARM_PENDING,
    STATE_ALARM_TRIGGERED,
    STATE_UNAVAILABLE,
]

# YAML configuration keys for a single templated panel.
CONF_ARM_AWAY_ACTION = "arm_away"
CONF_ARM_HOME_ACTION = "arm_home"
CONF_ARM_NIGHT_ACTION = "arm_night"
CONF_DISARM_ACTION = "disarm"
CONF_ALARM_CONTROL_PANELS = "panels"
CONF_CODE_ARM_REQUIRED = "code_arm_required"

# Per-panel schema: the value template and every action script are optional;
# a missing script simply disables that arm/disarm mode.
ALARM_CONTROL_PANEL_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_DISARM_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(CONF_ARM_AWAY_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(CONF_ARM_HOME_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(CONF_ARM_NIGHT_ACTION): cv.SCRIPT_SCHEMA,
        vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
        vol.Optional(CONF_NAME): cv.string,
    }
)

# Platform schema: `panels` maps an entity slug to a panel configuration.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ALARM_CONTROL_PANELS): cv.schema_with_slug_keys(
            ALARM_CONTROL_PANEL_SCHEMA
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Template Alarm Control Panels from YAML configuration."""
    alarm_control_panels = []
    for device, device_config in config[CONF_ALARM_CONTROL_PANELS].items():
        # Fall back to the config slug when no friendly name is given.
        name = device_config.get(CONF_NAME, device)
        state_template = device_config.get(CONF_VALUE_TEMPLATE)
        disarm_action = device_config.get(CONF_DISARM_ACTION)
        arm_away_action = device_config.get(CONF_ARM_AWAY_ACTION)
        arm_home_action = device_config.get(CONF_ARM_HOME_ACTION)
        arm_night_action = device_config.get(CONF_ARM_NIGHT_ACTION)
        code_arm_required = device_config[CONF_CODE_ARM_REQUIRED]
        template_entity_ids = set()
        if state_template is not None:
            # Track only entities the template references; extract_entities()
            # returns MATCH_ALL when dependencies can't be determined statically.
            temp_ids = state_template.extract_entities()
            if str(temp_ids) != MATCH_ALL:
                template_entity_ids |= set(temp_ids)
        else:
            # Without a template the panel's state is set optimistically by
            # the arm/disarm service calls.
            _LOGGER.warning("No value template - will use optimistic state")
        if not template_entity_ids:
            template_entity_ids = MATCH_ALL
        alarm_control_panels.append(
            AlarmControlPanelTemplate(
                hass,
                device,
                name,
                state_template,
                disarm_action,
                arm_away_action,
                arm_home_action,
                arm_night_action,
                code_arm_required,
                template_entity_ids,
            )
        )
    async_add_entities(alarm_control_panels)
class AlarmControlPanelTemplate(AlarmControlPanelEntity):
    """Representation of a templated Alarm Control Panel.

    The panel state follows `state_template` when one is configured;
    otherwise it is set optimistically by the arm/disarm service calls.
    """

    def __init__(
        self,
        hass,
        device_id,
        name,
        state_template,
        disarm_action,
        arm_away_action,
        arm_home_action,
        arm_night_action,
        code_arm_required,
        template_entity_ids,
    ):
        """Initialize the panel."""
        self.hass = hass
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass
        )
        self._name = name
        self._template = state_template
        self._code_arm_required = code_arm_required
        # Action scripts are optional; a missing script disables the
        # corresponding arm/disarm feature (see supported_features).
        self._disarm_script = None
        if disarm_action is not None:
            self._disarm_script = Script(hass, disarm_action)
        self._arm_away_script = None
        if arm_away_action is not None:
            self._arm_away_script = Script(hass, arm_away_action)
        self._arm_home_script = None
        if arm_home_action is not None:
            self._arm_home_script = Script(hass, arm_home_action)
        self._arm_night_script = None
        if arm_night_action is not None:
            self._arm_night_script = Script(hass, arm_night_action)
        self._state = None
        # Entity ids the state template depends on, or MATCH_ALL.
        self._entities = template_entity_ids
        if self._template is not None:
            self._template.hass = self.hass

    @property
    def name(self):
        """Return the display name of this alarm control panel."""
        return self._name

    @property
    def should_poll(self):
        """Return False; updates are pushed via state-change events."""
        return False

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        # Only advertise modes for which a script was configured.
        supported_features = 0
        if self._arm_night_script is not None:
            supported_features = supported_features | SUPPORT_ALARM_ARM_NIGHT
        if self._arm_home_script is not None:
            supported_features = supported_features | SUPPORT_ALARM_ARM_HOME
        if self._arm_away_script is not None:
            supported_features = supported_features | SUPPORT_ALARM_ARM_AWAY
        return supported_features

    @property
    def code_format(self):
        """Return one or more digits/characters."""
        return FORMAT_NUMBER

    @property
    def code_arm_required(self):
        """Whether the code is required for arm actions."""
        return self._code_arm_required

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def template_alarm_state_listener(event):
            """Handle target device state changes."""
            self.async_schedule_update_ha_state(True)

        @callback
        def template_alarm_control_panel_startup(event):
            """Update template on startup."""
            if self._template is not None and self._entities != MATCH_ALL:
                # Track state change only for valid templates
                async_track_state_change_event(
                    self.hass, self._entities, template_alarm_state_listener
                )
            self.async_schedule_update_ha_state(True)

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, template_alarm_control_panel_startup
        )

    async def _async_alarm_arm(self, state, script=None, code=None):
        """Arm the panel to specified state with supplied script."""
        optimistic_set = False
        if self._template is None:
            # No template: assume the action succeeds and set state directly.
            self._state = state
            optimistic_set = True
        if script is not None:
            await script.async_run({ATTR_CODE: code}, context=self._context)
        else:
            _LOGGER.error("No script action defined for %s", state)
        if optimistic_set:
            self.async_write_ha_state()

    async def async_alarm_arm_away(self, code=None):
        """Arm the panel to Away."""
        await self._async_alarm_arm(
            STATE_ALARM_ARMED_AWAY, script=self._arm_away_script, code=code
        )

    async def async_alarm_arm_home(self, code=None):
        """Arm the panel to Home."""
        await self._async_alarm_arm(
            STATE_ALARM_ARMED_HOME, script=self._arm_home_script, code=code
        )

    async def async_alarm_arm_night(self, code=None):
        """Arm the panel to Night."""
        await self._async_alarm_arm(
            STATE_ALARM_ARMED_NIGHT, script=self._arm_night_script, code=code
        )

    async def async_alarm_disarm(self, code=None):
        """Disarm the panel."""
        await self._async_alarm_arm(
            STATE_ALARM_DISARMED, script=self._disarm_script, code=code
        )

    async def async_update(self):
        """Update the state from the template."""
        if self._template is None:
            return
        try:
            state = self._template.async_render().lower()
        except TemplateError as ex:
            _LOGGER.error(ex)
            self._state = None
            # Bug fix: bail out on template errors. Previously execution fell
            # through to the check below and raised UnboundLocalError because
            # `state` was never assigned.
            return
        if state in _VALID_STATES:
            self._state = state
            _LOGGER.debug("Valid state - %s", state)
        else:
            _LOGGER.error(
                "Received invalid alarm panel state: %s. Expected: %s",
                state,
                ", ".join(_VALID_STATES),
            )
            self._state = None
|
|
from __future__ import unicode_literals
import csv
import io
import json
import logging
import math
import os
from collections import OrderedDict
from django.core.cache import cache
from django.http import Http404
from django.http import HttpResponse
from django.http.response import FileResponse
from django.template.defaultfilters import slugify
from django.utils import translation
from django.utils.translation import get_language_from_request
from django.utils.translation import pgettext
from le_utils.constants import content_kinds
from .models import ContentSessionLog
from .models import ContentSummaryLog
from kolibri.core.auth.models import Facility
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.utils.csv import open_csv_for_writing
from kolibri.utils import conf
logger = logging.getLogger(__name__)
# Export filename patterns; formatted with (facility name, first 4 chars of
# the facility id) — see csv_file_generator / download_csv_file.
CSV_EXPORT_FILENAMES = {
    "session": "{}_{}_content_session_logs.csv",
    "summary": "{}_{}_content_summary_logs.csv",
}
def cache_channel_name(obj):
    """Return the channel name for a log row, caching lookups for 10 minutes.

    :param obj: a log record dict containing a "channel_id" key
    :returns: the channel's name, or "" if the channel is not present locally
    """
    channel_id = obj["channel_id"]
    key = "{id}_ChannelMetadata_name".format(id=channel_id)
    channel_name = cache.get(key)
    if channel_name is None:
        try:
            # Bug fix: cache the channel *name* string; previously the whole
            # ChannelMetadata instance was stored and returned.
            channel_name = ChannelMetadata.objects.get(id=channel_id).name
        except ChannelMetadata.DoesNotExist:
            channel_name = ""
        cache.set(key, channel_name, 60 * 10)
    return channel_name
def cache_content_title(obj):
    """Return the title for a log row's content node, caching lookups for 10 minutes.

    Returns "" when no ContentNode with that content_id exists locally.
    """
    content_id = obj["content_id"]
    cache_key = "{id}_ContentNode_title".format(id=content_id)
    title = cache.get(cache_key)
    if title is not None:
        return title
    node = ContentNode.objects.filter(content_id=content_id).first()
    title = node.title if node else ""
    cache.set(cache_key, title, 60 * 10)
    return title
# Derived CSV columns that are not raw DB values: channel/content names are
# looked up (and cached), time is rendered to 1 decimal place, and progress
# is floored to 4 decimal places so it never rounds up to 1.0 prematurely.
mappings = {
    "channel_name": cache_channel_name,
    "content_title": cache_content_title,
    "time_spent": lambda x: "{:.1f}".format(round(x["time_spent"], 1)),
    "progress": lambda x: "{:.4f}".format(math.floor(x["progress"] * 10000.0) / 10000),
}

# Ordered mapping of queryset field name -> human-readable CSV column header;
# the ordering here defines the CSV column order.
labels = OrderedDict(
    (
        ("user__facility__name", "Facility name"),
        ("user__username", "Username"),
        ("channel_id", "Channel id"),
        ("channel_name", "Channel name"),
        ("content_id", "Content id"),
        ("content_title", "Content title"),
        ("start_timestamp", "Time of first interaction"),
        ("end_timestamp", "Time of last interaction"),
        ("completion_timestamp", "Time of completion"),
        ("time_spent", "Time Spent (sec)"),
        ("progress", "Progress (0-1)"),
        ("kind", "Content kind"),
    )
)
def map_object(obj):
    """Translate a raw log-record dict into a row keyed by CSV column labels.

    Fields with an entry in `mappings` are computed from the record;
    everything else is copied through when present.
    """
    row = {}
    for field, column in labels.items():
        mapper = mappings.get(field)
        if mapper is not None:
            row[column] = mapper(obj)
        elif field in obj:
            row[column] = obj[field]
    return row
# Per-log-type export configuration: the base queryset (quiz interactions are
# excluded from both exports), the filename pattern, and the DB columns pulled
# via .values() — note "completion_timestamp" only exists for summary logs.
classes_info = {
    "session": {
        "queryset": ContentSessionLog.objects.exclude(kind=content_kinds.QUIZ),
        "filename": CSV_EXPORT_FILENAMES["session"],
        "db_columns": (
            "user__username",
            "user__facility__name",
            "channel_id",
            "content_id",
            "start_timestamp",
            "end_timestamp",
            "time_spent",
            "progress",
            "kind",
        ),
    },
    "summary": {
        "queryset": ContentSummaryLog.objects.exclude(kind=content_kinds.QUIZ),
        "filename": CSV_EXPORT_FILENAMES["summary"],
        "db_columns": (
            "user__username",
            "user__facility__name",
            "content_id",
            "channel_id",
            "start_timestamp",
            "end_timestamp",
            "completion_timestamp",
            "time_spent",
            "progress",
            "kind",
        ),
    },
}
def csv_file_generator(facility, log_type, filepath, overwrite=False):
    """Write a CSV export of a facility's session or summary logs.

    This is a generator: it yields once per row written so callers can
    report progress.

    :param facility: the Facility whose logs are exported
    :param log_type: "session" or "summary"
    :param filepath: destination path for the CSV file
    :param overwrite: if False, raise when `filepath` already exists
    :raises ValueError: on an unknown log_type or an existing file
    """
    if log_type not in ("summary", "session"):
        raise ValueError(
            "Impossible to create a csv export file for {}".format(log_type)
        )
    log_info = classes_info[log_type]
    if not overwrite and os.path.exists(filepath):
        raise ValueError("{} already exists".format(filepath))
    queryset = log_info["queryset"].filter(dataset_id=facility.dataset_id)
    # Exclude completion timestamp for the sessionlog CSV.
    # Bug fix: compare against the column *label* — the previous comparison
    # with the field name "completion_timestamp" never matched a label, so
    # the session CSV header wrongly included the completion column.
    header_labels = tuple(
        label
        for label in labels.values()
        if log_type == "summary" or label != labels["completion_timestamp"]
    )
    csv_file = open_csv_for_writing(filepath)
    with csv_file as f:
        writer = csv.DictWriter(f, header_labels)
        # Bug fix: the message had no placeholder, so .format() was a no-op
        # and the target path was never logged.
        logger.info("Creating csv file {filename}".format(filename=filepath))
        writer.writeheader()
        for item in queryset.select_related("user", "user__facility").values(
            *log_info["db_columns"]
        ):
            writer.writerow(map_object(item))
            yield
def exported_logs_info(request, facility_id, facility):
    """Report the last-modification timestamp of each exported log CSV.

    :returns: JSON HttpResponse mapping log type to mtime, or None when the
        export file does not exist on disk.
    """
    logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
    csv_statuses = {}
    for log_type, filename_template in CSV_EXPORT_FILENAMES.items():
        log_path = os.path.join(
            logs_dir, filename_template.format(facility, facility_id[:4])
        )
        csv_statuses[log_type] = (
            os.path.getmtime(log_path) if os.path.exists(log_path) else None
        )
    return HttpResponse(json.dumps(csv_statuses), content_type="application/json")
def download_csv_file(request, log_type, facility_id):
    """Serve a previously exported log CSV as a file download.

    :param log_type: "session" or "summary"; anything else results in a 404
    :param facility_id: facility whose export to serve; falls back to the
        requesting user's facility when falsy
    :raises Http404: when no export file exists on disk for this log type
    """
    if facility_id:
        facility_name = Facility.objects.get(pk=facility_id).name
    else:
        facility_name = request.user.facility.name
        facility_id = request.user.facility.id
    # Only the *download* filename is localized; the on-disk export name
    # (CSV_EXPORT_FILENAMES) stays untranslated.
    locale = get_language_from_request(request)
    translation.activate(locale)
    csv_translated_filenames = {
        "session": (
            "{}_{}_"
            + slugify(
                pgettext(
                    "Default name for the exported CSV file with content session logs. Please keep the underscores between words in the translation",
                    "content_session_logs",
                )
            )
            + ".csv"
        ).replace("-", "_"),
        "summary": (
            "{}_{}_"
            + slugify(
                pgettext(
                    "Default name for the exported CSV file with content summary logs. Please keep the underscores between words in the translation",
                    "content_summary_logs",
                )
            )
            + ".csv"
        ).replace("-", "_"),
    }
    if log_type in CSV_EXPORT_FILENAMES.keys():
        filepath = os.path.join(
            conf.KOLIBRI_HOME,
            "log_export",
            CSV_EXPORT_FILENAMES[log_type].format(facility_name, facility_id[:4]),
        )
    else:
        filepath = None
    # if the file does not exist on disk, return a 404
    if filepath is None or not os.path.exists(filepath):
        raise Http404("There is no csv export file for {} available".format(log_type))
    # generate a file response
    response = FileResponse(io.open(filepath, "rb"))
    # set the content-type by guessing from the filename
    response["Content-Type"] = "text/csv"
    # set the content-disposition as attachment to force download
    response["Content-Disposition"] = "attachment; filename={}".format(
        str(csv_translated_filenames[log_type]).format(facility_name, facility_id[:4])
    )
    translation.deactivate()
    # set the content-length to the file size
    response["Content-Length"] = os.path.getsize(filepath)
    return response
|
|
#---------------------------------------------------------------------------
# Copyright 2012 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import logging
import os
import re
import sys
from datetime import datetime
from functools import cmp_to_key
class ItemValue(object):
    """Wrapper around a '^'-delimited MUMPS value string.

    Provides length, membership, indexing and round-trip string conversion
    over the individual '^'-separated pieces.
    """
    def __init__(self, value):
        # split only a truthy value; None and "" are kept as-is
        self.value = value.split('^') if value else value

    def __len__(self):
        return len(self.value) if self.value else 0

    def __contains__(self, elt):
        return bool(self.value) and elt in self.value

    def __getitem__(self, key):
        if not self.value:
            return None
        return self.value[key]

    def __str__(self):
        if self.value:
            return "^".join(self.value)
        # preserve the None/empty distinction of the original input
        return str(None) if self.value is None else ""
class GlobalNode(object):
    """A node in a MUMPS global tree.

    Each node holds an optional string ``value``, its ``subscript`` within the
    parent, a back-link to ``parent``, and a dict of ``child`` nodes keyed by
    subscript.
    """
    def __init__(self, value=None, subscript=None, parent=None):
        self.child = {}
        self.value = value
        self.subscript = subscript
        self.parent = parent

    def isRoot(self):
        """Return True for the top node of the tree (no parent)."""
        return self.parent is None

    def getRootSubscript(self):
        """Return the root node's subscript (e.g. '^DD')."""
        if self.isRoot():
            return self.subscript
        return self.parent.getRootSubscript()

    def getRootNode(self):
        """Return the root node of the tree this node belongs to."""
        if self.isRoot():
            return self
        return self.parent.getRootNode()

    def get(self, key, default=None):
        return self.child.get(key, default)

    def __contains__(self, elt):
        return elt in self.child

    def __getitem__(self, key):
        return self.child[key]

    def __setitem__(self, key, value):
        # adopting a child also fixes up its subscript and parent back-link
        self.child[key] = value
        value.subscript = key
        value.parent = self

    def __iter__(self):
        return iter(self.child)

    def __len__(self):
        return len(self.child)

    def keys(self):
        return self.child.keys()

    def values(self):
        # bug fix: was ``self.child.value()``, which raised AttributeError
        return self.child.values()

    def getIndex(self):
        """Return the global reference up to (excluding) the closing ')'.

        For root '^DD' and path 0,'IX' this yields "^DD(0,IX".
        """
        if self.parent:
            if self.parent.isRoot():
                outId = "%s%s" % (self.parent.getIndex(), self.subscript)
            else:
                outId = "%s,%s" % (self.parent.getIndex(), self.subscript)
        else:
            outId = "%s(" % self.subscript
        return outId

    def __str__(self):
        return "%s)=%s" % (self.getIndex(), self.value)

    def __sizeof__(self):
        # shallow size: children dict + own value + subscript
        return (sys.getsizeof(self.child) +
                sys.getsizeof(self.value) +
                sys.getsizeof(self.subscript))
def printGlobal(gNode):
    """Recursively log a global subtree, numeric subscripts first.

    Intermediate nodes with no value are traversed but not logged.
    """
    if gNode is None:
        return
    if gNode.value is not None:  # skip intermediate node
        logging.info(gNode)
    # Fix: sorted(..., cmp=...) is Python-2-only; cmp_to_key works on 2.7+/3.x
    for item in sorted(gNode, key=cmp_to_key(sortDataEntryFloatFirst)):
        printGlobal(gNode[item])
def countGlobal(gNode):
    """Return the total number of nodes in the subtree rooted at gNode."""
    return 1 + sum(countGlobal(gNode[sub]) for sub in gNode)
def countGlobalSize(gNode):
    """Return the recursive shallow-size total (bytes) of the subtree."""
    return sys.getsizeof(gNode) + sum(
        countGlobalSize(gNode[sub]) for sub in gNode
    )
def sortDataEntryFloatFirst(data1, data2):
    """Comparator ordering numeric-looking subscripts before plain strings.

    Two numeric strings compare by float value, a numeric string sorts before
    a non-numeric one, and two non-numeric strings compare lexically.
    Returns a negative/zero/positive integer (old ``cmp`` convention).
    """
    isData1Float = convertToType(data1, float)
    isData2Float = convertToType(data2, float)
    if isData1Float and isData2Float:
        return _cmp(float(data1), float(data2))
    if isData1Float:
        return -1  # float first
    if isData2Float:
        return 1
    return _cmp(data1, data2)


def _cmp(a, b):
    """Replacement for the builtin ``cmp`` removed in Python 3."""
    return (a > b) - (a < b)


def convertToType(data1, convertFunc):
    """Return True when ``convertFunc(data1)`` succeeds without ValueError."""
    try:
        convertFunc(data1)
        return True
    except ValueError:
        return False
def testGlobalNode():
    """Smoke-test GlobalNode construction, nesting and traversal."""
    gn = GlobalNode("root^test", "^ZZTEST")
    assert gn.value == 'root^test'
    assert gn.subscript == "^ZZTEST"
    # Fix: was ``assert gn.isRoot`` — a bound method is always truthy,
    # so the predicate was never actually evaluated.
    assert gn.isRoot()
    assert gn.getRootSubscript() == gn.subscript
    gn['test'] = GlobalNode("-1")
    # Fix: xrange is Python-2-only; range behaves identically here.
    for i in range(0, 5):
        gn['test'][i] = GlobalNode(str(i) + '^')
        for j in range(0, 5):
            gn['test'][i][j] = GlobalNode("^".join([str(i), str(j)]))
        assert len(ItemValue(gn['test'][i].value)) == 2
    assert gn['test'].get(6) == None
    assert gn['test'][2].value == '2^'
    assert 2 in gn['test']
    # 1 root + 1 'test' node + 5 children + 25 grandchildren
    assert countGlobal(gn) == 32
def test_sortDataEntryFloatFirst():
    """Show numeric strings sorting numerically ahead of plain strings."""
    initLst = ['PRE', 'DIST', '22', '1', '0', 'INIT', 'VERSION', '4', 'INI', '%D', '%']
    # Fix: sorted(cmp=...) and the print statement are Python-2-only forms.
    sortedLst = sorted(initLst, key=cmp_to_key(sortDataEntryFloatFirst))
    print(initLst, sortedLst)
def getKeys(globalRoot, func=int):
    """Return the subscripts of globalRoot convertible via ``func``,
    sorted by their converted value (numeric order by default)."""
    convertible = []
    for sub in globalRoot:
        try:
            func(sub)
        except ValueError:
            continue
        convertible.append(sub)
    return sorted(convertible, key=func)
def createGlobalNodeByZWRFile(inputFileName):
    """Parse an entire ZWR export file into a single GlobalNode tree."""
    globalRoot = None
    # renamed the file handle: the original shadowed the builtin `input`
    with open(inputFileName, "r") as zwrFile:
        for lineNo, rawLine in enumerate(zwrFile, 0):
            if lineNo <= 1:  # the first two lines are the ZWR header
                continue
            stripped = rawLine.strip('\r\n')
            if lineNo == 2:
                # the first data line names the global root, e.g. '^DD'
                globalRoot = GlobalNode(subscript=stripped[:stripped.find('(')])
            createGlobalNode(stripped, globalRoot)
    return globalRoot
def getCommonSubscript(one, two):
    """Return the longest common prefix of two subscript lists.

    Either argument being falsy (None or empty) yields an empty list.
    """
    if not one or not two:
        return []
    shorter, longer = (one, two) if len(one) <= len(two) else (two, one)
    matched = len(shorter)  # full match unless a mismatch is found
    for pos, sub in enumerate(shorter):
        if longer[pos] != sub:
            matched = pos
            break
    return shorter[0:matched]
def test_getCommonSubscript():
    """Verify common-prefix extraction, including empty and None inputs."""
    base = [0, 1, 2, 3]
    cases = (
        ([0, 3], [0]),
        ([-1, 1], []),
        (None, []),
    )
    for other, expected in cases:
        assert getCommonSubscript(base, other) == expected
def resetGlobalIndex(subscripts, glbRootSub):
    """Return the common-subscript grouping index for a global entry.

    Single-subscript entries and the ^DD(0,... file-header node group at
    index 0; everything else groups at index 1.
    """
    if len(subscripts) == 1 or (glbRootSub == '^DD' and subscripts[0] == '0'):
        logging.info("reset index to 0")
        return 0
    return 1
class DefaultZWRRootGenerator(object):
    """Iterator lazily yielding GlobalNode subtrees from a ZWR export file.

    Lines are grouped into subtrees sharing a common-subscript prefix of
    length ``self.index``. When ``glbLoc`` is given, only entries under that
    global location are produced; everything else is skipped with a warning.
    """
    def __init__(self, inputFileName, glbLoc=None):
        self.glbLoc = glbLoc
        # common-subscript prefix of the subtree currently being accumulated
        self.curCommonSub = None
        if not glbLoc:
            self.index = 1 # set the starting index to be 1
            self.rootSub = None
            self.commonSubscript = None
        else:
            # parse the requested location into subscript prefix + root name
            self.commonSubscript, value, self.rootSub = findSubscriptValue(glbLoc)
            if self.commonSubscript:
                self.index = len(self.commonSubscript)
            else:
                self.index = 0
        # root of the subtree being built (None until the first line is read)
        self.curRoot = None
        self.inputFile = open(inputFileName, "r")
        self.lineNo = 0

    def __iter__(self):
        return self

    def __del__(self):
        # best-effort cleanup of the underlying file handle
        self.inputFile.close()

    def __next__(self):
        # Python 3 iterator protocol delegates to the Python 2 style next()
        return self.next()

    def next(self):
        """Return the next completed subtree, raising StopIteration at EOF."""
        if self.inputFile.closed:
            raise StopIteration
        while True:
            line = self.inputFile.readline()
            if not line or len(line) == 0:
                # EOF: flush the final, still-pending subtree if there is one
                self.inputFile.close()
                if self.curRoot:
                    retNode = self.curRoot.getRootNode()
                    # descend from the root to the common-prefix node that
                    # represents the subtree handed back to the caller
                    if self.curCommonSub:
                        common = self.curCommonSub[0:self.index]
                        for sub in common:
                            retNode = retNode[sub]
                    elif self.commonSubscript:
                        for sub in self.commonSubscript:
                            retNode = retNode[sub]
                    return retNode
            self.lineNo += 1
            if self.lineNo <= 2: # ignore the first two lines
                continue
            line = line.strip('\r\n')
            result = self.filterResult(line)
            if result is None:
                self.inputFile.close()
                raise StopIteration
            if result == True:
                # line was absorbed into the current subtree; keep reading
                continue
            if result:
                return result

    def filterResult(self, line):
        """
        return None to stop reading more information
        return False to keep reading more information
        return GlobalNode to generate the result
        """
        retNode = None
        subscripts, value, rootSub = findSubscriptValue(line)
        if not subscripts: # must have some subscripts
            logging.info("no subscription found %s" % line)
            return None
        if not self.rootSub:
            self.rootSub = rootSub
        if rootSub != self.rootSub: # not under the same root, ignore
            retNode = self.curRoot
            if self.glbLoc:
                # a specific location was requested: skip foreign roots
                logging.warn("Different root, expected: %s, real: %s, ignore for now" %
                             (self.rootSub, rootSub))
                self.curRoot = None
                return True
            else:
                # no filter: switch to the new root and flush the old subtree
                self.rootSub = rootSub
                self.curCommonSub = subscripts[0:self.index+1]
                self.curRoot = createGlobalNode(line)
                if retNode:
                    retNode = retNode.getRootNode()
                    for sub in self.curCommonSub:
                        retNode = retNode[sub]
                    return retNode
                else:
                    return True
        if self.commonSubscript and subscripts[0:self.index] != self.commonSubscript:
            # entry falls outside the requested subscript prefix
            logging.warn("Different subsript, expected: %s, real: %s, ignore for now" %
                         (self.commonSubscript, subscripts[0:self.index]))
            retNode = self.curRoot
            self.curRoot = None
            if retNode:
                retNode = retNode.getRootNode()
                for sub in self.commonSubscript:
                    retNode = retNode[sub]
                return retNode
            else:
                return True
        curCommonScript = getCommonSubscript(subscripts, self.curCommonSub)
        #logging.debug(curCommonScript, self.curCommonSub)
        if self.curCommonSub is None or self.curCommonSub == curCommonScript:
            # same subtree (or first entry): accumulate the line
            if self.curCommonSub is None:
                self.curCommonSub = subscripts[0:self.index+1]
            self.curRoot = createGlobalNode(line, self.curRoot)
            return True
        else:
            # prefix changed: emit the finished subtree and start a new one
            retNode = self.curRoot
            if retNode:
                retNode = retNode.getRootNode()
                for subscript in curCommonScript:
                    retNode = retNode[subscript]
            self.curRoot = createGlobalNode(line)
            self.curCommonSub = curCommonScript + subscripts[len(curCommonScript):self.index+1]
            return retNode
def readGlobalNodeFromZWRFileV2(inputFileName, glbLoc=None):
    """Return an iterator of GlobalNode subtrees read lazily from a ZWR file.

    ``glbLoc`` optionally restricts output to entries under that global
    location; see DefaultZWRRootGenerator for the grouping rules.
    """
    return DefaultZWRRootGenerator(inputFileName, glbLoc)
def readGlobalNodeFromZWRFile(inputFileName):
    """ this is indeed a GlobalNode generator implemented by yield
        Assume all nodes subscript layout is always depth first
    """
    glbRootSub = None
    index = 1  # length of the common-subscript prefix used for grouping
    # NOTE(review): the file handle shadows the builtin `input`
    with open(inputFileName, "r") as input:
        curRoot, commonSubscript = None, None
        for idx, line in enumerate(input, 0):
            if idx <= 1:
                # the first two lines of a ZWR export are header lines
                continue
            line = line.strip('\r\n')
            subscripts, value, rootSub = findSubscriptValue(line) # find all the subscripts
            if not subscripts:
                # malformed entry: emit a sentinel and stop
                yield None
                return
            if idx == 2:
                # first data line fixes the root and the grouping index
                glbRootSub = rootSub
                index = resetGlobalIndex(subscripts, glbRootSub)
            curCommonScript = getCommonSubscript(subscripts, commonSubscript)
            if not curCommonScript:
                commonSubscript = subscripts[0:index+1]
                logging.debug("com sub: %s, index is %s" % (commonSubscript, index))
            if curCommonScript != commonSubscript:
                # prefix changed: the accumulated subtree is complete
                retNode = curRoot
                if curCommonScript:
                    commonSubscript = curCommonScript + subscripts[len(curCommonScript):index+1]
                curRoot = createGlobalNode(line)
                if retNode:
                    yield retNode
            else:
                # same prefix: keep accumulating into the current subtree
                if not curRoot:
                    curRoot = GlobalNode(subscript=glbRootSub)
                createGlobalNode(line, curRoot)
    """
    yield the last part of the global if any
    """
    if curRoot:
        yield curRoot
def test_createGlobalNodeByZWRFile(inputFileName):
    """Parse a whole ZWR file and report node count and memory footprint."""
    logging.info("start parsing file: %s" % inputFileName)
    outGlobal = createGlobalNodeByZWRFile(inputFileName)
    # Fix: print-statement form is Python-2-only; the function form is
    # valid on both 2.7 and 3.x for a single argument.
    print("Total Global is %s" % countGlobal(outGlobal))
    print("Total size of Global is %s" % countGlobalSize(outGlobal))
    logging.info("end parsing file: %s" % inputFileName)
def test_readGlobalNodeFromZWRFileV2(inputFileName, glbLoc=None):
    """Stream-parse a ZWR file and log every subtree produced by the V2 reader."""
    logging.info("Start reading file: %s" % inputFileName)
    totalEntry = 0
    for globalRoot in readGlobalNodeFromZWRFileV2(inputFileName, glbLoc):
        if not globalRoot:
            continue
        totalEntry += 1
        logging.info("Current Entry#: %s" % totalEntry)
        logging.info("Curent subscript is %s" % globalRoot.subscript)
        printGlobal(globalRoot)
        # free each subtree promptly to keep peak memory bounded
        del globalRoot
    logging.info("Total # of entries: %s" % totalEntry)
    logging.info("End reading file: %s" % inputFileName)
def test_readGlobalNodeFromZWRFile(inputFileName):
    """Smoke-test the generator-based ZWR reader: count and log each subtree."""
    logging.info("Start reading file: %s" % inputFileName)
    totalEntry = 0
    for globalRoot in readGlobalNodeFromZWRFile(inputFileName):
        if globalRoot:
            totalEntry += 1
            logging.info("Current Entry#: %s" % totalEntry)
            logging.info("Curent subscript is %s" % globalRoot.subscript)
            # free each subtree promptly to keep peak memory bounded
            del globalRoot
            globalRoot = None
            pass
    logging.info("Total # of entries: %s" % totalEntry)
    logging.info("End reading file: %s" % inputFileName)
    # NOTE(review): this final reset appears to be dead code — the local is
    # never read again after the loop; confirm before removing.
    totalEntry = 0
def findSubscriptValue(inputLine):
    """
    Seperate the subscript part vs value part of the global line
    ^DD(0,"IX",5)="1^^3^7" should return
    [0, IX, 5], 1^^3^7, ^DD
    """
    openParen = inputLine.find("(")
    if openParen <= 0:
        # no subscripts at all, e.g. '^DD'
        return None, None, inputLine
    if openParen == len(inputLine) - 1:
        # dangling '(' with nothing after it, e.g. '^DD('
        return None, None, inputLine[:-1]
    rootName = inputLine[:openParen]
    closeMark = inputLine.find(')="')
    if closeMark > openParen + 1:
        # complete entry with a quoted value after ')='
        subscripts = [
            part.strip('"')
            for part in inputLine[openParen + 1:closeMark].split(",")
        ]
        return subscripts, inputLine[closeMark + 3:-1], rootName
    # subscripts only — line was truncated before the value part
    subscripts = [part.strip('"') for part in inputLine[openParen + 1:].split(",")]
    return subscripts, None, rootName
def test_findSubscriptValue():
    """Exercise subscript/value splitting over representative ZWR lines."""
    for line, expected in [
        ('''^DD(0,0)="ATTRIBUTE^N^^35"''',
         (['0', '0'], 'ATTRIBUTE^N^^35', '^DD')),
        ('''^DD(0,0,"IX","SB",0,.2)=""''',
         (['0', '0', 'IX', 'SB', '0', '.2'], '', '^DD')),
        ('''^DD("IX"''', (['IX'], None, '^DD')),
        ('''^DD''', (None, None, '^DD')),
        ('''^DD(''', (None, None, '^DD')),
    ]:
        result = findSubscriptValue(line)
        # Fix: converted the Python-2-only print statement to a call
        print(line, result)
        assert result == expected
def createGlobalNode(inputLine, globalRoot=None):
    """
    create Global Node based on the input
    if globalRoot is None, it should result the root
    node created.
    """
    retRoot = globalRoot
    subscripts, value, rootSub = findSubscriptValue(inputLine)
    if subscripts:
        if value:
            # un-escape doubled quotes inside the stored value
            value = value.replace('""', '"')
        if globalRoot:
            logging.debug("finding a new globalRoot")
            node = retRoot.getRootNode()
            logging.debug("The root is %s" % node.subscript)
            if node.subscript != rootSub:
                logging.error("Global Node root subscript mismatch: %s, %s" %
                              (rootSub, node.subscript))
        else:
            logging.debug("Creating a new globalRoot %s" % inputLine)
            retRoot = GlobalNode(subscript=rootSub)
            node = retRoot
        # walk/create the intermediate path, then attach the leaf value
        for sub in subscripts[:-1]:
            if sub not in node:
                node[sub] = GlobalNode()
            node = node[sub]
        node[subscripts[-1]] = GlobalNode(value)
    return retRoot
def test_createGlobalNode():
    """Build standalone node trees from sample ZWR lines and dump each one."""
    samples = (
        '''^DD(0,0)="ATTRIBUTE^N^^35"''',
        '''^DD(0,0,"IX","SB",0,.2)=""''',
        '''^DD("IX"''',
    )
    for sample in samples:
        printGlobal(createGlobalNode(sample))
def test_UtilitiesFunctions():
    """Run every self-contained unit test in this module."""
    for check in (testGlobalNode,
                  test_getCommonSubscript,
                  test_findSubscriptValue,
                  test_createGlobalNode):
        check()
def main():
    """Command-line entry: run the self-tests, then parse the given ZWR file."""
    from LogManager import initConsoleLogging
    # NOTE(review): sys and datetime appear unused in this function
    import sys
    from datetime import datetime
    import argparse
    initConsoleLogging(formatStr='%(asctime)s %(message)s')
    test_UtilitiesFunctions()
    #test_createGlobalNodeByZWRFile(sys.argv[1])
    parser = argparse.ArgumentParser(description='VistA ZWR Global Parser')
    parser.add_argument('gdFile', help='path to ZWR file contains Globals data')
    parser.add_argument('-glbRoot', help='Global root location for FileMan file')
    result = parser.parse_args()
    test_readGlobalNodeFromZWRFileV2(result.gdFile, result.glbRoot)
    #test_readGlobalNodeFromZWRFile(sys.argv[1])
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
|
|
"""Support for performing TensorFlow classification on images."""
import io
import logging
import os
import sys
from PIL import Image, ImageDraw
import numpy as np
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
draw_box,
)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Entity state-attribute names exposed by the processor.
ATTR_MATCHES = "matches"
ATTR_SUMMARY = "summary"
ATTR_TOTAL_MATCHES = "total_matches"

# Configuration keys consumed by the platform schema below.
CONF_AREA = "area"
CONF_BOTTOM = "bottom"
CONF_CATEGORIES = "categories"
CONF_CATEGORY = "category"
CONF_FILE_OUT = "file_out"
CONF_GRAPH = "graph"
CONF_LABELS = "labels"
CONF_LEFT = "left"
CONF_MODEL = "model"
CONF_MODEL_DIR = "model_dir"
CONF_RIGHT = "right"
CONF_TOP = "top"
# Detection area given as relative coordinates (0..1) within the image.
AREA_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_BOTTOM, default=1): cv.small_float,
        vol.Optional(CONF_LEFT, default=0): cv.small_float,
        vol.Optional(CONF_RIGHT, default=1): cv.small_float,
        vol.Optional(CONF_TOP, default=0): cv.small_float,
    }
)
# A category entry may optionally restrict matches to its own sub-area.
CATEGORY_SCHEMA = vol.Schema(
    {vol.Required(CONF_CATEGORY): cv.string, vol.Optional(CONF_AREA): AREA_SCHEMA}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_FILE_OUT, default=[]): vol.All(cv.ensure_list, [cv.template]),
        vol.Required(CONF_MODEL): vol.Schema(
            {
                vol.Required(CONF_GRAPH): cv.isfile,
                vol.Optional(CONF_AREA): AREA_SCHEMA,
                # categories may be bare strings or dicts with per-category areas
                vol.Optional(CONF_CATEGORIES, default=[]): vol.All(
                    cv.ensure_list, [vol.Any(cv.string, CATEGORY_SCHEMA)]
                ),
                vol.Optional(CONF_LABELS): cv.isfile,
                vol.Optional(CONF_MODEL_DIR): cv.isdir,
            }
        ),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the TensorFlow image processing platform.

    Validates the model/label file locations, lazily imports TensorFlow and
    the Object Detection API (they live under the user-provided model_dir),
    loads the frozen detection graph into a session, and registers one
    processor entity per configured camera.
    """
    model_config = config.get(CONF_MODEL)
    model_dir = model_config.get(CONF_MODEL_DIR) or hass.config.path("tensorflow")
    labels = model_config.get(CONF_LABELS) or hass.config.path(
        "tensorflow", "object_detection", "data", "mscoco_label_map.pbtxt"
    )
    # Make sure locations exist
    if not os.path.isdir(model_dir) or not os.path.exists(labels):
        _LOGGER.error("Unable to locate tensorflow models or label map")
        return
    # append custom model path to sys.path
    sys.path.append(model_dir)
    try:
        # Verify that the TensorFlow Object Detection API is pre-installed
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
        # These imports shouldn't be moved to the top, because they depend on code from the model_dir.
        # (The model_dir is created during the manual setup process. See integration docs.)
        import tensorflow as tf
        from object_detection.utils import label_map_util
    except ImportError:
        _LOGGER.error(
            "No TensorFlow Object Detection library found! Install or compile "
            "for your system following instructions here: "
            "https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md"
        )
        return
    try:
        # Display warning that PIL will be used if no OpenCV is found.
        import cv2  # noqa: F401 pylint: disable=unused-import
    except ImportError:
        _LOGGER.warning(
            "No OpenCV library found. TensorFlow will process image with "
            "PIL at reduced resolution"
        )
    # Set up Tensorflow graph, session, and label map to pass to processor
    # pylint: disable=no-member
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_config.get(CONF_GRAPH), "rb") as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name="")
    session = tf.Session(graph=detection_graph)
    label_map = label_map_util.load_labelmap(labels)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=90, use_display_name=True
    )
    category_index = label_map_util.create_category_index(categories)
    entities = []
    for camera in config[CONF_SOURCE]:
        entities.append(
            TensorFlowImageProcessor(
                hass,
                camera[CONF_ENTITY_ID],
                camera.get(CONF_NAME),
                session,
                detection_graph,
                category_index,
                config,
            )
        )
    add_entities(entities)
class TensorFlowImageProcessor(ImageProcessingEntity):
    """Representation of an TensorFlow image processor."""

    def __init__(
        self,
        hass,
        camera_entity,
        name,
        session,
        detection_graph,
        category_index,
        config,
    ):
        """Initialize the TensorFlow entity.

        session/detection_graph/category_index are shared objects built once
        in setup_platform; config carries both platform-level and model-level
        options.
        """
        model_config = config.get(CONF_MODEL)
        self.hass = hass
        self._camera_entity = camera_entity
        if name:
            self._name = name
        else:
            # derive a default name from the camera's object id
            self._name = "TensorFlow {0}".format(split_entity_id(camera_entity)[1])
        self._session = session
        self._graph = detection_graph
        self._category_index = category_index
        self._min_confidence = config.get(CONF_CONFIDENCE)
        self._file_out = config.get(CONF_FILE_OUT)
        # handle categories and specific detection areas
        categories = model_config.get(CONF_CATEGORIES)
        self._include_categories = []
        # per-category bounding areas as [top, left, bottom, right] fractions
        self._category_areas = {}
        for category in categories:
            if isinstance(category, dict):
                category_name = category.get(CONF_CATEGORY)
                category_area = category.get(CONF_AREA)
                self._include_categories.append(category_name)
                self._category_areas[category_name] = [0, 0, 1, 1]
                if category_area:
                    self._category_areas[category_name] = [
                        category_area.get(CONF_TOP),
                        category_area.get(CONF_LEFT),
                        category_area.get(CONF_BOTTOM),
                        category_area.get(CONF_RIGHT),
                    ]
            else:
                # bare string category: include it with the full-image area
                self._include_categories.append(category)
                self._category_areas[category] = [0, 0, 1, 1]
        # Handle global detection area
        self._area = [0, 0, 1, 1]
        area_config = model_config.get(CONF_AREA)
        if area_config:
            self._area = [
                area_config.get(CONF_TOP),
                area_config.get(CONF_LEFT),
                area_config.get(CONF_BOTTOM),
                area_config.get(CONF_RIGHT),
            ]
        template.attach(hass, self._file_out)
        self._matches = {}
        self._total_matches = 0
        self._last_image = None

    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera_entity

    @property
    def name(self):
        """Return the name of the image processor."""
        return self._name

    @property
    def state(self):
        """Return the state of the entity."""
        return self._total_matches

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        return {
            ATTR_MATCHES: self._matches,
            ATTR_SUMMARY: {
                category: len(values) for category, values in self._matches.items()
            },
            ATTR_TOTAL_MATCHES: self._total_matches,
        }

    def _save_image(self, image, matches, paths):
        """Draw configured areas and all matches onto the image, save to paths."""
        img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
        img_width, img_height = img.size
        draw = ImageDraw.Draw(img)
        # Draw custom global region/area
        if self._area != [0, 0, 1, 1]:
            draw_box(
                draw, self._area, img_width, img_height, "Detection Area", (0, 255, 255)
            )
        for category, values in matches.items():
            # Draw custom category regions/areas
            if category in self._category_areas and self._category_areas[category] != [
                0,
                0,
                1,
                1,
            ]:
                label = "{} Detection Area".format(category.capitalize())
                draw_box(
                    draw,
                    self._category_areas[category],
                    img_width,
                    img_height,
                    label,
                    (0, 255, 0),
                )
            # Draw detected objects
            for instance in values:
                label = "{0} {1:.1f}%".format(category, instance["score"])
                draw_box(
                    draw, instance["box"], img_width, img_height, label, (255, 255, 0)
                )
        for path in paths:
            _LOGGER.info("Saving results image to %s", path)
            img.save(path)

    def process_image(self, image):
        """Process the image."""
        try:
            # prefer OpenCV decoding when available (full resolution)
            import cv2  # pylint: disable=import-error

            img = cv2.imdecode(np.asarray(bytearray(image)), cv2.IMREAD_UNCHANGED)
            inp = img[:, :, [2, 1, 0]]  # BGR->RGB
            inp_expanded = inp.reshape(1, inp.shape[0], inp.shape[1], 3)
        except ImportError:
            # PIL fallback: downscale to keep inference time reasonable
            img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
            img.thumbnail((460, 460), Image.ANTIALIAS)
            img_width, img_height = img.size
            inp = (
                np.array(img.getdata())
                .reshape((img_height, img_width, 3))
                .astype(np.uint8)
            )
            inp_expanded = np.expand_dims(inp, axis=0)
        image_tensor = self._graph.get_tensor_by_name("image_tensor:0")
        boxes = self._graph.get_tensor_by_name("detection_boxes:0")
        scores = self._graph.get_tensor_by_name("detection_scores:0")
        classes = self._graph.get_tensor_by_name("detection_classes:0")
        boxes, scores, classes = self._session.run(
            [boxes, scores, classes], feed_dict={image_tensor: inp_expanded}
        )
        boxes, scores, classes = map(np.squeeze, [boxes, scores, classes])
        classes = classes.astype(int)
        matches = {}
        total_matches = 0
        for box, score, obj_class in zip(boxes, scores, classes):
            score = score * 100
            boxes = box.tolist()
            # Exclude matches below min confidence value
            if score < self._min_confidence:
                continue
            # Exclude matches outside global area definition
            if (
                boxes[0] < self._area[0]
                or boxes[1] < self._area[1]
                or boxes[2] > self._area[2]
                or boxes[3] > self._area[3]
            ):
                continue
            category = self._category_index[obj_class]["name"]
            # Exclude unlisted categories
            if self._include_categories and category not in self._include_categories:
                continue
            # Exclude matches outside category specific area definition
            if self._category_areas and (
                boxes[0] < self._category_areas[category][0]
                or boxes[1] < self._category_areas[category][1]
                or boxes[2] > self._category_areas[category][2]
                or boxes[3] > self._category_areas[category][3]
            ):
                continue
            # If we got here, we should include it
            if category not in matches.keys():
                matches[category] = []
            matches[category].append({"score": float(score), "box": boxes})
            total_matches += 1
        # Save Images
        if total_matches and self._file_out:
            paths = []
            for path_template in self._file_out:
                if isinstance(path_template, template.Template):
                    paths.append(
                        path_template.render(camera_entity=self._camera_entity)
                    )
                else:
                    paths.append(path_template)
            self._save_image(image, matches, paths)
        self._matches = matches
        self._total_matches = total_matches
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the distributed values library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import packed_distributed_variable as packed
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import ps_values
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util as ds_test_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute import tpu_values
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import save_context
from tensorflow.python.saved_model import save_options
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.types import core
from tensorflow.python.util import nest
def _device_str(d):
return "/device:GPU:" + str(d)
def _nested_value(d):
return ("a" + d, ["b" + d, {"c": "d" + d, "e": "f" + d}, "g" + d], "h" + d)
def _make_mirrored_val(init_val=5.0):
    """Return a Mirrored value holding ``init_val`` once per test device."""
    components = []
    for device in ("/device:GPU:0", "/device:CPU:0"):
        with ops.device(device):
            components.append(constant_op.constant(init_val))
    return values_lib.Mirrored(components)
def _make_mirrored(distribution=None):
    """Create a SUM-aggregating MirroredVariable on the strategy's devices.

    Falls back to one GPU plus CPU when no distribution is supplied; under a
    TPU strategy the TPU-specific variable class is used instead.
    """
    if distribution:
        devices = distribution.extended.worker_devices
    else:
        devices = ["/device:GPU:0", "/device:CPU:0"]
    components = []
    for device, var_name, init in zip(devices, ["v", "v/replica"], [1., 2.]):
        with ops.device(device):
            components.append(
                variable_scope.get_variable(
                    name=var_name, initializer=init, use_resource=True))
    if (distribution is not None) and isinstance(distribution, _TPU_STRATEGIES):
        var_cls = tpu_values.TPUMirroredVariable
    else:
        var_cls = values_lib.MirroredVariable
    return var_cls(distribution, components, variable_scope.VariableAggregation.SUM)
def mirrored_and_tpu_strategy_combinations():
    """Test combinations covering mirrored (GPU+CPU, two-GPU) and TPU
    strategies, each in both graph and eager mode."""
    return combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
            strategy_combinations.tpu_strategy,
            strategy_combinations.tpu_strategy_packed_var,
        ],
        mode=["graph", "eager"])
class DistributedValuesTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueFromTensor(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
single_value = constant_op.constant(1)
def value_fn(ctx):
del ctx
return single_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
self.assertAllEqual(
ds_test_util.gather(distribution, distributed_values),
constant_op.constant(1., shape=(distribution.num_replicas_in_sync)))
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueSingleNumpyArrayConstant(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
array_value = np.array([1., 2., 3.])
def value_fn(ctx):
del ctx
return array_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
self.assertAllEqual(
ds_test_util.gather(distribution, distributed_values).numpy(),
[[1., 2., 3.]] * distribution.num_replicas_in_sync)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueTupleConstant(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
tuple_value = (1., 2., 3.)
def value_fn(ctx):
del ctx
return tuple_value
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
# Expected output for 2 replicas:
# ([1.0, 1.0], [2.0, 2.0], [3.0, 3.0])
expected = tuple([v for i in range(distribution.num_replicas_in_sync)]
for v in tuple_value)
self.assertAllEqual(distributed_values, expected)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueNestedStructurePerReplica(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
tuple_value = (1., 2., 3.)
def value_fn(ctx):
per_replica = []
for val in tuple_value:
per_replica.append(val * ctx.replica_id_in_sync_group)
return tuple(per_replica)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
# Expected output for 2 replicas:
# ([0.0, 1.0], [0.0, 2.0], [0.0, 3.0])
expected = tuple([v * i for i in range(distribution.num_replicas_in_sync)]
for v in tuple_value)
self.assertAllEqual(distributed_values, expected)
# NOTE(priyag): Cannot test this with MultiWorkerMirroredStrategy because
# collective ops do not support SparseTensors.
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies_minus_default,
mode=["eager"]
))
def testMakeDistributedValueSpareTensor(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
def value_fn(ctx):
del ctx
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
local_results = distribution.experimental_local_results(distributed_values)
for i in range(distribution.num_replicas_in_sync):
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(local_results[i]),
[[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueExtractFromArray(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
multiple_values = range(distribution.num_replicas_in_sync)
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
distributed_values = ds_test_util.gather(distribution, distributed_values)
expected = range(distribution.num_replicas_in_sync)
self.assertAllEqual(distributed_values, expected)
@combinations.generate(
combinations.combine(
distribution=(strategy_combinations.all_strategies_minus_default +
strategy_combinations.multiworker_strategies),
mode=["eager"]
))
def testMakeDistributedValueAndRun(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
@def_function.function
def run():
multiple_values = range(distribution.num_replicas_in_sync)
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
def computation(x):
return math_ops.square(x)
outputs = ds_test_util.gather(
distribution,
distribution.run(computation, args=(distributed_values,)))
return outputs
results = run()
expected = [i**2 for i in range(distribution.num_replicas_in_sync)]
self.assertAllEqual(results, expected)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.central_storage_strategy_with_two_gpus,
] + strategy_combinations.multiworker_strategies,
mode=["eager"]))
def testMakeDistributedValueDefaultDevicePlacement(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
def value_fn(ctx):
del ctx
return constant_op.constant(1.0)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
default_device = array_ops.identity(constant_op.constant(1.0)).device
for i in range(len(distribution.extended.worker_devices)):
self.assertAllEqual(distributed_values._values[i].device, default_device)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.central_storage_strategy_with_two_gpus,
] + strategy_combinations.multiworker_strategies,
mode=["eager"],
op_type=[constant_op.constant, array_ops.identity]))
def testMakeDistributedValueExplicitDevicePlacement(self, distribution,
op_type):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
worker_devices = distribution.extended.worker_devices
def value_fn(ctx):
# In multi client setup, worker_devices is just the devices on that
# worker.
worker_device_id = ctx.replica_id_in_sync_group % len(worker_devices)
with ops.device(worker_devices[worker_device_id]):
return op_type(1.0)
distributed_values = (
distribution.experimental_distribute_values_from_function(value_fn))
for i in range(len(distribution.extended.worker_devices)):
self.assertAllEqual(distributed_values._values[i].device,
worker_devices[i])
class PerWorkerResourceTest(test.TestCase, parameterized.TestCase):
  """Verifies tracing behavior that PerWorkerResource depends on."""

  @combinations.generate(
      combinations.combine(dataset_fn_as_tf_function=[True, False]))
  def testMapFnTracing(self, dataset_fn_as_tf_function):
    """Counts map_fn tracings across several input pipelines.

    For a PerWorkerResource to correctly behave when used in dataset.map, the
    map_fn must not be traced only once, so that PerWorkerResource.local_table
    can return the correct resource. This test can detect the potential
    breakage of this behavior on TAP.
    """
    self._traced_once = 0

    def map_fn(x):
      self._traced_once += 1
      return x

    def dataset_fn():
      dataset = dataset_ops.DatasetV2.from_tensors([0, 1, 2]).repeat().batch(
          2, drop_remainder=True)
      return dataset.map(map_fn)

    number_of_input_pipelines = 5
    if dataset_fn_as_tf_function:
      # A tf.function-wrapped dataset_fn traces map_fn exactly once.
      dataset_fn = def_function.function(dataset_fn)
      expected_tracing_times = 1
    else:
      expected_tracing_times = number_of_input_pipelines

    datasets = [dataset_fn() for _ in range(number_of_input_pipelines)]
    del datasets  # Only the tracing count matters.
    self.assertEqual(self._traced_once, expected_tracing_times)
class DistributedDelegateTest(test.TestCase):
  """Checks that DistributedDelegate forwards attribute access, Python
  operator overloads, and copy operations to its first wrapped value."""
  @test_util.run_in_graph_and_eager_modes
  def testGetAttr(self):
    """Attribute reads resolve against the first component; attributes the
    component lacks raise AttributeError."""
    class Foo(object):
      def __init__(self, x):
        self.x = x
    v = values_lib.DistributedDelegate((Foo(7), Foo(8)))
    self.assertEqual(7, v.x)
    with self.assertRaises(AttributeError):
      _ = v.y
  @test_util.run_in_graph_and_eager_modes
  def testOperatorOverride(self):
    """Operators delegate to the first value (7); the second (8) is unused.
    Indexing is not delegated and raises TypeError."""
    v = values_lib.DistributedDelegate((7, 8))
    # v should act like int(7).
    self.assertEqual(8, v + 1)
    self.assertEqual(10, 3 + v)
    self.assertEqual(14, v + v)
    self.assertEqual(5, v - 2)
    self.assertEqual(6, 13 - v)
    self.assertEqual(0, v - v)
    self.assertEqual(14, v * 2)
    self.assertEqual(21, 3 * v)
    self.assertEqual(49, v * v)
    self.assertEqual(3.5, v / 2)
    self.assertEqual(1.5, 10.5 / v)
    self.assertEqual(3, v // 2)
    self.assertEqual(2, 15 // v)
    self.assertEqual(1, v % 2)
    self.assertEqual(2, 16 % v)
    # pylint: disable=g-generic-assert
    self.assertTrue(v < 12)
    self.assertTrue(v <= 12)
    self.assertFalse(v > 12)
    self.assertFalse(v >= 12)
    self.assertFalse(12 < v)
    self.assertFalse(12 <= v)
    self.assertTrue(12 > v)
    self.assertTrue(12 >= v)
    # pylint: enable=g-generic-assert
    self.assertEqual(3, v & 3)
    self.assertEqual(3, 11 & v)
    self.assertEqual(15, v | 8)
    self.assertEqual(23, 16 | v)
    self.assertEqual(4, v ^ 3)
    self.assertEqual(12, 11 ^ v)
    self.assertEqual(343, pow(v, 3))
    self.assertEqual(3, pow(v, 3, 10))
    self.assertEqual(128, pow(2, v))
    self.assertEqual(-7, -v)
    self.assertEqual(~7, ~v)
    self.assertEqual(7, abs(v))
    with self.assertRaises(TypeError):
      _ = v[2]
  @test_util.run_in_graph_and_eager_modes
  def testCopy(self):
    """Both copy.copy and copy.deepcopy preserve the delegated attribute."""
    class Foo(object):
      def __init__(self, x):
        self.x = x
    v = values_lib.DistributedDelegate((Foo(7), Foo(8)))
    v_shallow_copy = copy.copy(v)
    self.assertEqual(v.x, v_shallow_copy.x)
    v_deep_copy = copy.deepcopy(v)
    self.assertEqual(v.x, v_deep_copy.x)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_one_cpu,
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
            strategy_combinations.tpu_strategy,
            strategy_combinations.tpu_strategy_packed_var,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
        ],
        synchronization=[
            variables_lib.VariableSynchronization.ON_READ,
            variables_lib.VariableSynchronization.ON_WRITE,
        ],
        aggregation=[
            variables_lib.VariableAggregation.MEAN,
            variables_lib.VariableAggregation.SUM,
            variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
        ],
        mode=["graph", "eager"],
        use_var_policy=[True, False]))
class DistributedVariableTest(test.TestCase, parameterized.TestCase):
  """Exercises DistributedVariable across the strategy x synchronization x
  aggregation grid: type, checkpointing, copying, assignment and saving."""
  def testExtendsVariable(self, distribution, synchronization, aggregation):
    """A variable created under a strategy scope is still a tf.Variable."""
    with distribution.scope():
      v = variables_lib.Variable(
          1., synchronization=synchronization, aggregation=aggregation)
    self.assertIsInstance(v, variables_lib.Variable)
  def testCheckpointing(self, distribution, synchronization, aggregation, mode):
    """Values saved to a checkpoint survive a later overwrite and restore."""
    if (isinstance(distribution,
                   collective_all_reduce_strategy.CollectiveAllReduceStrategy)
        and mode == "graph"):
      self.skipTest("MWMS combinations tests do not work well in graph mode.")
    with distribution.scope():
      v = variables_lib.Variable(
          constant_op.constant([1., 2., 3., 4]),
          synchronization=synchronization,
          aggregation=aggregation)
    self.evaluate(v.initializer)
    before_save = self.evaluate(v.read_value())
    # Save random weights into checkpoint.
    checkpoint = trackable_utils.Checkpoint(v=v)
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    with self.test_session():
      save_path = checkpoint.save(prefix)
    # Assign inverted value.
    self.evaluate(v.assign(constant_op.constant([4., 3., 2., 1.])))
    after_assign = self.evaluate(v.read_value())
    self.assertNotAllClose(before_save, after_assign)
    # Restore from the checkpoint.
    with self.test_session():
      checkpoint.restore(save_path).assert_consumed().run_restore_ops()
    after_restore = self.evaluate(v)
    self.assertAllClose(before_save, after_restore)
  def testTraceback(self, distribution, synchronization, aggregation):
    """Re-creating an existing variable raises with a useful message."""
    if context.executing_eagerly():
      self.skipTest("does not apply to eager")
    with distribution.scope():
      variable_scope.get_variable(
          name="testVar",
          initializer=1.,
          use_resource=True,
          synchronization=synchronization,
          aggregation=aggregation)
      with self.assertRaisesRegex(ValueError,
                                  "Variable testVar already exists"):
        variable_scope.get_variable(
            name="testVar",
            initializer=1.,
            use_resource=True,
            synchronization=synchronization,
            aggregation=aggregation)
  def testSelectReplica(self, distribution, synchronization, aggregation):
    """select_replica on a DistributedVariable returns the variable itself."""
    with distribution.scope():
      v = variables_lib.Variable(
          1., synchronization=synchronization, aggregation=aggregation)
    self.assertIs(v, distribute_utils.select_replica(0, v))
  def testIsTensorLike(self, distribution, synchronization, aggregation):
    """The variable is a core.Tensor in both cross-replica and replica
    contexts."""
    if isinstance(distribution.extended,
                  tpu_strategy.TPUExtended) and context.executing_eagerly():
      self.skipTest("TPU doesn't support pure eager")
    with distribution.scope():
      v = variables_lib.Variable(
          0., synchronization=synchronization, aggregation=aggregation)
    # In cross replica context.
    self.assertIsInstance(v, core.Tensor)
    # In replica context.
    distribution.run(
        lambda v: self.assertIsInstance(v, core.Tensor), args=(v,))
  def testAssignReturnValueIsTensorLike(self, distribution, synchronization,
                                        aggregation):
    """assign/assign_add/assign_sub return core.Tensor values."""
    if isinstance(distribution.extended, tpu_strategy.TPUExtended):
      if context.executing_eagerly():
        self.skipTest("TPU doesn't support pure eager")
      else:
        self.skipTest("b/152076846")
    with distribution.scope():
      v = variables_lib.Variable(
          0., synchronization=synchronization, aggregation=aggregation)
    def assert_is_tensor_like(v):
      # We can't use Python literals because they are treated as non-distributed
      # values is not allowed when aggregation is SUM. See
      # `cross_device_ops.reduce_non_distributed_value`.
      delta = array_ops.identity(1.)
      self.assertIsInstance(v.assign(delta), core.Tensor)
      self.assertIsInstance(v.assign_sub(delta), core.Tensor)
      self.assertIsInstance(v.assign_add(delta), core.Tensor)
    # In cross replica context we return a PerReplica which is not Tensor like
    # all the time yet.
    if (synchronization == variables_lib.VariableSynchronization.ON_READ and
        aggregation != variables_lib.VariableAggregation.SUM):
      assert_is_tensor_like(v)
    # In replica context.
    distribution.run(assert_is_tensor_like, args=(v,))
  def testDeepCopy(self, distribution, synchronization,
                   aggregation):
    """copy.deepcopy produces distinct components with equal values,
    whether invoked inside or outside the strategy scope."""
    if not context.executing_eagerly():
      self.skipTest("deepcopy only supported in eager mode")
    with distribution.scope():
      v = variables_lib.Variable(
          0., synchronization=synchronization, aggregation=aggregation)
      in_dist_copy = copy.deepcopy(v)
    out_dist_copy = copy.deepcopy(v)
    def assert_is_deep_copy(v1, v2):
      self.assertIsInstance(v2, type(v1))
      self.assertEqual(v1.aggregation, v2.aggregation)
      self.assertEqual(v1.distribute_strategy, v2.distribute_strategy)
      if isinstance(v1, ps_values.AggregatingVariable):
        self.assertIsInstance(v2.get(), type(v1.get()))
        self.assertNotEqual(id(v1.get()), id(v2.get()))
      else:
        if v1._policy:
          self.assertNotEqual(id(v1._policy), id(v2._policy))  # pylint: disable=protected-access
        else:
          self.assertEqual(id(v1._policy), id(v2._policy))  # pylint: disable=protected-access
        self.assertEqual(len(v1.values), len(v2.values))
        for (v1v, v2v) in zip(v1.values, v2.values):
          self.assertEqual(v1v.device, v2v.device)
          self.assertNotEqual(id(v1v), id(v2v))
          self.assertAllEqual(self.evaluate(v1.values),
                              self.evaluate(v2.values))
    self.evaluate(variables_lib.global_variables_initializer())
    if not isinstance(distribution.extended, tpu_strategy.TPUExtended):
      distribution.run(assert_is_deep_copy, args=(v, in_dist_copy))
      distribution.run(assert_is_deep_copy, args=(v, out_dist_copy))
  def testAssignSignature(self, distribution, synchronization, aggregation):
    # This test verifies assign*() can be called in the same way as normal
    # variables.
    with distribution.scope():
      v = variables_lib.Variable(
          0., synchronization=synchronization, aggregation=aggregation)
      def assign():
        one = constant_op.constant(1.)
        v.assign(one, True, "assign", False)
        # TODO(b/154017756): SyncOnReadVariable.assign() doesn't support passing
        # value as a keyword argument.
        v.assign(one, use_locking=True, name="assign", read_value=False)
        v.assign_add(one, True, "assign", False)
        v.assign_add(one, use_locking=True, name="assign", read_value=False)
        v.assign_sub(one, True, "assign", False)
        v.assign_sub(one, use_locking=True, name="assign", read_value=False)
        # Return something for graph mode to fetch.
        return constant_op.constant(1)
      self.evaluate(variables_lib.global_variables_initializer())
      if not (synchronization == variables_lib.VariableSynchronization.ON_READ
              and aggregation == variables_lib.VariableAggregation.SUM):
        self.evaluate(distribution.experimental_local_results(assign()))
      if not (isinstance(distribution.extended, tpu_strategy.TPUExtended) and
              context.executing_eagerly()):
        self.evaluate(
            distribution.experimental_local_results(distribution.run(assign)))
  def testStrategyExtendedUpdate(self, distribution, synchronization,
                                 aggregation):
    """extended.update applies per-replica values to the right components."""
    if len(distribution.extended.parameter_devices) != 2:
      self.skipTest("n/a: needs exactly two parameter devices")
    if (synchronization == variables_lib.VariableSynchronization.ON_WRITE and
        aggregation != variables_lib.VariableAggregation.NONE):
      self.skipTest("n/a: doesn't apply to ON_WRITE variable with aggregation")
    with distribution.scope():
      v = variables_lib.Variable(
          0., synchronization=synchronization, aggregation=aggregation)
    value = values_lib.PerReplica([1., 2.])
    assign_fn = lambda var, value: var.assign(value)
    self.evaluate(distribution.extended.update(v, assign_fn, args=(value,)))
    self.assertAllEqual(self.evaluate(v.values), [1., 2.])
    assign_add_fn = lambda var, value: var.assign_add(value)
    self.evaluate(distribution.extended.update(v, assign_add_fn, args=(value,)))
    self.assertAllEqual(self.evaluate(v.values), [2., 4.])
    assign_sub_fn = lambda var, value: var.assign_sub(value)
    self.evaluate(distribution.extended.update(v, assign_sub_fn, args=(value,)))
    self.assertAllEqual(self.evaluate(v.values), [1., 2.])
    # Doubles each component: x + (x.value() + x.read_value()) == 3x.
    read_assign_fn = lambda var, value: var.assign_add(var.value() + var.
                                                       read_value())
    self.evaluate(
        distribution.extended.update(v, read_assign_fn, args=(value,)))
    self.assertAllEqual(self.evaluate(v.values), [3., 6.])
  def testSaveNonDistributed(self, distribution, synchronization, aggregation):
    # This test verifies that the DistributedVariable behave like the primary
    # variable when saving a non-distributed version of the model (the default).
    # The test asserts that the function traced under SaveContext has no device
    # annotations and only reference the primary component of the variable. Note
    # that please avoid capturing other eager tensors in this test to make the
    # assertion easy.
    if isinstance(distribution.extended,
                  parameter_server_strategy.ParameterServerStrategyExtended):
      self.skipTest("b/148689177: AggregatingVariable doesn't "
                    "conform to Variable interface well")
    # tf.function requires the return value to be Tensors, which is not always
    # case for properties and methods of Variable, so we simply discard the
    # return values.
    def _discard_return(f):
      f()
      return
    def _test(f, v):
      # This verifies that the function under SaveContext:
      # - contains no device annotations.
      # - only references the primary component of the variable.
      g = def_function.function(lambda: _discard_return(f))
      options = save_options.SaveOptions(
          experimental_variable_policy=save_options.VariablePolicy.NONE)
      with save_context.save_context(options):
        # The graph should contain no device.
        graph = g.get_concrete_function().graph
        for op in graph.get_operations():
          self.assertEqual(op.device, "", msg=str(op))
        # The function should only capture the primary variable. Note that it
        # may not have captures, e.g. v.aggregation.
        captures = list(graph.captures)
        self.assertLessEqual(len(captures), 1)
        if graph.captures:
          self.assertIs(captures[0][0], v._primary.handle)
    def _assert(cond):
      return control_flow_ops.Assert(cond, [cond])
    with distribution.scope():
      # We use four variables for convenience reasons. They have no special
      # meaning.
      # - v is used whenever possible.
      # - w is used for scatter and gather, which require the variable to be
      # non-scalar.
      # - y is used when the dtype needs to be integer. Note that aggregation
      # cannot be MEAN for integers.
      v = variables_lib.Variable(
          0.,
          synchronization=synchronization,
          aggregation=aggregation,
          trainable=True)
      w = variables_lib.Variable([0., 0., 0.],
                                 synchronization=synchronization,
                                 aggregation=aggregation,
                                 trainable=True)
      if aggregation != variables_lib.VariableAggregation.MEAN:
        y = variables_lib.Variable(
            0,
            synchronization=synchronization,
            aggregation=aggregation)
    # pylint: disable=g-long-lambda
    # tf.Variable properties.
    _test(lambda: self.assertEqual(v.aggregation, aggregation), v)
    _test(lambda: self.assertIs(v.constraint, None), v)
    # TODO(crccw): should we raise an error instead?
    _test(lambda: self.assertEqual(v.device, v._primary.device), v)
    _test(lambda: self.assertEqual(v.dtype, dtypes.float32), v)
    if not context.executing_eagerly():
      _test(lambda: self.assertIs(v.graph, v._primary.graph), v)
    if not context.executing_eagerly():
      _test(lambda: _assert(v.initial_value == 0), v)
    _test(lambda: self.assertIs(v.initializer, v._primary.initializer), v)
    _test(lambda: self.assertEqual(v.name, "Variable:0"), v)
    if not context.executing_eagerly():
      _test(lambda: self.assertIs(v.op, v._primary.op), v)
    _test(lambda: self.assertEqual(v.shape, tensor_shape.TensorShape(())), v)
    _test(lambda: self.assertEqual(v.synchronization, synchronization), v)
    _test(lambda: self.assertTrue(v.trainable, True), v)
    # tf.Variable methods.
    _test(lambda: check_ops.assert_equal_v2(v.assign(1.), 1.), v)
    _test(lambda: check_ops.assert_equal_v2(v.assign_add(1.), 2.), v)
    _test(lambda: check_ops.assert_equal_v2(v.assign_sub(1.), 1.), v)
    # TODO(b/148689177): Implement batch_scatter_update.
    # count_up_to() is skipped since it's deprecated.
    # eval() is skipped since it shouldn't called in a tf.function.
    # experimental_ref() is skipped since it's deprecated.
    # from_proto() is skipped since it shouldn't called in a tf.function.
    # TODO(b/148689177): Implement gather_nd.
    _test(
        lambda: check_ops.assert_equal_v2(v.get_shape(),
                                          tensor_shape.TensorShape(())), v)
    # initialized_value() is skipped since it shouldn't called in a tf.function.
    # load() is skipped since it shouldn't called in a tf.function.
    _test(lambda: check_ops.assert_equal_v2(v.read_value(), 1.), v)
    # ref() is skipped since it shouldn't called in a tf.function.
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_add(_make_index_slices(values=[1., 2.], indices=[0, 2])),
            [1., 0., 2.]), w)
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_div(_make_index_slices(values=[4., 2.], indices=[0, 2])),
            [0.25, 0., 1.]), w)
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_max(_make_index_slices(values=[1., 0.5], indices=[1, 2])),
            [0.25, 1., 1.]), w)
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_min(_make_index_slices(values=[1., 0.5], indices=[0, 1])),
            [0.25, 0.5, 1.]), w)
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_mul(_make_index_slices(values=[2., 0.5], indices=[0, 1])),
            [0.5, 0.25, 1.]), w)
    # TODO(b/148689177): Implement scatter_nd_*
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_sub(_make_index_slices(values=[2., 0.5], indices=[0, 1])),
            [-1.5, -0.25, 1.]), w)
    _test(
        lambda: check_ops.assert_equal_v2(
            w.scatter_update(
                _make_index_slices(values=[2., 0.5], indices=[0, 1])),
            [2., 0.5, 1.]), w)
    # set_shape() is skipped since ResourceVariable doesn't implement it.
    # to_proto() is skipped since it shouldn't called in a tf.function.
    _test(lambda: check_ops.assert_equal_v2(v.value(), 1.), v)
    # DistributedVariable should be treated as ResourceVariable, so it needs to
    # conform to ResourceVariable interface as well.
    _test(lambda: self.assertIs(v.handle, v._primary.handle), v)
    # Convert to tensor.
    _test(lambda: check_ops.assert_equal_v2(ops.convert_to_tensor(v), 1.), v)
    # Control dependency.
    def _with_control_dep():
      with ops.control_dependencies([v.assign(1.)]):
        return array_ops.identity(1)
    _test(_with_control_dep, v)
    # Operator overloads.
    _test(lambda: check_ops.assert_equal_v2(v.assign(7.), 7.), v)
    _test(lambda: check_ops.assert_equal_v2(v + 1., 8.), v)
    _test(lambda: check_ops.assert_equal_v2(3 + v, 10.), v)
    _test(lambda: check_ops.assert_equal_v2(v + v, 14.), v)
    _test(lambda: check_ops.assert_equal_v2(v - 2., 5.), v)
    _test(lambda: check_ops.assert_equal_v2(v - v, 0.), v)
    _test(lambda: check_ops.assert_equal_v2(v * 2., 14.), v)
    _test(lambda: check_ops.assert_equal_v2(3 * v, 21.), v)
    _test(lambda: check_ops.assert_equal_v2(v * v, 49.), v)
    _test(
        lambda: check_ops.assert_equal_v2(
            math_ops.cast(v / 2., dtypes.float32), 3.5), v)
    _test(
        lambda: check_ops.assert_equal_v2(
            math_ops.cast(14. / v, dtypes.float32), 2.), v)
    _test(lambda: _assert(v < 12.), v)
    _test(lambda: _assert(v <= 12.), v)
    _test(lambda: _assert(not v > 12.), v)
    _test(lambda: _assert(not v >= 12.), v)
    _test(lambda: _assert(not 12. < v), v)
    _test(lambda: _assert(not 12. <= v), v)
    _test(lambda: _assert(12. > v), v)
    _test(lambda: _assert(12. >= v), v)
    _test(lambda: check_ops.assert_near_v2(pow(v, 3.), 343.), v)
    _test(lambda: check_ops.assert_near_v2(pow(2., v), 128.), v)
    _test(lambda: check_ops.assert_equal_v2(abs(v), 7.), v)
    # Operator overloads that only works for integers.
    if aggregation != variables_lib.VariableAggregation.MEAN:
      _test(lambda: check_ops.assert_equal_v2(y.assign(7), 7), y)
      _test(lambda: check_ops.assert_equal_v2(y // 2, 3), y)
      _test(lambda: check_ops.assert_equal_v2(15 // y, 2), y)
      _test(lambda: check_ops.assert_equal_v2(y % 2, 1), y)
      _test(lambda: check_ops.assert_equal_v2(16 % y, 2), y)
      _test(lambda: check_ops.assert_equal_v2(y & 3, 3), y)
      _test(lambda: check_ops.assert_equal_v2(3 & y, 3), y)
      _test(lambda: check_ops.assert_equal_v2(y | 8, 15), y)
      _test(lambda: check_ops.assert_equal_v2(16 | y, 23), y)
      _test(lambda: check_ops.assert_equal_v2(y ^ 3, 4), y)
      _test(lambda: check_ops.assert_equal_v2(11 ^ y, 12), y)
      _test(lambda: check_ops.assert_equal_v2(-y, -7), y)
      _test(lambda: check_ops.assert_equal_v2(~y, ~7), y)
    # Index.
    if isinstance(distribution.extended, tpu_strategy.TPUExtended):
      # TODO(b/161572567): slice assignment doesn't work for TPU.
      _test(lambda: check_ops.assert_equal_v2(w[0], 2.), w)
    else:
      _test(lambda: check_ops.assert_equal_v2(w[0].assign(1.), [1., 0.5, 1.]),
            w)
      _test(lambda: check_ops.assert_equal_v2(w[0], 1.), w)
    # pylint: enable=g-long-lambda
  def testUnsaveable(self, distribution, synchronization, aggregation, mode):
    """Mutating ops (and some ON_READ reads) cannot be saved as signatures."""
    if isinstance(distribution.extended,
                  parameter_server_strategy.ParameterServerStrategyExtended):
      self.skipTest("n/a: not appliable to AggregatingVariable")
    if (isinstance(distribution,
                   collective_all_reduce_strategy.CollectiveAllReduceStrategy)
        and mode == "graph"):
      self.skipTest("MWMS combinations tests do not work well in graph mode.")
    if not distribution.extended._use_merge_call():
      self.skipTest("Unsupported combination.")
    with distribution.scope():
      v = variables_lib.Variable([1., 1.],
                                 synchronization=synchronization,
                                 aggregation=aggregation)
    with self.cached_session():
      self.evaluate(variables_lib.global_variables_initializer())
    export_dir = self.get_temp_dir()
    def _assert_unsaveable(f):
      # Ignore if it cannot be traced. Certain combinations are not supported or
      # yet or not allowed.
      try:
        f = def_function.function(f).get_concrete_function()
      except (NotImplementedError, ValueError):
        return
      with self.assertRaisesRegex(ValueError, "f_with_input_signature"):
        save.save(v, export_dir, signatures=f)
    _assert_unsaveable(lambda: v.assign(ops.convert_to_tensor([1., 1.])))
    _assert_unsaveable(lambda: v.assign_add(ops.convert_to_tensor([1., 1.])))
    _assert_unsaveable(lambda: v.assign_sub(ops.convert_to_tensor([1., 1.])))
    _assert_unsaveable(lambda: v.scatter_add(_make_index_slices([1.], [0])))
    _assert_unsaveable(lambda: v.scatter_sub(_make_index_slices([1.], [0])))
    _assert_unsaveable(lambda: v.scatter_mul(_make_index_slices([1.], [0])))
    _assert_unsaveable(lambda: v.scatter_div(_make_index_slices([1.], [0])))
    _assert_unsaveable(lambda: v.scatter_min(_make_index_slices([1.], [0])))
    _assert_unsaveable(lambda: v.scatter_max(_make_index_slices([1.], [0])))
    _assert_unsaveable(lambda: v.scatter_update(_make_index_slices([1.], [0])))
    # Reading a ON_READ variable should be unsaveable if either:
    # 1) CollectiveAllReduceStrategy, and aggregation is MEAN/SUM.
    # 2) aggregation is SUM.
    if (synchronization == variables_lib.VariableSynchronization.ON_READ and
        (aggregation == variables_lib.VariableAggregation.SUM or
         (not distribution.extended._use_merge_call()) or
         (isinstance(distribution.extended,
                     collective_all_reduce_strategy.CollectiveAllReduceExtended)
          and aggregation == variables_lib.VariableAggregation.MEAN))):
      _assert_unsaveable(v.read_value)
      _assert_unsaveable(v.value)
      _assert_unsaveable(lambda: ops.convert_to_tensor(v))
    else:
      # Otherwise reading a variable should be saveable.
      @def_function.function
      def f():
        v.read_value()
        v.value()
        return ops.convert_to_tensor(v)
      with self.cached_session():
        save.save(v, export_dir, signatures=f.get_concrete_function())
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_one_cpu,
            strategy_combinations.tpu_strategy,
        ],
        mode=["eager"]))
class PackedDistributedVariableTest(test.TestCase, parameterized.TestCase):
  """Tests variable packing when _enable_packed_variable_in_eager_mode is
  set on the strategy."""
  def testPackedVariable(self, distribution):
    """Packing is off by default; once enabled, per-replica reads/writes go
    through a PackedVarAndDevice on the replica's device."""
    with distribution.scope():
      v0 = variables_lib.Variable(0.)
    self.assertIsNone(v0._packed_var)
    distribution._enable_packed_variable_in_eager_mode = True
    with distribution.scope():
      v1 = variables_lib.Variable(0)
      self.assertIsInstance(v1._packed_var, packed.PackedDistributedVariable)
    devices = v1._devices
    # Write each replica id into its own component (device 0 keeps 0).
    for i in range(1, len(devices)):
      with distribute_lib.ReplicaContext(distribution, i):
        v1.assign(i)
    # Outside any replica context, _get() resolves to device 0's component.
    val = v1._get()
    self.assertIsInstance(val, packed.PackedVarAndDevice)
    self.assertEqual(val.device, devices[0])
    self.assertEqual(self.evaluate(val.read_value()), 0)
    # Inside each replica context, _get() resolves to that replica's device
    # and reads back the value written above.
    for i in range(0, len(devices)):
      with distribute_lib.ReplicaContext(distribution, i):
        val = v1._get()
        self.assertIsInstance(val, packed.PackedVarAndDevice)
        self.assertEqual(val.device, devices[i])
        self.assertEqual(self.evaluate(val.read_value()), i)
  def testIgnorePackedVariableInSaveContext(self, distribution):
    """Inside a SaveContext the packed variable is hidden (None)."""
    distribution._enable_packed_variable_in_eager_mode = True
    with distribution.scope():
      v = variables_lib.Variable(0)
      self.assertIsInstance(
          v._packed_variable, packed.PackedDistributedVariable)
    options = save_options.SaveOptions()
    with save_context.save_context(options):
      self.assertIsNone(v._packed_variable)
class MirroredVariableTest(test.TestCase, parameterized.TestCase):
  """Basic metadata checks for MirroredVariable."""

  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  @test_util.run_in_graph_and_eager_modes(config=config)
  def testProperties(self):
    if context.num_gpus() < 1 and context.executing_eagerly():
      self.skipTest("A GPU is not available for this test in eager mode.")
    mirrored = _make_mirrored()
    primary = mirrored.values[0]
    # The mirrored variable reports the metadata of its first component.
    self.assertEqual(primary.name, mirrored.name)
    self.assertEqual(primary.dtype, mirrored.dtype)
    self.assertEqual(primary.shape, mirrored.shape)

  @test_util.run_in_graph_and_eager_modes(config=config)
  def testVariableOnAnotherDevice(self):
    component = variable_scope.get_variable(
        name="v", initializer=[1.], use_resource=True)
    mirrored = values_lib.MirroredVariable(
        None, (component,), variable_scope.VariableAggregation.MEAN)
    self.assertEqual(component.name, mirrored.name)
    self.assertEqual(component.dtype, mirrored.dtype)
    self.assertEqual(component.shape, mirrored.shape)
class MirroredVariableSaveRestoreTest(test.TestCase, parameterized.TestCase):
    """Checkpoint save/restore for MirroredVariable, alone and mixed with plain variables."""

    def _assign_mirrored(self, v, new):
        # Write one value into each component variable of the mirrored value.
        for component, value in zip(v.values, new):
            self.evaluate(component.assign(value))

    def _save_return_saver(self, sess, var):
        # Checkpoint `var` under the test temp dir; return (path, saver).
        ckpt_saver = saver_lib.Saver(var_list=[var])
        prefix = os.path.join(self.get_temp_dir(), "ckpt")
        return ckpt_saver.save(sess, prefix), ckpt_saver

    def _save(self, sess, var):
        path, _ = self._save_return_saver(sess, var)
        return path

    def _skip_without_gpu_in_eager(self):
        # Graph mode can work without a GPU because the Placer "moves" the
        # variable to a CPU (soft placement, on by default).  Eager mode has
        # no such fallback, so the GPU-targeting tests must be skipped there.
        if context.num_gpus() < 1 and context.executing_eagerly():
            self.skipTest("A GPU is not available for this test in eager mode.")

    def _save_mirrored(self, distribution):
        """Save a mirrored variable; return the checkpoint path."""
        with self.session(graph=ops.Graph()) as sess:
            mirrored = _make_mirrored(distribution)

            # Overwrite the initial values.
            self._assign_mirrored(mirrored, [3., 4.])

            # The checkpoint records the primary component's value, 3.
            path = self._save(sess, mirrored)

            # Mutate after saving so a later restore is observable.
            self._assign_mirrored(mirrored, [5., 6.])
            return path

    def _save_normal(self):
        """Save an ordinary (non-mirrored) variable; return the checkpoint path."""
        with self.session(graph=ops.Graph()) as sess:
            plain = variable_scope.get_variable(
                name="v", initializer=1., use_resource=True)

            # Overwrite the initial value, checkpoint it, then mutate again
            # so that a subsequent restore is observable.
            self.evaluate(plain.assign(3.))
            path = self._save(sess, plain)
            self.evaluate(plain.assign(5.))
            return path

    def _restore_normal(self, save_path):
        """Restore the checkpoint into a plain variable in a fresh graph."""
        with self.session(graph=ops.Graph()) as sess:
            plain = variable_scope.get_variable(
                name="v", initializer=7., use_resource=True)

            # Overwrite the initial value, then restore; the saved 3. wins.
            self.evaluate(plain.assign(8.))
            saver_lib.Saver(var_list=[plain]).restore(sess, save_path)
            self.assertEqual(3., self.evaluate(plain))

    def _restore_mirrored(self, save_path, distribution):
        """Restore the checkpoint into a mirrored variable in a fresh graph."""
        with self.session(graph=ops.Graph()) as sess:
            mirrored = _make_mirrored(distribution)
            components = mirrored.values

            # Overwrite the initial values, then restore; the single saved
            # value, 3., lands in every component.
            self._assign_mirrored(mirrored, [7., 8.])
            saver_lib.Saver(var_list=[mirrored]).restore(sess, save_path)
            self.assertEqual(
                [3., 3.], self.evaluate([components[0], components[1]]))

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveAndRestoreMirroredOneGraph(self, distribution):
        with self.cached_session() as sess:
            mirrored = _make_mirrored(distribution)
            components = mirrored.values

            # Overwrite the initial values; checkpoint records the primary
            # component's value, 3.
            self._assign_mirrored(mirrored, [3., 4.])
            save_path, ckpt_saver = self._save_return_saver(sess, mirrored)

            # Change the values between save and restore, then restore the
            # saved 3. into both components.
            self._assign_mirrored(mirrored, [5., 6.])
            ckpt_saver.restore(sess, save_path)
            self.assertEqual(
                [3., 3.], self.evaluate([components[0], components[1]]))

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveMirroredRestoreMirrored(self, distribution):
        self._skip_without_gpu_in_eager()
        self._restore_mirrored(self._save_mirrored(distribution), distribution)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveMirroredRestoreNormal(self, distribution):
        self._skip_without_gpu_in_eager()
        self._restore_normal(self._save_mirrored(distribution))

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveNormalRestoreMirrored(self, distribution):
        self._skip_without_gpu_in_eager()
        self._restore_mirrored(self._save_normal(), distribution)
_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)


def _make_replica_local(method, strategy=None):
    """Build per-device component variables plus a sync-on-read wrapper.

    Returns a (components, wrapper) pair: the list of underlying variables
    (one per device) and the SyncOnReadVariable constructed over them with
    aggregation `method`.
    """
    if strategy is None:
        devices = ("/device:GPU:0", "/device:CPU:0")
    else:
        devices = strategy.extended.worker_devices

    components = []
    for device, var_name, init_value in zip(
            devices, ["v", "v/replica"], [1., 2.]):
        with ops.device(device):
            components.append(
                variable_scope.get_variable(
                    name=var_name, initializer=init_value, use_resource=True))

    # TPU strategies need the TPU-specific sync-on-read wrapper class.
    uses_tpu = strategy is not None and isinstance(strategy, _TPU_STRATEGIES)
    if uses_tpu:
        var_cls = tpu_values.TPUSyncOnReadVariable
    else:
        var_cls = values_lib.SyncOnReadVariable
    return components, var_cls(strategy, components, method)
class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase):
    """Tests for SyncOnReadVariable: properties, conversion, and save/restore.

    Save/restore semantics depend on the aggregation mode: SUM checkpoints the
    sum of the components and restores an equal share to each component, while
    MEAN checkpoints the mean and restores it verbatim to every component.
    """

    def _assign_replica_local(self, v, new):
        # Assign each component variable on its own device.
        for var, n in zip(v, new):
            with ops.device(var.device):
                self.evaluate(var.assign(n))

    def _save_return_saver(self, sess, var):
        # Checkpoint `var` under the test temp dir; return (path, saver).
        saver = saver_lib.Saver(var_list=[var])
        test_dir = self.get_temp_dir()
        prefix = os.path.join(test_dir, "ckpt")
        return saver.save(sess, prefix), saver

    def _save(self, sess, var):
        save_path, _ = self._save_return_saver(sess, var)
        return save_path

    # Soft placement lets variable ops fall back to CPU when no GPU exists;
    # consumed by the decorator on testProperties below at class-creation time.
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = True

    @test_util.run_in_graph_and_eager_modes(config=config)
    def testProperties(self):
        if context.num_gpus() < 1 and context.executing_eagerly():
            self.skipTest("A GPU is not available for this test in eager mode.")
        v, replica_local = _make_replica_local(
            variable_scope.VariableAggregation.SUM)

        # The wrapper mirrors the metadata of its first component variable.
        self.assertEqual(v[0].constraint, replica_local.constraint)
        self.assertEqual(v[0].name, replica_local.name)
        self.assertEqual(v[0].dtype, replica_local.dtype)
        self.assertEqual(v[0].shape, replica_local.shape)
        self.assertEqual(variable_scope.VariableAggregation.SUM,
                         replica_local.aggregation)

    @combinations.generate(
        combinations.combine(
            distribution=[
                strategy_combinations.mirrored_strategy_with_gpu_and_cpu
            ],
            mode=["eager"]))
    def testCanPassToDefFun(self, distribution):

        @def_function.function
        def add1(x):
            return x + 1.

        with distribution.scope():
            v = variables_lib.Variable(
                1.,
                aggregation=variables_lib.VariableAggregation.MEAN,
                synchronization=variables_lib.VariableSynchronization.ON_READ)

        # A sync-on-read variable is accepted as a tf.function argument.
        self.assertEqual(2., self.evaluate(add1(v)))

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testTensorConversion(self, distribution):
        with context.graph_mode():
            _, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.SUM, distribution)
            converted = ops.convert_to_tensor(replica_local, as_ref=False)
            self.assertIsInstance(converted, ops.Tensor)
            self.assertEqual(converted.dtype, replica_local.dtype)

            converted = ops.convert_to_tensor(replica_local, as_ref=True)
            # Resource variables are converted to plain tensors as well when
            # as_ref is True.
            self.assertIsInstance(converted, ops.Tensor)
            self.assertEqual(converted.dtype, replica_local.dtype)

    @combinations.generate(combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
            strategy_combinations.tpu_strategy,
            strategy_combinations.tpu_strategy_packed_var,
        ], mode=["eager"]))
    def testValueInCrossReplicaContext(self, distribution):
        value_list, replica_local = _make_replica_local(
            variable_scope.VariableAggregation.ONLY_FIRST_REPLICA, distribution)

        # With ONLY_FIRST_REPLICA aggregation, value() in cross-replica
        # context reads the first replica's component.
        self.assertIsInstance(replica_local.value(), ops.Tensor)
        self.assertEqual(self.evaluate(replica_local.value()),
                         self.evaluate(value_list[0].value()))

    @combinations.generate(
        combinations.combine(
            distribution=[
                strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
                strategy_combinations.tpu_strategy_packed_var,
            ],
            mode=["eager"]))
    def testValueInDefaultReplicaContext(self, distribution):
        with distribution.scope():
            v1 = variables_lib.Variable(
                0.0,
                aggregation=variables_lib.VariableAggregation.SUM,
                synchronization=variables_lib.VariableSynchronization.ON_READ)
            v2 = variables_lib.Variable(
                0.0,
                aggregation=variables_lib.VariableAggregation.SUM,
                synchronization=variables_lib.VariableSynchronization.ON_READ)

        @def_function.function
        def replica_fn():
            v1.assign_add(1.0)
            v2.assign_add(2.0)

        distribution.run(replica_fn)
        # assumes two replicas: SUM aggregation gives (1+1) + (2+2) == 6
        # — matches the two-device strategies listed above; confirm if more
        # strategies are added.
        sum_v = v1 + v2
        self.assertEqual(sum_v, 6.0)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveAndRestoreReplicaLocalSumOneGraph(self, distribution):
        with self.cached_session() as sess:
            v, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.SUM, distribution)

            # Overwrite the initial values.
            self._assign_replica_local(v, [3., 4.])

            with distribution.scope():
                # Saves the current value of v[0] + v[1], 7.
                save_path, saver = self._save_return_saver(sess, replica_local)

                # Change the values between save and restore.
                self._assign_replica_local(v, [5., 6.])

                # Restores the saved value of 7. which gets divided equally
                # between the variables.
                saver.restore(sess, save_path)
                self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveAndRestoreReplicaLocalMeanOneGraph(self, distribution):
        if context.num_gpus() < 1 and context.executing_eagerly():
            self.skipTest("A GPU is not available for this test in eager mode.")

        with self.cached_session() as sess:
            v, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.MEAN, distribution)

            # Overwrite the initial values.
            self._assign_replica_local(v, [3., 4.])

            with distribution.scope():
                # Saves the current value of (v[0] + v[1])/2, 3.5.
                save_path, saver = self._save_return_saver(sess, replica_local)

                # Change the values between save and restore.
                self._assign_replica_local(v, [5., 6.])

                # Restores the saved value of 3.5 to both variables.
                saver.restore(sess, save_path)
                self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))

    def _save_replica_local_mean(self, distribution):
        """Save variables with mirroring, returns save_path."""
        with self.session(graph=ops.Graph()) as sess:
            v, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.MEAN, distribution)

            # Overwrite the initial values.
            self._assign_replica_local(v, [3., 4.])

            with distribution.scope():
                # Saves the current value of (v[0] + v[1])/2, 3.5
                save_path = self._save(sess, replica_local)

                # Change the values between save and restore.
                self._assign_replica_local(v, [5., 6.])
        return save_path

    def _save_replica_local_sum(self, distribution):
        """Save variables with mirroring, returns save_path."""
        with self.session(graph=ops.Graph()) as sess:
            v, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.SUM, distribution)

            # Overwrite the initial values.
            self._assign_replica_local(v, [1.5, 2.])

            with distribution.scope():
                # Saves the current value of v[0] + v[1], 3.5
                save_path = self._save(sess, replica_local)

                # Change the values between save and restore.
                self._assign_replica_local(v, [5., 6.])
        return save_path

    def _save_normal(self):
        """Save variables without mirroring, returns save_path."""
        with self.session(graph=ops.Graph()) as sess:
            var = variable_scope.get_variable(
                name="v", initializer=1., use_resource=True)

            # Overwrite the initial value.
            self.evaluate(var.assign(3.5))

            # Saves the current value of var, 3.5.
            save_path = self._save(sess, var)

            # Change the values between save and restore.
            self.evaluate(var.assign(5.))
        return save_path

    def _restore_normal(self, save_path):
        """Restore to variables without mirroring in a fresh graph."""
        with self.session(graph=ops.Graph()) as sess:
            var = variable_scope.get_variable(
                name="v", initializer=7., use_resource=True)

            # Overwrite the initial value.
            self.evaluate(var.assign(8.))

            # Restores the saved value of 3.5 to `var`.
            saver = saver_lib.Saver(var_list=[var])
            saver.restore(sess, save_path)
            self.assertEqual(3.5, self.evaluate(var))

    def _restore_replica_local_mean(self, save_path, distribution):
        """Restore to variables with mirroring in a fresh graph."""
        with self.session(graph=ops.Graph()) as sess:
            v, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.MEAN, distribution)

            # Overwrite the initial values.
            self._assign_replica_local(v, [7., 8.])

            with distribution.scope():
                # Restores the saved value of 3.5 to both variables.
                saver = saver_lib.Saver(var_list=[replica_local])
                saver.restore(sess, save_path)
                self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]]))

    def _restore_replica_local_sum(self, save_path, distribution):
        """Restore to variables with mirroring in a fresh graph."""
        with self.session(graph=ops.Graph()) as sess:
            v, replica_local = _make_replica_local(
                variable_scope.VariableAggregation.SUM, distribution)

            # Overwrite the initial values.
            self._assign_replica_local(v, [7., 8.])

            with distribution.scope():
                # Restores the saved value of 3.5, split equally between the
                # two variables.
                saver = saver_lib.Saver(var_list=[replica_local])
                saver.restore(sess, save_path)
                self.assertEqual([1.75, 1.75], self.evaluate([v[0], v[1]]))

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveReplicaLocalRestoreReplicaLocalMean(self, distribution):
        save_path = self._save_replica_local_mean(distribution)
        self._restore_replica_local_mean(save_path, distribution)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveReplicaLocalRestoreReplicaLocalSum(self, distribution):
        save_path = self._save_replica_local_sum(distribution)
        self._restore_replica_local_sum(save_path, distribution)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveReplicaLocalMeanRestoreNormal(self, distribution):
        save_path = self._save_replica_local_mean(distribution)
        self._restore_normal(save_path)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveReplicaLocalSumRestoreNormal(self, distribution):
        save_path = self._save_replica_local_sum(distribution)
        self._restore_normal(save_path)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveNormalRestoreReplicaLocalMean(self, distribution):
        save_path = self._save_normal()
        self._restore_replica_local_mean(save_path, distribution)

    @combinations.generate(mirrored_and_tpu_strategy_combinations())
    def testSaveNormalRestoreReplicaLocalSum(self, distribution):
        save_path = self._save_normal()
        self._restore_replica_local_sum(save_path, distribution)
class MirroredTest(test.TestCase):
    """Arithmetic on mirrored values behaves like arithmetic on their contents."""

    def testAddOp(self):
        if context.num_gpus() < 1:
            self.skipTest("A GPU is not available for this test.")
        mirrored = _make_mirrored_val(init_val=3.)

        # mirrored + mirrored adds component-wise: 3. + 3. == 6.
        self.assertEqual(self.evaluate(constant_op.constant(6.)),
                         self.evaluate(mirrored + mirrored))
        self.assertEqual(self.evaluate(constant_op.constant(4.)),
                         self.evaluate(mirrored + 1))
        # The operator overload and math_ops.add agree in both value and type.
        self.assertEqual(self.evaluate(mirrored + 1),
                         self.evaluate(math_ops.add(mirrored, 1)))
        self.assertEqual(type(mirrored + 1),
                         type(math_ops.add(mirrored, 1)))
class PerReplicaTest(test.TestCase, parameterized.TestCase):
    """PerReplica composite-tensor behavior: type specs, nesting, tf.function."""

    @combinations.generate(combinations.combine(mode=["eager"]))
    def testTypeSpec(self):
        vals = (constant_op.constant(1.),)
        per_replica = values_lib.PerReplica(vals)

        # The spec records one TensorSpec per replica value.
        spec = per_replica._type_spec
        self.assertEqual(spec._value_specs,
                         (tensor_spec.TensorSpec([], dtypes.float32),))

    @combinations.generate(combinations.combine(mode=["eager"]))
    def testTypeSpecRoundTrip(self):
        vals = (constant_op.constant(1.),)
        per_replica = values_lib.PerReplica(vals)

        # Decompose into component tensors and rebuild; values must survive.
        spec = per_replica._type_spec
        tensor_list = spec._to_components(per_replica)
        reconstructed = spec._from_components(tensor_list)

        self.assertAllEqual(per_replica.values, reconstructed.values)

    @combinations.generate(combinations.combine(mode=["eager"]))
    def testTypeSpecNest(self):
        vals = (constant_op.constant(1.), constant_op.constant([5., 6.0]),)
        per_replica = values_lib.PerReplica(vals)

        # Note: nest.map_structure exercises nest.flatten and
        # nest.pack_sequence_as.
        result = nest.map_structure(
            lambda t: t + 10, per_replica, expand_composites=True)

        self.assertLen(result.values, 2)
        self.assertAllEqual(result.values[0], 11.)
        self.assertAllEqual(result.values[1], [15., 16.0])

    @test_util.run_in_graph_and_eager_modes
    def testIsGraphTensor(self):
        per_replica = values_lib.PerReplica((constant_op.constant(1.),))
        # Flattened components are graph tensors exactly when not eager.
        for t in nest.flatten(per_replica, expand_composites=True):
            self.assertEqual(hasattr(t, "graph"), not context.executing_eagerly())

    @combinations.generate(combinations.combine(mode=["eager"]))
    def testDoesNotTriggerFunctionTracing(self):
        traces = []

        @def_function.function
        def f(x):
            traces.append(None)  # Only happens on trace.
            return x

        per_replica = values_lib.PerReplica((constant_op.constant(1.),))

        # Trace once.
        f(per_replica)
        self.assertNotEmpty(traces)
        del traces[:]

        per_replica_spec = per_replica._type_spec
        for _ in range(5):
            vals = per_replica_spec._to_components(per_replica)
            vals = [v * 2 for v in vals]
            per_replica = per_replica_spec._from_components(vals)

            output = f(per_replica)
            self.assertIsInstance(output, values_lib.PerReplica)
            self.assertAllEqual(output._values, per_replica._values)
            self.assertEmpty(traces)  # Make sure we're not re-tracing `f`.

    @combinations.generate(combinations.combine(mode=["eager"]))
    def testFunctionCanReturnPerReplica(self):
        f = def_function.function(lambda x: x)
        x = values_lib.PerReplica((constant_op.constant(1.),))
        y = f(x)
        # The function returns an equal but distinct composite.
        self.assertIsNot(x, y)
        nest.map_structure(self.assertAllEqual, x, y, expand_composites=True)
        self.assertEqual(x._type_spec, y._type_spec)

    @test_util.run_in_graph_and_eager_modes
    def testCondWithTensorValues(self):
        per_replica_1 = values_lib.PerReplica((constant_op.constant("a"),))
        per_replica_2 = values_lib.PerReplica((constant_op.constant(["b", "c"]),))
        condition = array_ops.placeholder_with_default(True, [])

        result = control_flow_ops.cond(
            condition, lambda: per_replica_1, lambda: per_replica_2)

        self.assertLen(result.values, 1)
        self.assertAllEqual(result.values[0], "a")

    @test_util.run_in_graph_and_eager_modes
    def testCondWithValuesConvertibleToTensor(self):
        per_replica_1 = values_lib.PerReplica(("a",))
        per_replica_2 = values_lib.PerReplica(("b",))
        condition = array_ops.placeholder_with_default(True, [])

        result = control_flow_ops.cond(
            condition, lambda: per_replica_1, lambda: per_replica_2)

        self.assertLen(result.values, 1)
        self.assertAllEqual(result.values[0], "a")

    @test_util.build_as_function_and_v1_graph
    def testCondWithValuesNotConvertibleToTensor(self):
        # Python sets cannot be encoded as a TypeSpec, so cond must reject
        # these branch values.
        per_replica_1 = values_lib.PerReplica(({"a"},))
        per_replica_2 = values_lib.PerReplica(({"b", "c"},))
        condition = array_ops.placeholder(dtypes.bool, [])

        with self.assertRaisesRegex(TypeError, "Could not build a TypeSpec for"):
            control_flow_ops.cond(
                condition, lambda: per_replica_1, lambda: per_replica_2)
def _make_index_slices(values, indices, dense_shape=None):
    """Build an IndexedSlices from copies of `values`, `indices`, `dense_shape`.

    Each input is passed through `array_ops.identity` so the result does not
    alias the caller's tensors.

    Args:
        values: the slice values tensor.
        indices: the slice indices tensor.
        dense_shape: optional dense shape; forwarded as None when omitted.

    Returns:
        An `indexed_slices.IndexedSlices`.
    """
    # Explicit None check: the previous truthiness test would also have
    # skipped a legitimate falsy dense_shape (e.g. an empty list).
    if dense_shape is not None:
        dense_shape = array_ops.identity(dense_shape)
    return indexed_slices.IndexedSlices(
        array_ops.identity(values), array_ops.identity(indices), dense_shape)
# Script entry point: delegate to the distribute-strategy test runner.
if __name__ == "__main__":
    ds_test_util.main()
|
|
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing import pickleable
from sqlalchemy.util import pickle
import inspect
from sqlalchemy.orm import create_session, sessionmaker, attributes, \
make_transient, make_transient_to_detached, Session
import sqlalchemy as sa
from sqlalchemy.testing import engines, config
from sqlalchemy import testing
from sqlalchemy import Integer, String, Sequence
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, backref, joinedload, \
exc as orm_exc, object_session, was_deleted
from sqlalchemy.util import pypy
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy import event, ForeignKey
class ExecutionTest(_fixtures.FixtureTest):
    """Session.execute()/scalar() accept sequences, raw SQL text, and core constructs."""

    run_inserts = None
    __backend__ = True

    @testing.requires.sequences
    def test_sequence_execute(self):
        seq = Sequence("some_sequence")
        seq.create(testing.db)
        try:
            session = create_session(bind=testing.db)
            eq_(session.execute(seq), 1)
        finally:
            # Always drop the sequence, even if the assertion fails.
            seq.drop(testing.db)

    def test_textual_execute(self):
        """Session.execute() coerces a plain string to text()."""
        users = self.tables.users

        session = create_session(bind=self.metadata.bind)
        users.insert().execute(id=7, name='jack')

        # use :bindparam style
        eq_(
            session.execute(
                "select * from users where id=:id", {'id': 7}).fetchall(),
            [(7, 'jack')])

        # use :bindparam style
        eq_(
            session.scalar(
                "select id from users where id=:id", {'id': 7}),
            7)

    def test_parameter_execute(self):
        users = self.tables.users

        session = Session(bind=testing.db)
        # executemany via a list of parameter dicts...
        session.execute(
            users.insert(),
            [{"id": 7, "name": "u7"}, {"id": 8, "name": "u8"}])
        # ...and a single-row execute via one dict.
        session.execute(users.insert(), {"id": 9, "name": "u9"})

        eq_(
            session.execute(
                sa.select([users.c.id]).order_by(users.c.id)).fetchall(),
            [(7,), (8,), (9,)])
class TransScopingTest(_fixtures.FixtureTest):
    """A Session must not close or commit a connection it did not open."""

    run_inserts = None
    __prefer_requires__ = "independent_connections",

    def test_no_close_on_flush(self):
        """Flush() doesn't close a connection the session didn't open"""
        User, users = self.classes.User, self.tables.users

        c = testing.db.connect()
        c.execute("select * from users")
        mapper(User, users)
        s = create_session(bind=c)
        s.add(User(name='first'))
        s.flush()
        # The externally supplied connection must still be usable.
        c.execute("select * from users")

    def test_close(self):
        """close() doesn't close a connection the session didn't open"""
        User, users = self.classes.User, self.tables.users

        c = testing.db.connect()
        c.execute("select * from users")
        mapper(User, users)
        s = create_session(bind=c)
        s.add(User(name='first'))
        s.flush()
        c.execute("select * from users")
        s.close()
        # Still usable after the session itself is closed.
        c.execute("select * from users")

    @testing.requires.independent_connections
    @engines.close_open_connections
    def test_transaction(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        conn1 = testing.db.connect()
        conn2 = testing.db.connect()

        sess = create_session(autocommit=False, bind=conn1)
        u = User(name='x')
        sess.add(u)
        sess.flush()
        # Uncommitted work is visible only on the session's own connection.
        assert conn1.execute("select count(1) from users").scalar() == 1
        assert conn2.execute("select count(1) from users").scalar() == 0
        sess.commit()
        # After commit the row is visible on every connection.
        assert conn1.execute("select count(1) from users").scalar() == 1
        assert testing.db.connect().execute('select count(1) from users'
                                            ).scalar() == 1
        sess.close()
class SessionUtilTest(_fixtures.FixtureTest):
    """make_transient() / make_transient_to_detached() state transitions."""

    run_inserts = None

    def test_object_session_raises(self):
        # object_session() rejects unmapped class instances and plain objects.
        User = self.classes.User

        assert_raises(
            orm_exc.UnmappedInstanceError,
            object_session,
            object()
        )

        assert_raises(
            orm_exc.UnmappedInstanceError,
            object_session,
            User()
        )

    def test_make_transient(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = create_session()
        sess.add(User(name='test'))
        sess.flush()

        u1 = sess.query(User).first()
        make_transient(u1)
        # A transient object is no longer tracked by the session...
        assert u1 not in sess

        sess.add(u1)
        # ...and re-adding it makes it pending again.
        assert u1 in sess.new

        u1 = sess.query(User).first()
        sess.expunge(u1)
        make_transient(u1)
        sess.add(u1)
        assert u1 in sess.new

        # test expired attributes
        # get unexpired
        u1 = sess.query(User).first()
        sess.expire(u1)
        make_transient(u1)
        # Expired attributes are wiped rather than refreshed from the DB.
        assert u1.id is None
        assert u1.name is None

        # works twice
        make_transient(u1)

        sess.close()

        u1.name = 'test2'
        sess.add(u1)
        sess.flush()
        assert u1 in sess
        sess.delete(u1)
        sess.flush()
        assert u1 not in sess

        # A deleted instance cannot be re-added until made transient again.
        assert_raises(sa.exc.InvalidRequestError, sess.add, u1)
        make_transient(u1)
        sess.add(u1)
        sess.flush()
        assert u1 in sess

    def test_make_transient_plus_rollback(self):
        # test for [ticket:2182]
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = Session()
        u1 = User(name='test')
        sess.add(u1)
        sess.commit()

        sess.delete(u1)
        sess.flush()
        make_transient(u1)
        sess.rollback()
        # Rollback must not resurrect the transient object into the session.
        assert attributes.instance_state(u1).transient

    def test_make_transient_to_detached(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = Session()
        u1 = User(id=1, name='test')
        sess.add(u1)
        sess.commit()
        sess.close()

        # A fresh object carrying only the primary key can be promoted to
        # detached and then lazily refreshed from the database on access.
        u2 = User(id=1)
        make_transient_to_detached(u2)
        assert 'id' in u2.__dict__
        sess.add(u2)
        eq_(u2.name, "test")

    def test_make_transient_to_detached_no_session_allowed(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = Session()
        u1 = User(id=1, name='test')
        sess.add(u1)
        # A pending object is not transient; promotion must fail.
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Given object must be transient",
            make_transient_to_detached, u1
        )

    def test_make_transient_to_detached_no_key_allowed(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = Session()
        u1 = User(id=1, name='test')
        sess.add(u1)
        sess.commit()
        sess.expunge(u1)
        # An object that already has an identity key is detached, not
        # transient; promotion must fail.
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Given object must be transient",
            make_transient_to_detached, u1
        )
class SessionStateTest(_fixtures.FixtureTest):
    """Core Session state-tracking behavior (autoflush, add/delete, expunge)."""

    run_inserts = None
    __prefer_requires__ = ('independent_connections', )

    def test_info(self):
        # A plain Session starts with an empty info dict.
        s = Session()
        eq_(s.info, {})

        # sessionmaker-level info is copied into each new session and may be
        # overridden / extended per session without affecting the factory.
        maker = sessionmaker(info={"global": True, "s1": 5})
        s1 = maker()
        s2 = maker(info={"s1": 6, "s2": True})

        eq_(s1.info, {"global": True, "s1": 5})
        eq_(s2.info, {"global": True, "s1": 6, "s2": True})
        s2.info["global"] = False
        s2.info["s1"] = 7

        s3 = maker()
        eq_(s3.info, {"global": True, "s1": 5})

        maker2 = sessionmaker()
        s4 = maker2(info={'s4': 8})
        eq_(s4.info, {'s4': 8})
    @testing.requires.independent_connections
    @engines.close_open_connections
    def test_autoflush(self):
        User, users = self.classes.User, self.tables.users

        bind = self.metadata.bind
        mapper(User, users)
        conn1 = bind.connect()
        conn2 = bind.connect()

        sess = create_session(bind=conn1, autocommit=False, autoflush=True)
        u = User()
        u.name = 'ed'
        sess.add(u)
        # The query triggers an autoflush, so the pending row is found.
        u2 = sess.query(User).filter_by(name='ed').one()
        assert u2 is u
        # Flushed but uncommitted: visible on the session's connection only.
        eq_(conn1.execute("select count(1) from users").scalar(), 1)
        eq_(conn2.execute("select count(1) from users").scalar(), 0)
        sess.commit()
        eq_(conn1.execute("select count(1) from users").scalar(), 1)
        eq_(bind.connect().execute("select count(1) from users").scalar(), 1)
        sess.close()

    def test_with_no_autoflush(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        sess = Session()

        u = User()
        u.name = 'ed'
        sess.add(u)

        def go(obj):
            # Inside the no_autoflush block the pending row stays invisible.
            assert u not in sess.query(User).all()
        testing.run_as_contextmanager(sess.no_autoflush, go)
        # On exit, autoflush resumes and the next query flushes the pending row.
        assert u in sess.new
        assert u in sess.query(User).all()
        assert u not in sess.new

    def test_deleted_flag(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        sess = sessionmaker()()

        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()

        sess.delete(u1)
        sess.flush()
        # Flushed-as-deleted: gone from the session, cannot be re-added.
        assert u1 not in sess
        assert_raises(sa.exc.InvalidRequestError, sess.add, u1)
        sess.rollback()
        # Rollback undoes the delete; the object is tracked again.
        assert u1 in sess

        sess.delete(u1)
        sess.commit()
        assert u1 not in sess
        assert_raises(sa.exc.InvalidRequestError, sess.add, u1)

        # Only make_transient() re-enables adding after a committed delete.
        make_transient(u1)
        sess.add(u1)
        sess.commit()

        eq_(sess.query(User).count(), 1)
    def test_autoflush_expressions(self):
        """test that an expression which is dependent on object state is
        evaluated after the session autoflushes. This is the lambda
        inside of strategies.py lazy_clause.
        """

        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address, backref="user")})
        mapper(Address, addresses)

        sess = create_session(autoflush=True, autocommit=False)
        u = User(name='ed', addresses=[Address(email_address='foo')])
        sess.add(u)
        # Comparing against the pending `u` forces an autoflush first.
        eq_(sess.query(Address).filter(Address.user == u).one(),
            Address(email_address='foo'))

        # still works after "u" is garbage collected
        sess.commit()
        sess.close()
        u = sess.query(User).get(u.id)
        q = sess.query(Address).filter(Address.user == u)
        del u
        gc_collect()
        eq_(q.one(), Address(email_address='foo'))
    @testing.requires.independent_connections
    @engines.close_open_connections
    def test_autoflush_unbound(self):
        # Same as test_autoflush but with a session bound implicitly to the
        # default engine rather than to an explicit connection.
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        try:
            sess = create_session(autocommit=False, autoflush=True)
            u = User()
            u.name = 'ed'
            sess.add(u)
            u2 = sess.query(User).filter_by(name='ed').one()
            assert u2 is u
            # Flushed but uncommitted: visible on the session's connection
            # only, not on a fresh connection.
            assert sess.execute('select count(1) from users',
                                mapper=User).scalar() == 1
            assert testing.db.connect().execute('select count(1) from '
                                                'users').scalar() == 0
            sess.commit()
            assert sess.execute('select count(1) from users',
                                mapper=User).scalar() == 1
            assert testing.db.connect().execute('select count(1) from '
                                                'users').scalar() == 1
            sess.close()
        except:
            # Roll back so the connection can be cleanly returned on failure.
            sess.rollback()
            raise

    @engines.close_open_connections
    def test_autoflush_2(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        conn1 = testing.db.connect()
        sess = create_session(bind=conn1, autocommit=False,
                              autoflush=True)
        u = User()
        u.name = 'ed'
        sess.add(u)
        # commit() flushes the pending object implicitly.
        sess.commit()
        assert conn1.execute('select count(1) from users').scalar() == 1
        assert testing.db.connect().execute('select count(1) from users'
                                            ).scalar() == 1
        sess.commit()

    def test_autocommit_doesnt_raise_on_pending(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        session = create_session(autocommit=True)

        session.add(User(name='ed'))

        # begin()/flush()/commit() with a pending object must not raise.
        session.begin()
        session.flush()
        session.commit()

    def test_active_flag(self):
        # In autocommit mode the session is "active" only inside an explicit
        # begin() block.
        sess = create_session(bind=config.db, autocommit=True)
        assert not sess.is_active
        sess.begin()
        assert sess.is_active
        sess.rollback()
        assert not sess.is_active
    @engines.close_open_connections
    def test_add_delete(self):
        User, Address, addresses, users = (self.classes.User,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users)

        s = create_session()
        mapper(User, users, properties={
            'addresses': relationship(Address, cascade="all, delete")
        })
        mapper(Address, addresses)

        user = User(name='u1')

        # Deleting a transient (never-persisted) object is rejected.
        assert_raises_message(sa.exc.InvalidRequestError,
                              'is not persisted', s.delete, user)

        s.add(user)
        s.flush()
        user = s.query(User).one()
        s.expunge(user)
        assert user not in s

        # modify outside of session, assert changes remain/get saved
        user.name = "fred"
        s.add(user)
        assert user in s
        assert user in s.dirty
        s.flush()
        s.expunge_all()
        assert s.query(User).count() == 1
        user = s.query(User).one()
        assert user.name == 'fred'

        # ensure its not dirty if no changes occur
        s.expunge_all()
        assert user not in s
        s.add(user)
        assert user in s
        assert user not in s.dirty

        # A second session cannot delete an object attached to the first.
        s2 = create_session()
        assert_raises_message(sa.exc.InvalidRequestError,
                              'is already attached to session',
                              s2.delete, user)

        u2 = s2.query(User).get(user.id)
        # Nor can a session delete a duplicate identity loaded elsewhere.
        assert_raises_message(sa.exc.InvalidRequestError,
                              'another instance with key', s.delete, u2)

        s.expire(user)
        s.expunge(user)
        assert user not in s
        s.delete(user)
        assert user in s
        s.flush()
        assert user not in s
        assert s.query(User).count() == 0

    def test_already_attached(self):
        User = self.classes.User
        users = self.tables.users
        mapper(User, users)

        s1 = Session()
        s2 = Session()

        u1 = User(id=1, name='u1')
        make_transient_to_detached(u1)  # shorthand for actually persisting it
        s1.add(u1)

        # The same instance cannot be attached to two sessions at once.
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Object '<User.*?>' is already attached to session",
            s2.add, u1
        )
        assert u1 not in s2
        assert not s2.identity_map.keys()
    @testing.uses_deprecated()
    def test_identity_conflict(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        # Exercise both the weak (default) and strong identity maps.
        for s in (
            create_session(),
            create_session(weak_identity_map=False),
        ):
            users.delete().execute()
            u1 = User(name="ed")
            s.add(u1)
            s.flush()
            s.expunge(u1)
            u2 = s.query(User).first()
            s.expunge(u2)
            # Manually adding two distinct instances for the same identity
            # key must trip the identity map's conflict assertion.
            s.identity_map.add(sa.orm.attributes.instance_state(u1))

            assert_raises(AssertionError, s.identity_map.add,
                          sa.orm.attributes.instance_state(u2))

    def test_pickled_update(self):
        users, User = self.tables.users, pickleable.User

        mapper(User, users)
        sess1 = create_session()
        sess2 = create_session()
        u1 = User(name='u1')
        sess1.add(u1)
        assert_raises_message(sa.exc.InvalidRequestError,
                              'already attached to session', sess2.add,
                              u1)
        # A pickle round-trip yields a distinct instance, which a second
        # session then accepts.
        u2 = pickle.loads(pickle.dumps(u1))
        sess2.add(u2)

    def test_duplicate_update(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        Session = sessionmaker()
        sess = Session()

        u1 = User(name='u1')
        sess.add(u1)
        sess.flush()
        assert u1.id is not None

        sess.expunge(u1)

        assert u1 not in sess
        assert Session.object_session(u1) is None

        u2 = sess.query(User).get(u1.id)
        assert u2 is not None and u2 is not u1
        assert u2 in sess

        # While u2 holds the identity key, re-adding u1 is rejected.
        assert_raises(AssertionError, lambda: sess.add(u1))

        sess.expunge(u2)
        assert u2 not in sess
        assert Session.object_session(u2) is None

        u1.name = "John"
        u2.name = "Doe"

        sess.add(u1)
        assert u1 in sess
        assert Session.object_session(u1) is sess

        sess.flush()

        sess.expunge_all()

        # Only u1's change was flushed; u2 was detached at flush time.
        u3 = sess.query(User).get(u1.id)
        assert u3 is not u1 and u3 is not u2 and u3.name == u1.name

    def test_no_double_save(self):
        users = self.tables.users

        sess = create_session()

        class Foo(object):
            def __init__(self):
                sess.add(self)

        class Bar(Foo):
            def __init__(self):
                sess.add(self)
                Foo.__init__(self)

        mapper(Foo, users)
        mapper(Bar, users)

        b = Bar()
        # Adding the same instance twice from __init__ registers it once.
        assert b in sess
        assert len(list(sess)) == 1
def test_identity_map_mutate(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
sess.add_all([User(name='u1'), User(name='u2'), User(name='u3')])
sess.commit()
# TODO: what are we testing here ? that iteritems() can
# withstand a change? should this be
# more directly attempting to manipulate the identity_map ?
u1, u2, u3 = sess.query(User).all()
for i, (key, value) in enumerate(iter(sess.identity_map.items())):
if i == 2:
del u3
gc_collect()
    def _test_extra_dirty_state(self):
        """Fixture: install an after_update hook that dirties every
        instance in the identity map, so a flush leaves freshly modified
        state behind.  Returns (session, a1, a2) with a1 renamed so the
        next flush triggers the hook."""
        users, User = self.tables.users, self.classes.User
        m = mapper(User, users)
        s = Session()
        @event.listens_for(m, "after_update")
        def e(mapper, conn, target):
            sess = object_session(target)
            for entry in list(sess.identity_map.values()):
                entry.name = "5"
        a1, a2 = User(name="1"), User(name="2")
        s.add_all([a1, a2])
        s.commit()
        a1.name = "3"
        return s, a1, a2
    def test_extra_dirty_state_post_flush_warning(self):
        """Mutations made by an after_update hook trigger the
        'accumulated on previously clean instances' warning at commit."""
        s, a1, a2 = self._test_extra_dirty_state()
        assert_raises_message(
            sa.exc.SAWarning,
            "Attribute history events accumulated on 1 previously "
            "clean instances",
            s.commit
        )
    def test_extra_dirty_state_post_flush_state(self):
        """The identity map reports no modified state by the time
        after_flush_postexec fires, despite the event hook's mutations."""
        s, a1, a2 = self._test_extra_dirty_state()
        canary = []
        @event.listens_for(s, "after_flush_postexec")
        def e(sess, ctx):
            canary.append(bool(sess.identity_map._modified))
        @testing.emits_warning("Attribute")
        def go():
            s.commit()
        go()
        eq_(canary, [False])
    def test_deleted_auto_expunged(self):
        """A flushed delete leaves the object in the 'deleted' state,
        still owned by the session, until commit detaches it."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = Session()
        sess.add(User(name='x'))
        sess.commit()
        u1 = sess.query(User).first()
        sess.delete(u1)
        assert not was_deleted(u1)
        sess.flush()
        assert was_deleted(u1)
        assert u1 not in sess
        assert object_session(u1) is sess
        sess.commit()
        # commit fully expunges the deleted instance
        assert object_session(u1) is None
    def test_explicit_expunge_pending(self):
        """Expunging a flushed-but-uncommitted instance detaches it, and
        a later rollback does not pull it back into the session."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = Session()
        u1 = User(name='x')
        sess.add(u1)
        sess.flush()
        sess.expunge(u1)
        assert u1 not in sess
        assert object_session(u1) is None
        sess.rollback()
        assert u1 not in sess
        assert object_session(u1) is None
    def test_explicit_expunge_deleted(self):
        """Expunging a flush-deleted instance detaches it immediately;
        rollback leaves it detached and still marked as deleted."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = Session()
        sess.add(User(name='x'))
        sess.commit()
        u1 = sess.query(User).first()
        sess.delete(u1)
        sess.flush()
        assert was_deleted(u1)
        assert u1 not in sess
        assert object_session(u1) is sess
        sess.expunge(u1)
        assert was_deleted(u1)
        assert u1 not in sess
        assert object_session(u1) is None
        sess.rollback()
        assert was_deleted(u1)
        assert u1 not in sess
        assert object_session(u1) is None
class SessionStateWFixtureTest(_fixtures.FixtureTest):
    """Session state tests that rely on the pre-populated fixture rows."""
    __backend__ = True
    def test_autoflush_rollback(self):
        """Rollback restores autoflushed attribute changes but does not
        expire never-persisted (pending) objects."""
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)
        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address)})
        sess = create_session(autocommit=False, autoflush=True)
        u = sess.query(User).get(8)
        newad = Address(email_address='a new address')
        u.addresses.append(newad)
        u.name = 'some new name'
        assert u.name == 'some new name'
        assert len(u.addresses) == 4
        assert newad in u.addresses
        sess.rollback()
        assert u.name == 'ed'
        assert len(u.addresses) == 3
        assert newad not in u.addresses
        # pending objects don't get expired
        assert newad.email_address == 'a new address'
    def test_expunge_cascade(self):
        """expunge() follows 'all'-cascaded relationships, detaching the
        parent and all of its loaded children."""
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)
        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address,
                                 backref=backref("user", cascade="all"),
                                 cascade="all")})
        session = create_session()
        u = session.query(User).filter_by(id=7).one()
        # get everything to load in both directions
        print([a.user for a in u.addresses])
        # then see if expunge fails
        session.expunge(u)
        assert sa.orm.object_session(u) is None
        assert sa.orm.attributes.instance_state(u).session_id is None
        for a in u.addresses:
            assert sa.orm.object_session(a) is None
            assert sa.orm.attributes.instance_state(a).session_id is None
class NoCyclesOnTransientDetachedTest(_fixtures.FixtureTest):
    """Test the instance_state._strong_obj link that it
    is present only on persistent/pending objects and never
    transient/detached.
    """
    run_inserts = None
    def setup(self):
        mapper(self.classes.User, self.tables.users)
    def _assert_modified(self, u1):
        """Assert u1 has pending attribute changes."""
        assert sa.orm.attributes.instance_state(u1).modified
    def _assert_not_modified(self, u1):
        """Assert u1 has no pending attribute changes."""
        assert not sa.orm.attributes.instance_state(u1).modified
    def _assert_cycle(self, u1):
        """Assert the state strongly references its object (a cycle)."""
        assert sa.orm.attributes.instance_state(u1)._strong_obj is not None
    def _assert_no_cycle(self, u1):
        """Assert the state holds no strong reference to its object."""
        assert sa.orm.attributes.instance_state(u1)._strong_obj is None
    def _persistent_fixture(self):
        """Return (session, user) with the user flushed to persistent."""
        User = self.classes.User
        u1 = User()
        u1.name = "ed"
        sess = Session()
        sess.add(u1)
        sess.flush()
        return sess, u1
    def test_transient(self):
        User = self.classes.User
        u1 = User()
        u1.name = 'ed'
        self._assert_no_cycle(u1)
        self._assert_modified(u1)
    def test_transient_to_pending(self):
        User = self.classes.User
        u1 = User()
        u1.name = 'ed'
        self._assert_modified(u1)
        self._assert_no_cycle(u1)
        sess = Session()
        sess.add(u1)
        self._assert_cycle(u1)
        sess.flush()
        self._assert_no_cycle(u1)
        self._assert_not_modified(u1)
    def test_dirty_persistent_to_detached_via_expunge(self):
        sess, u1 = self._persistent_fixture()
        u1.name = 'edchanged'
        self._assert_cycle(u1)
        sess.expunge(u1)
        self._assert_no_cycle(u1)
    def test_dirty_persistent_to_detached_via_close(self):
        sess, u1 = self._persistent_fixture()
        u1.name = 'edchanged'
        self._assert_cycle(u1)
        sess.close()
        self._assert_no_cycle(u1)
    def test_clean_persistent_to_detached_via_close(self):
        sess, u1 = self._persistent_fixture()
        self._assert_no_cycle(u1)
        self._assert_not_modified(u1)
        sess.close()
        u1.name = 'edchanged'
        self._assert_modified(u1)
        self._assert_no_cycle(u1)
    def test_detached_to_dirty_deleted(self):
        sess, u1 = self._persistent_fixture()
        sess.expunge(u1)
        u1.name = 'edchanged'
        self._assert_no_cycle(u1)
        sess.delete(u1)
        self._assert_cycle(u1)
    def test_detached_to_dirty_persistent(self):
        sess, u1 = self._persistent_fixture()
        sess.expunge(u1)
        u1.name = 'edchanged'
        self._assert_modified(u1)
        self._assert_no_cycle(u1)
        sess.add(u1)
        self._assert_cycle(u1)
        self._assert_modified(u1)
    def test_detached_to_clean_persistent(self):
        sess, u1 = self._persistent_fixture()
        sess.expunge(u1)
        self._assert_no_cycle(u1)
        self._assert_not_modified(u1)
        sess.add(u1)
        self._assert_no_cycle(u1)
        self._assert_not_modified(u1)
    def test_move_persistent_clean(self):
        sess, u1 = self._persistent_fixture()
        sess.close()
        s2 = Session()
        s2.add(u1)
        self._assert_no_cycle(u1)
        self._assert_not_modified(u1)
    def test_move_persistent_dirty(self):
        sess, u1 = self._persistent_fixture()
        u1.name = 'edchanged'
        self._assert_cycle(u1)
        self._assert_modified(u1)
        sess.close()
        self._assert_no_cycle(u1)
        s2 = Session()
        s2.add(u1)
        self._assert_cycle(u1)
        self._assert_modified(u1)
    @testing.requires.predictable_gc
    def test_move_gc_session_persistent_dirty(self):
        # the dirty state must keep its cycle alive even after the
        # owning session itself is garbage collected
        sess, u1 = self._persistent_fixture()
        u1.name = 'edchanged'
        self._assert_cycle(u1)
        self._assert_modified(u1)
        del sess
        gc_collect()
        self._assert_cycle(u1)
        s2 = Session()
        s2.add(u1)
        self._assert_cycle(u1)
        self._assert_modified(u1)
    def test_persistent_dirty_to_expired(self):
        sess, u1 = self._persistent_fixture()
        u1.name = 'edchanged'
        self._assert_cycle(u1)
        self._assert_modified(u1)
        sess.expire(u1)
        self._assert_no_cycle(u1)
        self._assert_not_modified(u1)
class WeakIdentityMapTest(_fixtures.FixtureTest):
    """The default weak-referencing identity map: clean instances may be
    garbage collected while dirty ones are strongly held until flushed."""
    run_inserts = None
    @testing.requires.predictable_gc
    def test_weakref(self):
        """test the weak-referencing identity map, which strongly-
        references modified items."""
        users, User = self.tables.users, self.classes.User
        s = create_session()
        mapper(User, users)
        s.add(User(name='ed'))
        s.flush()
        assert not s.dirty
        user = s.query(User).one()
        del user
        gc_collect()
        assert len(s.identity_map) == 0
        user = s.query(User).one()
        user.name = 'fred'
        del user
        gc_collect()
        # dirty instance survives gc via the identity map's strong ref
        assert len(s.identity_map) == 1
        assert len(s.dirty) == 1
        assert None not in s.dirty
        s.flush()
        gc_collect()
        assert not s.dirty
        assert not s.identity_map
        user = s.query(User).one()
        assert user.name == 'fred'
        assert s.identity_map
    @testing.requires.predictable_gc
    def test_weakref_pickled(self):
        """A pickled copy re-added as dirty is strongly referenced."""
        users, User = self.tables.users, pickleable.User
        s = create_session()
        mapper(User, users)
        s.add(User(name='ed'))
        s.flush()
        assert not s.dirty
        user = s.query(User).one()
        user.name = 'fred'
        s.expunge(user)
        u2 = pickle.loads(pickle.dumps(user))
        del user
        s.add(u2)
        del u2
        gc_collect()
        assert len(s.identity_map) == 1
        assert len(s.dirty) == 1
        assert None not in s.dirty
        s.flush()
        gc_collect()
        assert not s.dirty
        assert not s.identity_map
    @testing.requires.predictable_gc
    def test_weakref_with_cycles_o2m(self):
        """Backref reference cycles do not defeat weak identity-map gc."""
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)
        s = sessionmaker()()
        mapper(User, users, properties={
            "addresses": relationship(Address, backref="user")
        })
        mapper(Address, addresses)
        s.add(User(name="ed", addresses=[Address(email_address="ed1")]))
        s.commit()
        user = s.query(User).options(joinedload(User.addresses)).one()
        user.addresses[0].user  # lazyload
        eq_(user, User(name="ed", addresses=[Address(email_address="ed1")]))
        del user
        gc_collect()
        assert len(s.identity_map) == 0
        user = s.query(User).options(joinedload(User.addresses)).one()
        user.addresses[0].email_address = 'ed2'
        user.addresses[0].user  # lazyload
        del user
        gc_collect()
        # the dirty pair (user + address) remains pinned
        assert len(s.identity_map) == 2
        s.commit()
        user = s.query(User).options(joinedload(User.addresses)).one()
        eq_(user, User(name="ed", addresses=[Address(email_address="ed2")]))
    @testing.requires.predictable_gc
    def test_weakref_with_cycles_o2o(self):
        """Same as the o2m case but with a scalar (uselist=False) backref."""
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)
        s = sessionmaker()()
        mapper(User, users, properties={
            "address": relationship(Address, backref="user", uselist=False)
        })
        mapper(Address, addresses)
        s.add(User(name="ed", address=Address(email_address="ed1")))
        s.commit()
        user = s.query(User).options(joinedload(User.address)).one()
        user.address.user
        eq_(user, User(name="ed", address=Address(email_address="ed1")))
        del user
        gc_collect()
        assert len(s.identity_map) == 0
        user = s.query(User).options(joinedload(User.address)).one()
        user.address.email_address = 'ed2'
        user.address.user  # lazyload
        del user
        gc_collect()
        assert len(s.identity_map) == 2
        s.commit()
        user = s.query(User).options(joinedload(User.address)).one()
        eq_(user, User(name="ed", address=Address(email_address="ed2")))
    def test_auto_detach_on_gc_session(self):
        """When a session is garbage collected, its instances become
        detached and may join another session."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = Session()
        u1 = User(name='u1')
        sess.add(u1)
        sess.commit()
        # can't add u1 to Session,
        # already belongs to u2
        s2 = Session()
        assert_raises_message(
            sa.exc.InvalidRequestError,
            r".*is already attached to session",
            s2.add, u1
        )
        # garbage collect sess
        del sess
        gc_collect()
        # s2 lets it in now despite u1 having
        # session_key
        s2.add(u1)
        assert u1 in s2
class StrongIdentityMapTest(_fixtures.FixtureTest):
    """Tests for the deprecated strong-referencing identity map
    (weak_identity_map=False) and its prune() behavior."""
    run_inserts = None
    @testing.uses_deprecated()
    def test_strong_ref(self):
        """Instances stay in the identity map after external refs die."""
        users, User = self.tables.users, self.classes.User
        s = create_session(weak_identity_map=False)
        mapper(User, users)
        # save user
        s.add(User(name='u1'))
        s.flush()
        user = s.query(User).one()
        user = None
        print(s.identity_map)
        gc_collect()
        assert len(s.identity_map) == 1
        user = s.query(User).one()
        assert not s.identity_map._modified
        user.name = 'u2'
        assert s.identity_map._modified
        s.flush()
        eq_(users.select().execute().fetchall(), [(user.id, 'u2')])
    @testing.uses_deprecated()
    @testing.fails_if(lambda: pypy, "pypy has a real GC")
    @testing.fails_on('+zxjdbc', 'http://www.sqlalchemy.org/trac/ticket/1473')
    def test_prune(self):
        """prune() drops only clean, externally unreferenced instances."""
        users, User = self.tables.users, self.classes.User
        s = create_session(weak_identity_map=False)
        mapper(User, users)
        for o in [User(name='u%s' % x) for x in range(10)]:
            s.add(o)
        # o is still live after this loop...
        self.assert_(len(s.identity_map) == 0)
        self.assert_(s.prune() == 0)
        s.flush()
        gc_collect()
        self.assert_(s.prune() == 9)
        self.assert_(len(s.identity_map) == 1)
        id = o.id
        del o
        self.assert_(s.prune() == 1)
        self.assert_(len(s.identity_map) == 0)
        u = s.query(User).get(id)
        self.assert_(s.prune() == 0)
        self.assert_(len(s.identity_map) == 1)
        u.name = 'squiznart'
        del u
        # dirty instances are not pruned until flushed
        self.assert_(s.prune() == 0)
        self.assert_(len(s.identity_map) == 1)
        s.flush()
        self.assert_(s.prune() == 1)
        self.assert_(len(s.identity_map) == 0)
        s.add(User(name='x'))
        self.assert_(s.prune() == 0)
        self.assert_(len(s.identity_map) == 0)
        s.flush()
        self.assert_(len(s.identity_map) == 1)
        self.assert_(s.prune() == 1)
        self.assert_(len(s.identity_map) == 0)
        u = s.query(User).get(id)
        s.delete(u)
        del u
        # instances marked deleted are likewise retained until flush
        self.assert_(s.prune() == 0)
        self.assert_(len(s.identity_map) == 1)
        s.flush()
        self.assert_(s.prune() == 0)
        self.assert_(len(s.identity_map) == 0)
class IsModifiedTest(_fixtures.FixtureTest):
    """Tests for Session.is_modified()."""
    run_inserts = None
    def _default_mapping_fixture(self):
        """Map User (with an 'addresses' collection) and Address."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        mapper(User, users, properties={
            "addresses": relationship(Address)
        })
        mapper(Address, addresses)
        return User, Address
    def test_is_modified(self):
        """is_modified() is False for net-zero changes, True for real
        attribute and collection changes."""
        User, Address = self._default_mapping_fixture()
        s = create_session()
        # save user
        u = User(name='fred')
        s.add(u)
        s.flush()
        s.expunge_all()
        user = s.query(User).one()
        assert user not in s.dirty
        assert not s.is_modified(user)
        # setting the same value dirties the instance but is not a net change
        user.name = 'fred'
        assert user in s.dirty
        assert not s.is_modified(user)
        user.name = 'ed'
        assert user in s.dirty
        assert s.is_modified(user)
        s.flush()
        assert user not in s.dirty
        assert not s.is_modified(user)
        a = Address()
        user.addresses.append(a)
        assert user in s.dirty
        assert s.is_modified(user)
        assert not s.is_modified(user, include_collections=False)
    def test_is_modified_passive_off(self):
        """as of 0.8 no SQL is emitted for is_modified()
        regardless of the passive flag"""
        User, Address = self._default_mapping_fixture()
        s = Session()
        u = User(name='fred', addresses=[
                    Address(email_address='foo')])
        s.add(u)
        s.commit()
        u.id
        def go():
            assert not s.is_modified(u)
        self.assert_sql_count(
            testing.db,
            go,
            0
        )
        s.expire_all()
        u.name = 'newname'
        # can't predict result here
        # deterministically, depending on if
        # 'name' or 'addresses' is tested first
        mod = s.is_modified(u)
        addresses_loaded = 'addresses' in u.__dict__
        assert mod is not addresses_loaded
    def test_is_modified_passive_on(self):
        """passive=True likewise emits no SQL."""
        User, Address = self._default_mapping_fixture()
        s = Session()
        u = User(name='fred', addresses=[Address(email_address='foo')])
        s.add(u)
        s.commit()
        u.id
        def go():
            assert not s.is_modified(u, passive=True)
        self.assert_sql_count(
            testing.db,
            go,
            0
        )
        u.name = 'newname'
        def go():
            assert s.is_modified(u, passive=True)
        self.assert_sql_count(
            testing.db,
            go,
            0
        )
    def test_is_modified_syn(self):
        """Changes made through a synonym attribute count as modified."""
        User, users = self.classes.User, self.tables.users
        s = sessionmaker()()
        mapper(User, users, properties={'uname': sa.orm.synonym('name')})
        u = User(uname='fred')
        assert s.is_modified(u)
        s.add(u)
        s.commit()
        assert not s.is_modified(u)
class DisposedStates(fixtures.MappedTest):
    """The Session must tolerate InstanceStates that were disposed and
    discarded from the identity map mid-operation (simulated async gc)."""
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata, Column('id', Integer,
              primary_key=True, test_needs_autoincrement=True),
              Column('data', String(50)))
    @classmethod
    def setup_classes(cls):
        class T(cls.Basic):
            def __init__(self, data):
                self.data = data
        mapper(T, cls.tables.t1)
    def teardown(self):
        # clear the module-level session registry so disposed states
        # don't leak into other tests
        from sqlalchemy.orm.session import _sessions
        _sessions.clear()
        super(DisposedStates, self).teardown()
    def _set_imap_in_disposal(self, sess, *objs):
        """remove selected objects from the given session, as though
        they were dereferenced and removed from WeakIdentityMap.
        Hardcodes the identity map's "all_states()" method to return the
        full list of states. This simulates the all_states() method
        returning results, afterwhich some of the states get garbage
        collected (this normally only happens during asynchronous gc).
        The Session now has one or more InstanceState's which have been
        removed from the identity map and disposed.
        Will the Session not trip over this ??? Stay tuned.
        """
        all_states = sess.identity_map.all_states()
        sess.identity_map.all_states = lambda: all_states
        for obj in objs:
            state = attributes.instance_state(obj)
            sess.identity_map.discard(state)
            state._dispose()
    def _test_session(self, **kwargs):
        """Return a session containing five rows, two dirtied, with
        three of the states disposed out from under it."""
        T = self.classes.T
        sess = create_session(**kwargs)
        data = o1, o2, o3, o4, o5 = [T('t1'), T('t2'), T('t3'), T('t4'
                ), T('t5')]
        sess.add_all(data)
        sess.flush()
        o1.data = 't1modified'
        o5.data = 't5modified'
        self._set_imap_in_disposal(sess, o2, o4, o5)
        return sess
    def test_flush(self):
        self._test_session().flush()
    def test_clear(self):
        self._test_session().expunge_all()
    def test_close(self):
        self._test_session().close()
    def test_invalidate(self):
        self._test_session().invalidate()
    def test_expunge_all(self):
        self._test_session().expunge_all()
    def test_expire_all(self):
        self._test_session().expire_all()
    def test_rollback(self):
        sess = self._test_session(autocommit=False, expire_on_commit=True)
        sess.commit()
        sess.rollback()
class SessionInterface(fixtures.TestBase):
    """Bogus args to Session methods produce actionable exceptions."""
    # TODO: expand with message body assertions.
    # methods that accept a class / mapper rather than an instance
    _class_methods = set((
        'connection', 'execute', 'get_bind', 'scalar'))
    def _public_session_methods(self):
        """Public Session methods that accept at least one argument."""
        Session = sa.orm.session.Session
        blacklist = set(('begin', 'query'))
        ok = set()
        for meth in Session.public_methods:
            if meth in blacklist:
                continue
            spec = inspect.getargspec(getattr(Session, meth))
            if len(spec[0]) > 1 or spec[1]:
                ok.add(meth)
        return ok
    def _map_it(self, cls):
        """Map cls against a throwaway single-column table."""
        return mapper(cls, Table('t', sa.MetaData(), Column('id',
                      Integer, primary_key=True,
                      test_needs_autoincrement=True)))
    def _test_instance_guards(self, user_arg):
        """Every instance-accepting Session method must raise
        UnmappedInstanceError for user_arg; the watchdog set verifies
        full coverage of the public API."""
        watchdog = set()
        def x_raises_(obj, method, *args, **kw):
            watchdog.add(method)
            callable_ = getattr(obj, method)
            assert_raises(sa.orm.exc.UnmappedInstanceError,
                              callable_, *args, **kw)
        def raises_(method, *args, **kw):
            x_raises_(create_session(), method, *args, **kw)
        raises_('__contains__', user_arg)
        raises_('add', user_arg)
        raises_('add_all', (user_arg,))
        raises_('delete', user_arg)
        raises_('expire', user_arg)
        raises_('expunge', user_arg)
        # flush will no-op without something in the unit of work
        def _():
            class OK(object):
                pass
            self._map_it(OK)
            s = create_session()
            s.add(OK())
            x_raises_(s, 'flush', (user_arg,))
        _()
        raises_('is_modified', user_arg)
        raises_('merge', user_arg)
        raises_('refresh', user_arg)
        instance_methods = self._public_session_methods() \
            - self._class_methods - set([
                'bulk_update_mappings', 'bulk_insert_mappings',
                'bulk_save_objects'])
        eq_(watchdog, instance_methods,
            watchdog.symmetric_difference(instance_methods))
    def _test_class_guards(self, user_arg, is_class=True):
        """Class/mapper-accepting methods must raise UnmappedClassError
        (or NoInspectionAvailable for non-class arguments)."""
        watchdog = set()
        def raises_(method, *args, **kw):
            watchdog.add(method)
            callable_ = getattr(create_session(), method)
            if is_class:
                assert_raises(
                    sa.orm.exc.UnmappedClassError,
                    callable_, *args, **kw)
            else:
                assert_raises(
                    sa.exc.NoInspectionAvailable, callable_, *args, **kw)
        raises_('connection', mapper=user_arg)
        raises_('execute', 'SELECT 1', mapper=user_arg)
        raises_('get_bind', mapper=user_arg)
        raises_('scalar', 'SELECT 1', mapper=user_arg)
        eq_(watchdog, self._class_methods,
            watchdog.symmetric_difference(self._class_methods))
    def test_unmapped_instance(self):
        class Unmapped(object):
            pass
        self._test_instance_guards(Unmapped())
        self._test_class_guards(Unmapped)
    def test_unmapped_primitives(self):
        for prim in ('doh', 123, ('t', 'u', 'p', 'l', 'e')):
            self._test_instance_guards(prim)
            self._test_class_guards(prim, is_class=False)
    def test_unmapped_class_for_instance(self):
        class Unmapped(object):
            pass
        self._test_instance_guards(Unmapped)
        self._test_class_guards(Unmapped)
    def test_mapped_class_for_instance(self):
        class Mapped(object):
            pass
        self._map_it(Mapped)
        self._test_instance_guards(Mapped)
        # no class guards- it would pass.
    def test_missing_state(self):
        # instance created *before* its class was mapped
        class Mapped(object):
            pass
        early = Mapped()
        self._map_it(Mapped)
        self._test_instance_guards(early)
        self._test_class_guards(early, is_class=False)
class TLTransactionTest(fixtures.MappedTest):
    """Session interaction with a 'threadlocal'-strategy engine."""
    run_dispose_bind = 'once'
    __backend__ = True
    @classmethod
    def setup_bind(cls):
        return engines.testing_engine(options=dict(strategy='threadlocal'))
    @classmethod
    def define_tables(cls, metadata):
        Table('users', metadata, Column('id', Integer,
              primary_key=True, test_needs_autoincrement=True),
              Column('name', String(20)), test_needs_acid=True)
    @classmethod
    def setup_classes(cls):
        class User(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        users, User = cls.tables.users, cls.classes.User
        mapper(User, users)
    @testing.exclude('mysql', '<', (5, 0, 3), 'FIXME: unknown')
    def test_session_nesting(self):
        """A session flush participates in an externally begun
        threadlocal engine transaction."""
        User = self.classes.User
        sess = create_session(bind=self.bind)
        self.bind.begin()
        u = User(name='ed')
        sess.add(u)
        sess.flush()
        self.bind.commit()
class FlushWarningsTest(fixtures.MappedTest):
    """Mutating the flush plan from inside a mapper event (after_insert)
    emits a usage warning naming the offending operation."""
    run_setup_mappers = 'each'
    @classmethod
    def define_tables(cls, metadata):
        Table('user', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(20))
              )
        Table('address', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey('user.id')),
              Column('email', String(20))
              )
    @classmethod
    def setup_classes(cls):
        class User(cls.Basic):
            pass
        class Address(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        user, User = cls.tables.user, cls.classes.User
        address, Address = cls.tables.address, cls.classes.Address
        mapper(User, user, properties={
            'addresses': relationship(Address, backref="user")
        })
        mapper(Address, address)
    def test_o2m_cascade_add(self):
        Address = self.classes.Address
        def evt(mapper, conn, instance):
            instance.addresses.append(Address(email='x1'))
        self._test(evt, "collection append")
    def test_o2m_cascade_remove(self):
        def evt(mapper, conn, instance):
            del instance.addresses[0]
        self._test(evt, "collection remove")
    def test_m2o_cascade_add(self):
        User = self.classes.User
        def evt(mapper, conn, instance):
            instance.addresses[0].user = User(name='u2')
        self._test(evt, "related attribute set")
    def test_m2o_cascade_remove(self):
        def evt(mapper, conn, instance):
            a1 = instance.addresses[0]
            del a1.user
        self._test(evt, "related attribute delete")
    def test_plain_add(self):
        Address = self.classes.Address
        def evt(mapper, conn, instance):
            object_session(instance).add(Address(email='x1'))
        self._test(evt, "Session.add\(\)")
    def test_plain_merge(self):
        Address = self.classes.Address
        def evt(mapper, conn, instance):
            object_session(instance).merge(Address(email='x1'))
        self._test(evt, "Session.merge\(\)")
    def test_plain_delete(self):
        Address = self.classes.Address
        def evt(mapper, conn, instance):
            object_session(instance).delete(Address(email='x1'))
        self._test(evt, "Session.delete\(\)")
    def _test(self, fn, method):
        """Install fn as an after_insert hook and assert the usage
        warning for the given operation name is emitted at commit."""
        User = self.classes.User
        Address = self.classes.Address
        s = Session()
        event.listen(User, "after_insert", fn)
        # NOTE(review): Address has no mapped 'name' column; presumably
        # this lands as a plain attribute on the fixture class -- verify
        u1 = User(name='u1', addresses=[Address(name='a1')])
        s.add(u1)
        assert_raises_message(
            sa.exc.SAWarning,
            "Usage of the '%s'" % method,
            s.commit
        )
|
|
"""auxlib module provides several useful low-level functions as number thresholding or a couple of linear algebra
operations"""
import numpy as np
_epsilon = 0.0001
def to_colour_1(x):
    """Clamp a number into the closed interval [0, 1] as a float.

    Parameters
    ----------
    x : convertible to float
        number that will be cut to interval [0,1]

    Returns
    -------
    float
        0.0 if x < 0, 1.0 if x > 1, otherwise x converted to float
    """
    value = float(x)
    if value < 0:
        return 0.
    if value > 1:
        return 1.
    return value
def to_colour_255(x):
    """Clamp a number into the closed interval [0, 255] as an int.

    Parameters
    ----------
    x : convertible to int
        number that will be cut to interval [0,255]

    Returns
    -------
    int
        0 if x < 0, 255 if x > 255, otherwise x converted to int
    """
    value = int(x)
    if value < 0:
        return 0
    if value > 255:
        return 255
    return value
def positive(x):
    """Make a number strictly positive.

    Parameters
    ----------
    x : convertible to float
        a number

    Returns
    -------
    float
        _epsilon if x is not strictly positive, otherwise float(x)
    """
    if x <= 0:
        return _epsilon
    return float(x)
def negative(x):
    """Make a number non-positive.

    Parameters
    ----------
    x : convertible to float
        a number

    Returns
    -------
    float
        0.0 if x is non-negative, otherwise float(x)
    """
    if x >= 0:
        return 0.
    return float(x)
def check_positivity(r):
    """Check if all numbers in a container are strictly positive.

    Parameters
    ----------
    r : iterable
        an object with int or float values; each element is checked
        for strict positivity

    Returns
    -------
    bool
        True if all elements in r are strictly positive (vacuously True
        for an empty iterable), False otherwise
    """
    # `not any(i <= 0 ...)` short-circuits on the first violation,
    # exactly like the original hand-written loop did.
    return not any(i <= 0 for i in r)
def find_vals(a, r):
    """Get density coefficients from a logarithmic basis matrix and pixel values.

    Solves the normal equations (a^T a) x = a^T r and returns -x, i.e.
    the least-squares solution of ``a @ x ~ -r``.  Generalized from the
    original hard-coded 3-row loop: any number of rows is accepted.

    Parameters
    ----------
    a : ndarray
        logarithmic basis matrix, shape (n, 2) -- originally (3, 2)
    r : ndarray
        pixel values, shape (n,)

    Returns
    -------
    ndarray
        density coefficients, shape (2,)
    """
    a = np.asarray(a, dtype=float)
    r = np.asarray(r, dtype=float)
    # a.T.dot(a) / a.T.dot(r) compute exactly the m00/m01/m11 and v0/v1
    # accumulations of the original element-wise loop, vectorized.
    return -np.linalg.solve(a.T.dot(a), a.T.dot(r))
def get_physical_normal(n):
    """Given a unit vector, find the nearest physical unit vector.

    A physical unit vector cannot have all components positive, cannot
    have all components negative, and cannot have exactly one zero
    component.

    Parameters
    ----------
    n : ndarray
        unit vector, shape (3,)

    Returns
    -------
    ndarray
        physical unit vector, shape (3,)
    """
    def _strictly_positive(vec):
        # Equivalent of the module-level check_positivity, kept local.
        for component in vec:
            if component <= 0:
                return False
        return True

    def _exactly_one_zero(vec):
        # True when precisely one component is zero and the other two are not.
        return any(vec[i] == 0 and vec[(i + 1) % 3] * vec[(i + 2) % 3] != 0
                   for i in range(3))

    n = np.array(n)
    if not _strictly_positive(n) and not _strictly_positive(-n) \
            and not _exactly_one_zero(n):
        return n  # already physical
    # print("Best fitting plane non-physical, attempting to correct...")
    # All components share one sign: drop the smallest-magnitude component.
    if _strictly_positive(n) or _strictly_positive(-n):
        smallest = int(np.argmin(np.abs(n)))
        n[smallest] = 0
        n = n / np.linalg.norm(n)
    # Correction for normal vector with exactly one zero component:
    # also drop the smallest non-zero component and renormalize.
    if _exactly_one_zero(n):
        nonzero = np.nonzero(n)[0]
        drop = nonzero[np.argmin(np.abs(n[nonzero]))]
        n[drop] = 0
        n = n / np.linalg.norm(n)
    # print("Correction error is: ", np.linalg.norm(m - n))
    return n
def get_basis_from_normal(n):
    """Find a physical colour basis from a given physical unit vector.

    Parameters
    ----------
    n : ndarray
        physical unit vector, shape (3,)

    Returns
    -------
    list of ndarray
        two normalized (log) basis vectors, each of shape (3,)
    """
    a0, a1, a2 = abs(n[0]), abs(n[1]), abs(n[2])
    candidates = [[0, a2, a1], [a2, 0, a0], [a1, a0, 0]]
    selected = []
    for i, candidate in enumerate(candidates):
        j, k = (i + 1) % 3, (i + 2) % 3
        # Keep candidate i when the other two components of n do not
        # share a strict sign and are not both zero.
        if n[j] * n[k] <= 0 and (n[j] != 0 or n[k] != 0):
            selected.append(candidate)
    assert len(selected) > 1
    selected = np.array(selected)
    normalized = [vec / np.linalg.norm(vec) for vec in selected]
    return normalized[:2]
def orthonormal_rotation(v):
    """Give an orthonormal rotation matrix transforming [1, 0, 0] to *v*.

    Fixes two defects of the original: for v == [-1, 0, 0] the original
    divided by zero, and for any v with v[0] == 0 (v in the yz-plane)
    the Gram-Schmidt seed was parallel to v and produced NaNs.

    Parameters
    ----------
    v : ndarray
        versor (unit vector), shape (3,)

    Returns
    -------
    ndarray
        rotation matrix, shape (3,3), first column equal to v, det +1
    """
    v = np.asarray(v, dtype=float)
    if v[0] == 1.:
        return np.identity(3)
    if v[1] == 0. and v[2] == 0.:
        # v == [-1, 0, 0]: rotate pi about the z axis (determinant +1).
        return np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
    if v[0] == 0.:
        # v lies in the yz-plane, so its yz-projection equals v and the
        # Gram-Schmidt step below would yield the zero vector; e_x is
        # already orthogonal to v, so use it directly.
        u = np.array([1., 0., 0.])
    else:
        # Seed with the yz-projection of v and Gram-Schmidt against v
        # (the initial scaling is irrelevant -- u is renormalized).
        u = np.array([0., v[1], v[2]]) / (v[1] ** 2 + v[2] ** 2)
        u = u - v * np.dot(u, v)
        u = u / np.linalg.norm(u)
    w = np.cross(v, u)
    return np.transpose(np.array([v, u, w]))
def find_vector(mat):
    """Special eigenvector of a special matrix.

    Finds the eigenvector associated with the smallest eigenvalue of the
    lower-right 2x2 submatrix and turns it into a unit vector (versor).
    Assumes the matrix is in special coordinates such that the first row
    and first column (`mat[0,:]`, `mat[:,0]`) do not matter.

    Parameters
    ----------
    mat : ndarray
        (3,3) list or numpy array

    Returns
    -------
    ndarray
        normed eigenvector, shape (3,)
    """
    values, vectors = np.linalg.eig([[mat[1, 1], mat[1, 2]],
                                     [mat[2, 1], mat[2, 2]]])
    # first occurrence of the minimum, matching the original strict-< scan
    smallest = int(np.argmin(values))
    result = np.array([0., vectors[0][smallest], vectors[1][smallest]])
    return result / np.linalg.norm(result)
|
|
from __future__ import absolute_import
from __future__ import division
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from keras.engine import Layer
from keras.engine import InputSpec
from keras.objectives import categorical_crossentropy
from keras.objectives import sparse_categorical_crossentropy
class CRF(Layer):
"""An implementation of linear chain conditional random field (CRF).
    A linear chain CRF is defined to maximize the following likelihood function:
$$ L(W, U, b; y_1, ..., y_n) := \frac{1}{Z} \sum_{y_1, ..., y_n} \exp(-a_1' y_1 - a_n' y_n
- \sum_{k=1^n}((f(x_k' W + b) y_k) + y_1' U y_2)), $$
where:
$Z$: normalization constant
$x_k, y_k$: inputs and outputs
This implementation has two modes for optimization:
1. (`join mode`) optimized by maximizing join likelihood, which is optimal in theory of statistics.
        Note that in this case, CRF must be the output/last layer.
2. (`marginal mode`) return marginal probabilities on each time step and optimized via composition
likelihood (product of marginal likelihood), i.e., using `categorical_crossentropy` loss.
Note that in this case, CRF can be either the last layer or an intermediate layer (though not explored).
    For prediction (test phase), one can choose either Viterbi best path (class indices) or marginal
probabilities if probabilities are needed. However, if one chooses *join mode* for training,
Viterbi output is typically better than marginal output, but the marginal output will still perform
reasonably close, while if *marginal mode* is used for training, marginal output usually performs
much better. The default behavior is set according to this observation.
In addition, this implementation supports masking and accepts either onehot or sparse target.
# Examples
```python
model = Sequential()
    model.add(Embedding(3001, 300, mask_zero=True))
# use learn_mode = 'join', test_mode = 'viterbi', sparse_target = True (label indice output)
crf = CRF(10, sparse_target=True)
model.add(crf)
# crf.accuracy is default to Viterbi acc if using join-mode (default).
# One can add crf.marginal_acc if interested, but may slow down learning
model.compile('adam', loss=crf.loss_function, metrics=[crf.accuracy])
# y must be label indices (with shape 1 at dim 3) here, since `sparse_target=True`
model.fit(x, y)
# prediction give onehot representation of Viterbi best path
y_hat = model.predict(x_test)
```
# Arguments
units: Positive integer, dimensionality of the output space.
learn_mode: Either 'join' or 'marginal'.
The former train the model by maximizing join likelihood while the latter
maximize the product of marginal likelihood over all time steps.
test_mode: Either 'viterbi' or 'marginal'.
The former is recommended and as default when `learn_mode = 'join'` and
gives one-hot representation of the best path at test (prediction) time,
while the latter is recommended and chosen as default when `learn_mode = 'marginal'`,
which produces marginal probabilities for each time step.
        sparse_target: Boolean (default False) indicating if provided labels are one-hot or
indices (with shape 1 at dim 3).
        use_boundary: Boolean (default True) indicating if trainable start-end chain energies
should be added to model.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
(see [initializers](../initializers.md)).
chain_initializer: Initializer for the `chain_kernel` weights matrix,
used for the CRF chain energy.
(see [initializers](../initializers.md)).
boundary_initializer: Initializer for the `left_boundary`, 'right_boundary' weights vectors,
used for the start/left and end/right boundary energy.
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
activation: Activation function to use
(see [activations](../activations.md)).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
chain_regularizer: Regularizer function applied to
the `chain_kernel` weights matrix
(see [regularizer](../regularizers.md)).
boundary_regularizer: Regularizer function applied to
the 'left_boundary', 'right_boundary' weight vectors
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
chain_constraint: Constraint function applied to
the `chain_kernel` weights matrix
(see [constraints](../constraints.md)).
boundary_constraint: Constraint function applied to
the `left_boundary`, `right_boundary` weights vectors
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used.
Unrolling can speed-up a RNN, although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
# Input shape
3D tensor with shape `(nb_samples, timesteps, input_dim)`.
# Output shape
3D tensor with shape `(nb_samples, timesteps, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
"""
def __init__(self, units,
             learn_mode='join',
             test_mode=None,
             sparse_target=False,
             use_boundary=True,
             use_bias=True,
             activation='linear',
             kernel_initializer='glorot_uniform',
             chain_initializer='orthogonal',
             bias_initializer='zeros',
             boundary_initializer='zeros',
             kernel_regularizer=None,
             chain_regularizer=None,
             boundary_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             chain_constraint=None,
             boundary_constraint=None,
             bias_constraint=None,
             input_dim=None,
             unroll=False,
             **kwargs):
    # Store configuration and resolve string identifiers (activations,
    # initializers, regularizers, constraints) to Keras objects.
    super(CRF, self).__init__(**kwargs)
    self.supports_masking = True
    self.units = units  # number of output tags (F)
    self.learn_mode = learn_mode
    assert self.learn_mode in ['join', 'marginal']
    self.test_mode = test_mode
    # Default test-time behavior follows the training mode: joint training
    # pairs with Viterbi decoding, marginal training with marginal probs.
    if self.test_mode is None:
        self.test_mode = 'viterbi' if self.learn_mode == 'join' else 'marginal'
    else:
        assert self.test_mode in ['viterbi', 'marginal']
    self.sparse_target = sparse_target
    self.use_boundary = use_boundary
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.chain_initializer = initializers.get(chain_initializer)
    self.boundary_initializer = initializers.get(boundary_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.chain_regularizer = regularizers.get(chain_regularizer)
    self.boundary_regularizer = regularizers.get(boundary_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.chain_constraint = constraints.get(chain_constraint)
    self.boundary_constraint = constraints.get(boundary_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.unroll = unroll
def build(self, input_shape):
    """Create the layer weights.

    Weights:
      kernel:        (input_dim, units) unary (per-timestep) energies.
      chain_kernel:  (units, units) transition energies between tags.
      bias:          (units,) or None when use_bias=False.
      left_boundary / right_boundary: (units,) start/end boundary energies,
        only when use_boundary=True.
    """
    self.input_spec = [InputSpec(shape=input_shape)]
    self.input_dim = input_shape[-1]
    # NOTE(review): positional `shape` argument to add_weight is the legacy
    # Keras 1.x signature -- confirm against the Keras version in use.
    self.kernel = self.add_weight((self.input_dim, self.units),
                                  name='kernel',
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    self.chain_kernel = self.add_weight((self.units, self.units),
                                        name='chain_kernel',
                                        initializer=self.chain_initializer,
                                        regularizer=self.chain_regularizer,
                                        constraint=self.chain_constraint)
    if self.use_bias:
        self.bias = self.add_weight((self.units,),
                                    name='bias',
                                    initializer=self.bias_initializer,
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        # Callers must handle a None bias (see energy computations).
        self.bias = None
    if self.use_boundary:
        self.left_boundary = self.add_weight((self.units,),
                                             name='left_boundary',
                                             initializer=self.boundary_initializer,
                                             regularizer=self.boundary_regularizer,
                                             constraint=self.boundary_constraint)
        self.right_boundary = self.add_weight((self.units,),
                                              name='right_boundary',
                                              initializer=self.boundary_initializer,
                                              regularizer=self.boundary_regularizer,
                                              constraint=self.boundary_constraint)
    self.built = True
def call(self, X, mask=None):
    """Forward pass; output depends on the learning phase.

    Test phase: Viterbi-decoded one-hot path or marginal probabilities,
    depending on `test_mode`. Train phase in 'join' mode: a dummy zero
    tensor, because the real objective is computed in `loss_function`
    directly from the layer inputs.
    """
    if mask is not None:
        assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'
    if self.test_mode == 'viterbi':
        test_output = self.viterbi_decoding(X, mask)
    else:
        test_output = self.get_marginal_prob(X, mask)
    self.uses_learning_phase = True
    if self.learn_mode == 'join':
        # Placeholder with the correct output shape (B, T, units).
        train_output = K.zeros_like(K.dot(X, self.kernel))
        out = K.in_train_phase(train_output, test_output)
    else:
        if self.test_mode == 'viterbi':
            # Marginal training with Viterbi testing: train on marginals.
            train_output = self.get_marginal_prob(X, mask)
            out = K.in_train_phase(train_output, test_output)
        else:
            out = test_output
    return out
def compute_output_shape(self, input_shape):
    """Keep (batch, timesteps); replace the feature axis with `units`."""
    batch_and_time = input_shape[:2]
    return batch_and_time + (self.units,)
def compute_mask(self, input, mask=None):
    """Collapse the time-axis mask in joint mode; pass it through otherwise."""
    if mask is None or self.learn_mode != 'join':
        return mask
    # In 'join' mode the layer produces one loss per sequence, so a single
    # per-sample mask value is sufficient.
    return K.any(mask, axis=1)
def get_config(self):
    """Return the layer configuration for serialization.

    Initializers/regularizers/constraints and the activation are serialized
    back to identifiers; merged with the base Layer config.
    """
    config = {'units': self.units,
              'learn_mode': self.learn_mode,
              'test_mode': self.test_mode,
              'use_boundary': self.use_boundary,
              'use_bias': self.use_bias,
              'sparse_target': self.sparse_target,
              'kernel_initializer': initializers.serialize(self.kernel_initializer),
              'chain_initializer': initializers.serialize(self.chain_initializer),
              'boundary_initializer': initializers.serialize(self.boundary_initializer),
              'bias_initializer': initializers.serialize(self.bias_initializer),
              'activation': activations.serialize(self.activation),
              'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
              'chain_regularizer': regularizers.serialize(self.chain_regularizer),
              'boundary_regularizer': regularizers.serialize(self.boundary_regularizer),
              'bias_regularizer': regularizers.serialize(self.bias_regularizer),
              'kernel_constraint': constraints.serialize(self.kernel_constraint),
              'chain_constraint': constraints.serialize(self.chain_constraint),
              'boundary_constraint': constraints.serialize(self.boundary_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              # NOTE: `input_dim` is only set after build() has run.
              'input_dim': self.input_dim,
              'unroll': self.unroll}
    base_config = super(CRF, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@property
def loss_function(self):
    """Loss matching the learning mode.

    'join' mode: negative log-likelihood computed from the layer's own
    inputs (via the legacy `inbound_nodes` attribute), so the CRF must be
    the last layer of the model. 'marginal' mode: plain (sparse)
    categorical crossentropy on the marginal probabilities.
    """
    if self.learn_mode == 'join':
        def loss(y_true, y_pred):
            assert self.inbound_nodes, 'CRF has not connected to any layer.'
            # Fixed message typo: the argument is `learn_mode`, not `learn_model`.
            assert not self.outbound_nodes, 'When learn_mode="join", CRF must be the last layer.'
            if self.sparse_target:
                # Expand integer labels (B, T, 1) to one-hot (B, T, units).
                y_true = K.one_hot(K.cast(y_true[:, :, 0], 'int32'), self.units)
            X = self.inbound_nodes[0].input_tensors[0]
            mask = self.inbound_nodes[0].input_masks[0]
            nloglik = self.get_negative_log_likelihood(y_true, X, mask)
            return nloglik
        return loss
    else:
        if self.sparse_target:
            return sparse_categorical_crossentropy
        else:
            return categorical_crossentropy
@property
def accuracy(self):
    """Accuracy metric matching the configured test-time decoding mode."""
    use_viterbi = self.test_mode == 'viterbi'
    return self.viterbi_acc if use_viterbi else self.marginal_acc
@staticmethod
def _get_accuracy(y_true, y_pred, mask, sparse_target=False):
    """Token-level accuracy; masked timesteps are excluded from the mean."""
    y_pred = K.argmax(y_pred, -1)
    if sparse_target:
        # y_true holds integer labels in its last (singleton) axis.
        y_true = K.cast(y_true[:, :, 0], K.dtype(y_pred))
    else:
        y_true = K.argmax(y_true, -1)
    judge = K.cast(K.equal(y_pred, y_true), K.floatx())
    if mask is None:
        return K.mean(judge)
    else:
        # Weighted mean over unmasked positions only.
        mask = K.cast(mask, K.floatx())
        return K.sum(judge * mask) / K.sum(mask)
@property
def viterbi_acc(self):
    def acc(y_true, y_pred):
        # Ignore the `y_pred` Keras passes in; recompute predictions by
        # Viterbi decoding from the layer's own inputs.
        # NOTE(review): relies on the legacy `inbound_nodes` attribute --
        # confirm against the Keras version in use.
        X = self.inbound_nodes[0].input_tensors[0]
        mask = self.inbound_nodes[0].input_masks[0]
        y_pred = self.viterbi_decoding(X, mask)
        return self._get_accuracy(y_true, y_pred, mask, self.sparse_target)
    acc.func_name = 'viterbi_acc'
    return acc
@property
def marginal_acc(self):
    def acc(y_true, y_pred):
        # Ignore the `y_pred` Keras passes in; recompute predictions from
        # the marginal probabilities of the layer's own inputs.
        # NOTE(review): relies on the legacy `inbound_nodes` attribute --
        # confirm against the Keras version in use.
        X = self.inbound_nodes[0].input_tensors[0]
        mask = self.inbound_nodes[0].input_masks[0]
        y_pred = self.get_marginal_prob(X, mask)
        return self._get_accuracy(y_true, y_pred, mask, self.sparse_target)
    acc.func_name = 'marginal_acc'
    return acc
@staticmethod
def softmaxNd(x, axis=-1):
    """Numerically stable softmax along `axis` (max-shifted exponentials)."""
    shifted = x - K.max(x, axis=axis, keepdims=True)
    exp_shifted = K.exp(shifted)
    return exp_shifted / K.sum(exp_shifted, axis=axis, keepdims=True)
@staticmethod
def shift_left(x, offset=1):
    """Shift `x` left along the time axis, zero-padding on the right."""
    assert offset > 0
    remainder = x[:, offset:]
    padding = K.zeros_like(x[:, :offset])
    return K.concatenate([remainder, padding], axis=1)
@staticmethod
def shift_right(x, offset=1):
    """Shift `x` right along the time axis, zero-padding on the left."""
    assert offset > 0
    padding = K.zeros_like(x[:, :offset])
    remainder = x[:, :-offset]
    return K.concatenate([padding, remainder], axis=1)
def add_boundary_energy(self, energy, mask, start, end):
    """Add start/end boundary energies at the first/last valid timesteps."""
    # Broadcast (F,) boundary vectors to (1, 1, F).
    start = K.expand_dims(K.expand_dims(start, 0), 0)
    end = K.expand_dims(K.expand_dims(end, 0), 0)
    if mask is None:
        # No mask: boundaries are simply the first and last timesteps.
        energy = K.concatenate([energy[:, :1, :] + start, energy[:, 1:, :]], axis=1)
        energy = K.concatenate([energy[:, :-1, :], energy[:, -1:, :] + end], axis=1)
    else:
        mask = K.expand_dims(K.cast(mask, K.floatx()))
        # A 0->1 transition in the mask marks a sequence start; 1->0 an end.
        start_mask = K.cast(K.greater(mask, self.shift_right(mask)), K.floatx())
        end_mask = K.cast(K.greater(self.shift_left(mask), mask), K.floatx())
        energy = energy + start_mask * start
        energy = energy + end_mask * end
    return energy
def get_log_normalization_constant(self, input_energy, mask, **kwargs):
    """Compute logarithm of the normalization constant Z, where
    Z = sum exp(-E) -> logZ = log sum exp(-E) =: -nlogZ
    """
    # Forward recursion without sequences yields logZ replicated across tags;
    # should have logZ[:, i] == logZ[:, j] for any i, j
    logZ = self.recursion(input_energy, mask, return_sequences=False, **kwargs)
    return logZ[:, 0]
def get_energy(self, y_true, input_energy, mask):
    """Energy = a1' y1 + u1' y1 + y1' U y2 + u2' y2 + y2' U y3 + u3' y3 + an' y3

    Total path energy of the one-hot target sequence: unary energies of the
    chosen tags plus chain (transition) energies between consecutive tags.
    """
    input_energy = K.sum(input_energy * y_true, 2)  # (B, T): unary energy of chosen tags
    chain_energy = K.sum(K.dot(y_true[:, :-1, :], self.chain_kernel) * y_true[:, 1:, :], 2)  # (B, T-1)
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        chain_mask = mask[:, :-1] * mask[:, 1:]  # (B, T-1), mask[:,:-1]*mask[:,1:] makes it work with any padding
        input_energy = input_energy * mask
        chain_energy = chain_energy * chain_mask
    total_energy = K.sum(input_energy, -1) + K.sum(chain_energy, -1)  # (B, )
    return total_energy
def get_negative_log_likelihood(self, y_true, X, mask):
    """Compute the loss, i.e., negative log likelihood (normalize by number of time steps)
    likelihood = 1/Z * exp(-E) -> neg_log_like = - log(1/Z * exp(-E)) = logZ + E
    """
    # Fix: `build` sets self.bias = None when use_bias=False, and the
    # original unconditional `+ self.bias` raised a TypeError in that case.
    input_energy = K.dot(X, self.kernel)
    if self.bias is not None:
        input_energy = input_energy + self.bias
    input_energy = self.activation(input_energy)
    if self.use_boundary:
        input_energy = self.add_boundary_energy(input_energy, mask, self.left_boundary, self.right_boundary)
    energy = self.get_energy(y_true, input_energy, mask)
    logZ = self.get_log_normalization_constant(input_energy, mask, input_length=K.int_shape(X)[1])
    nloglik = logZ + energy
    # Normalize by the (masked) sequence length so the loss is per-timestep.
    if mask is not None:
        nloglik = nloglik / K.sum(K.cast(mask, K.floatx()), 1)
    else:
        nloglik = nloglik / K.cast(K.shape(X)[1], K.floatx())
    return nloglik
def step(self, input_energy_t, states, return_logZ=True):
    """One step of the forward (logZ) or Viterbi (argmin) recursion,
    driven by `K.rnn` from `recursion`.
    """
    # `prev_target_val` has shape = (B, F)
    # where B = batch_size, F = output feature dim
    # Note: `i` is of float32, due to the behavior of `K.rnn`
    prev_target_val, i, chain_energy = states[:3]
    t = K.cast(i[0, 0], dtype='int32')
    if len(states) > 3:
        # A padded mask was appended as a constant; slice timesteps t, t+1.
        if K.backend() == 'theano':
            m = states[3][:, t:(t + 2)]
        else:
            m = K.tf.slice(states[3], [0, t], [-1, 2])
        input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
        chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
    if return_logZ:
        # log-sum-exp accumulation toward the partition function.
        energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
        new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
        return new_target_val, [new_target_val, i + 1]
    else:
        # Viterbi: carry minimal energy; emit the argmin backpointer table.
        energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
        min_energy = K.min(energy, 1)
        argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
        return argmin_table, [min_energy, i + 1]
def recursion(self, input_energy, mask=None, go_backwards=False, return_sequences=True, return_logZ=True, input_length=None):
    """Forward (alpha) or backward (beta) recursion
    If `return_logZ = True`, compute the logZ, the normalization constant:
    \[ Z = \sum_{y1, y2, y3} exp(-E) # energy
    = \sum_{y1, y2, y3} exp(-(u1' y1 + y1' W y2 + u2' y2 + y2' W y3 + u3' y3))
    = sum_{y2, y3} (exp(-(u2' y2 + y2' W y3 + u3' y3)) sum_{y1} exp(-(u1' y1' + y1' W y2))) \]
    Denote:
    \[ S(y2) := sum_{y1} exp(-(u1' y1 + y1' W y2)), \]
    \[ Z = sum_{y2, y3} exp(log S(y2) - (u2' y2 + y2' W y3 + u3' y3)) \]
    \[ logS(y2) = log S(y2) = log_sum_exp(-(u1' y1' + y1' W y2)) \]
    Note that:
    yi's are one-hot vectors
    u1, u3: boundary energies have been merged
    If `return_logZ = False`, compute the Viterbi's best path lookup table.
    """
    chain_energy = self.chain_kernel
    chain_energy = K.expand_dims(chain_energy, 0)  # shape=(1, F, F): F=num of output features. 1st F is for t-1, 2nd F for t
    prev_target_val = K.zeros_like(input_energy[:, 0, :])  # shape=(B, F), dtype=float32
    if go_backwards:
        # Beta recursion: run the same step over the time-reversed sequence.
        input_energy = K.reverse(input_energy, 1)
        if mask is not None:
            mask = K.reverse(mask, 1)
    # State: accumulated value plus a float timestep counter (see `step`).
    initial_states = [prev_target_val, K.zeros_like(prev_target_val[:, :1])]
    constants = [chain_energy]
    if mask is not None:
        # Pad the mask by one timestep so `step` can look at t and t+1.
        mask2 = K.cast(K.concatenate([mask, K.zeros_like(mask[:, :1])], axis=1), K.floatx())
        constants.append(mask2)

    def _step(input_energy_i, states):
        return self.step(input_energy_i, states, return_logZ)

    target_val_last, target_val_seq, _ = K.rnn(_step, input_energy, initial_states, constants=constants,
                                               input_length=input_length, unroll=self.unroll)
    if return_sequences:
        if go_backwards:
            # Undo the time reversal so outputs align with the input order.
            target_val_seq = K.reverse(target_val_seq, 1)
        return target_val_seq
    else:
        return target_val_last
def forward_recursion(self, input_energy, **kwargs):
    """Alpha (left-to-right) pass; thin wrapper over `recursion`."""
    return self.recursion(input_energy, **kwargs)
def backward_recursion(self, input_energy, **kwargs):
    """Beta (right-to-left) pass; `recursion` with go_backwards=True."""
    return self.recursion(input_energy, go_backwards=True, **kwargs)
def get_marginal_prob(self, X, mask=None):
    """Per-timestep marginal tag probabilities via forward/backward recursions."""
    # Fix: `build` sets self.bias = None when use_bias=False, and the
    # original unconditional `+ self.bias` raised a TypeError in that case.
    input_energy = K.dot(X, self.kernel)
    if self.bias is not None:
        input_energy = input_energy + self.bias
    input_energy = self.activation(input_energy)
    if self.use_boundary:
        input_energy = self.add_boundary_energy(input_energy, mask, self.left_boundary, self.right_boundary)
    input_length = K.int_shape(X)[1]
    alpha = self.forward_recursion(input_energy, mask=mask, input_length=input_length)
    beta = self.backward_recursion(input_energy, mask=mask, input_length=input_length)
    if mask is not None:
        input_energy = input_energy * K.expand_dims(K.cast(mask, K.floatx()))
    # Combine shifted alpha/beta with the unary energies; softmax normalizes
    # the negative energies into probabilities.
    margin = -(self.shift_right(alpha) + input_energy + self.shift_left(beta))
    return self.softmaxNd(margin)
def viterbi_decoding(self, X, mask=None):
    """Decode the best tag sequence; returns a one-hot tensor (B, T, units)."""
    # Fix: `build` sets self.bias = None when use_bias=False, and the
    # original unconditional `+ self.bias` raised a TypeError in that case.
    input_energy = K.dot(X, self.kernel)
    if self.bias is not None:
        input_energy = input_energy + self.bias
    input_energy = self.activation(input_energy)
    if self.use_boundary:
        input_energy = self.add_boundary_energy(input_energy, mask, self.left_boundary, self.right_boundary)
    argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
    argmin_tables = K.cast(argmin_tables, 'int32')
    # backward to find best path, `initial_best_idx` can be any, as all elements in the last argmin_table are the same
    argmin_tables = K.reverse(argmin_tables, 1)
    initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]  # matrix instead of vector is required by tf `K.rnn`
    if K.backend() == 'theano':
        initial_best_idx = [K.T.unbroadcast(initial_best_idx[0], 1)]

    def gather_each_row(params, indices):
        # Pick params[i, indices[i]] for each row i of the batch.
        n = K.shape(indices)[0]
        if K.backend() == 'theano':
            return params[K.T.arange(n), indices]
        else:
            indices = K.transpose(K.stack([K.tf.range(n), indices]))
            return K.tf.gather_nd(params, indices)

    def find_path(argmin_table, best_idx):
        # Follow one backpointer step from the current best tag index.
        next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
        next_best_idx = K.expand_dims(next_best_idx)
        if K.backend() == 'theano':
            next_best_idx = K.T.unbroadcast(next_best_idx, 1)
        return next_best_idx, [next_best_idx]

    _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx, input_length=K.int_shape(X)[1], unroll=self.unroll)
    best_paths = K.reverse(best_paths, 1)
    best_paths = K.squeeze(best_paths, 2)
    return K.one_hot(best_paths, self.units)
|
|
import warnings
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.testing import assert_allclose_dense_sparse
from ..externals.six import string_types
def _identity(X):
"""The identity function.
"""
return X
class FunctionTransformer(BaseEstimator, TransformerMixin):
    """Constructs a transformer from an arbitrary callable.
    A FunctionTransformer forwards its X (and optionally y) arguments to a
    user-defined function or function object and returns the result of this
    function. This is useful for stateless transformations such as taking the
    log of frequencies, doing custom scaling, etc.
    Note: If a lambda is used as the function, then the resulting
    transformer will not be pickleable.
    .. versionadded:: 0.17
    Read more in the :ref:`User Guide <function_transformer>`.
    Parameters
    ----------
    func : callable, optional default=None
        The callable to use for the transformation. This will be passed
        the same arguments as transform, with args and kwargs forwarded.
        If func is None, then func will be the identity function.
    inverse_func : callable, optional default=None
        The callable to use for the inverse transformation. This will be
        passed the same arguments as inverse transform, with args and
        kwargs forwarded. If inverse_func is None, then inverse_func
        will be the identity function.
    validate : bool, optional default=True
        Indicate that the input X array should be checked before calling
        ``func``. The possibilities are:
        - If False, there is no input validation.
        - If True, then X will be converted to a 2-dimensional NumPy array or
          sparse matrix. If the conversion is not possible an exception is
          raised.
        .. deprecated:: 0.20
           ``validate=True`` as default will be replaced by
           ``validate=False`` in 0.22.
    accept_sparse : boolean, optional
        Indicate that func accepts a sparse matrix as input. If validate is
        False, this has no effect. Otherwise, if accept_sparse is false,
        sparse matrix inputs will cause an exception to be raised.
    pass_y : bool, optional default=False
        Indicate that transform should forward the y argument to the
        inner callable.
        .. deprecated::0.19
    check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
        the original inputs. It can be used for a sanity check, raising a
        warning when the condition is not fulfilled.
        .. versionadded:: 0.20
    kw_args : dict, optional
        Dictionary of additional keyword arguments to pass to func.
    inv_kw_args : dict, optional
        Dictionary of additional keyword arguments to pass to inverse_func.
    """

    def __init__(self, func=None, inverse_func=None, validate=None,
                 accept_sparse=False, pass_y='deprecated', check_inverse=True,
                 kw_args=None, inv_kw_args=None):
        # Only store parameters here (sklearn convention): no validation or
        # conversion until fit/transform, so get_params/set_params round-trip.
        self.func = func
        self.inverse_func = inverse_func
        self.validate = validate
        self.accept_sparse = accept_sparse
        self.pass_y = pass_y
        self.check_inverse = check_inverse
        self.kw_args = kw_args
        self.inv_kw_args = inv_kw_args

    def _check_input(self, X):
        """Optionally validate/convert X; warns while `validate` defaults to None."""
        # FIXME: Future warning to be removed in 0.22
        if self.validate is None:
            self._validate = True
            warnings.warn("The default validate=True will be replaced by "
                          "validate=False in 0.22.", FutureWarning)
        else:
            self._validate = self.validate
        if self._validate:
            return check_array(X, accept_sparse=self.accept_sparse)
        return X

    def _check_inverse_transform(self, X):
        """Check that func and inverse_func are the inverse."""
        # Spot-check at most ~100 evenly spaced rows to keep this cheap.
        idx_selected = slice(None, None, max(1, X.shape[0] // 100))
        try:
            assert_allclose_dense_sparse(
                X[idx_selected],
                self.inverse_transform(self.transform(X[idx_selected])))
        except AssertionError:
            warnings.warn("The provided functions are not strictly"
                          " inverse of each other. If you are sure you"
                          " want to proceed regardless, set"
                          " 'check_inverse=False'.", UserWarning)

    def fit(self, X, y=None):
        """Fit transformer by checking X.
        If ``validate`` is ``True``, ``X`` will be checked.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input array.
        Returns
        -------
        self
        """
        X = self._check_input(X)
        # Only verify invertibility when both directions are user-supplied.
        if (self.check_inverse and not (self.func is None or
                                        self.inverse_func is None)):
            self._check_inverse_transform(X)
        return self

    def transform(self, X, y='deprecated'):
        """Transform X using the forward function.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input array.
        y : (ignored)
            .. deprecated::0.19
        Returns
        -------
        X_out : array-like, shape (n_samples, n_features)
            Transformed input.
        """
        # Any explicit y (i.e. not the sentinel string) triggers a warning.
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)
        return self._transform(X, y=y, func=self.func, kw_args=self.kw_args)

    def inverse_transform(self, X, y='deprecated'):
        """Transform X using the inverse function.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input array.
        y : (ignored)
            .. deprecated::0.19
        Returns
        -------
        X_out : array-like, shape (n_samples, n_features)
            Transformed input.
        """
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on inverse_transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)
        return self._transform(X, y=y, func=self.inverse_func,
                               kw_args=self.inv_kw_args)

    def _transform(self, X, y=None, func=None, kw_args=None):
        """Shared implementation for transform/inverse_transform."""
        X = self._check_input(X)
        if func is None:
            func = _identity
        if (not isinstance(self.pass_y, string_types) or
                self.pass_y != 'deprecated'):
            # We do this to know if pass_y was set to False / True
            pass_y = self.pass_y
            warnings.warn("The parameter pass_y is deprecated since 0.19 and "
                          "will be removed in 0.21", DeprecationWarning)
        else:
            pass_y = False
        return func(X, *((y,) if pass_y else ()),
                    **(kw_args if kw_args else {}))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Relu and ReluGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def _elu_grad_grad(activation):
if activation < 0:
return np.exp(activation)
return 0
class ReluTest(test.TestCase):
    """Tests for nn_ops.relu against a NumPy reference, on CPU and GPU."""

    def _npRelu(self, np_features):
        # NumPy reference implementation: max(x, 0).
        return np.maximum(np_features, np.zeros(np_features.shape))

    def testNpRelu(self):
        # Sanity-check the reference implementation itself.
        self.assertAllClose(
            np.array([[0.0, 0.7, 0.0, 0.3, 0.0], [0.1, 0.0, 0.5, 0.0, 0.9]]),
            self._npRelu(
                np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                         0.9]])))

    def _testRelu(self, np_features):
        # Compare TF relu to the NumPy reference on values and shape.
        np_relu = self._npRelu(np_features)
        tf_relu = nn_ops.relu(np_features)
        self.assertAllClose(np_relu, tf_relu)
        self.assertShapeEqual(np_relu, tf_relu)

    def testNumbersCPU(self):
        for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
            # Force execution on CPU even if a GPU kernel is available for the type.
            with ops.device("/device:CPU:0"):
                self._testRelu(
                    np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

    def testNumbersGPU(self):
        if not test.is_gpu_available():
            self.skipTest("No GPU available")
        for t in [np.float16, np.float32, np.float64]:
            self._testRelu(
                np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

    def testReluInt8x4GoodShape(self):
        # The vectorized qint8 GPU kernel requires a size divisible by 4.
        if not test.is_gpu_available(cuda_only=True):
            self.skipTest("No GPU available")
        inputs = np.array([[-50, 7, 23, 0], [-1, -5, 6, 11]])
        np_relu = self._npRelu(inputs)
        tf_relu = nn_ops.relu(constant_op.constant(inputs, dtypes.qint8))
        self.assertAllClose(np_relu, tf_relu)
        self.assertShapeEqual(np_relu, tf_relu)

    @test_util.disable_xla("b/123338077")  # Passes with XLA
    def testReluInt8x4BadShape(self):
        # Sizes 9 and 17 are not multiples of 4 and must be rejected.
        if not test.is_gpu_available(cuda_only=True):
            self.skipTest("No GPU available")
        inputs = constant_op.constant(
            np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
        with self.assertRaisesRegex(
                errors.InvalidArgumentError,
                "Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
            self.evaluate(nn_ops.relu(inputs))
        inputs = constant_op.constant(
            np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
            dtypes.qint8)
        with self.assertRaisesRegex(
                errors.InvalidArgumentError,
                "Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
            self.evaluate(nn_ops.relu(inputs))

    def testNoElement(self):
        # Empty input must pass through without error.
        self._testRelu(np.array([[], []], dtype=np.float32))

    @test_util.disable_xla("b/157978028: Does not yet pass with XLA")
    def testNaNPropagation(self):
        # NaNs must propagate (not be clamped to 0) through relu.
        for t in [np.float16, np.float32, np.float64]:
            self._testRelu(np.array([-1, np.nan, 1, np.nan]).astype(t))

    # The gradient test for ReLU is a bit tricky as the derivative is not well
    # defined at around zero and we want to avoid that in terms of input values.
    def testGradientFloat32(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float32,
                order="F")
            err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(
                nn_ops.relu, [x], delta=1.0 / 1024))
            self.assertLess(err, 1e-6)

    # The gradient test for ReLU is a bit tricky as the derivative is not well
    # defined at around zero and we want to avoid that in terms of input values.
    def testGradientFloat16(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float16,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(nn_ops.relu, [x]))
            self.assertLess(err, 1e-6)

    def testGradientFloat64(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float64,
                order="F")
            err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(
                nn_ops.relu, [x], delta=1.0 / 1024))
            self.assertLess(err, 1e-15)

    def testGradGradFloat32(self):
        # Second-order gradient: differentiate the gradient of relu.
        with self.cached_session():

            def f(x):
                assert x.dtype == dtypes.float32
                with backprop.GradientTape() as tape:
                    tape.watch(x)
                    y = nn_ops.relu(x)
                return tape.gradient(y, x)

            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float32,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024))
            self.assertLess(err, 1e-4)

    def testGradGradFloat64(self):
        with self.cached_session():

            def f(x):
                assert x.dtype == dtypes.float64
                with backprop.GradientTape() as tape:
                    tape.watch(x)
                    y = nn_ops.relu(x)
                return tape.gradient(y, x)

            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float64,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(f, [x], delta=1.0 / 1024))
            self.assertLess(err, 1e-10)

    def testGradientScalar(self):
        # One SGD step on relu(x)^2 from x=100 with lr=0.25: grad=2*100=200,
        # so x becomes 100 - 0.25*200 = 50.
        x = variables.Variable(100.)

        def loss():
            return nn_ops.relu(x)**2

        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.25)
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(optimizer.minimize(loss))
        self.assertAllClose(x.read_value(), 50.0)

    def testGradientNoElement(self):
        # Gradient of relu on an empty tensor is an empty tensor.
        with self.cached_session():

            def f(x):
                with backprop.GradientTape() as tape:
                    tape.watch(x)
                    y = nn_ops.relu(x)
                return tape.gradient(y, x)

            x = np.asarray([[], []], dtype=np.float32)
            z = list(gradient_checker_v2.compute_gradient(f, [x]))[0][0]
            self.assertAllEqual(z, np.reshape(x, (0, 0)))
class Relu6Test(test.TestCase):
    """Tests for nn_ops.relu6 (min(max(x, 0), 6)) against a NumPy reference."""

    def _npRelu6(self, np_features):
        # NumPy reference implementation of relu6.
        sixes = np.copy(np_features)
        sixes.fill(6.0)
        return np.minimum(
            np.maximum(np_features, np.zeros(np_features.shape)), sixes)

    def testNpRelu6(self):
        # Sanity-check the reference implementation itself.
        self.assertAllClose(
            np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
            self._npRelu6(
                np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7,
                                                        0.9]])))

    def _testRelu6(self, np_features):
        np_relu6 = self._npRelu6(np_features)
        tf_relu6 = nn_ops.relu6(np_features)
        self.assertAllClose(np_relu6, tf_relu6)
        self.assertShapeEqual(np_relu6, tf_relu6)

    def testNumbersCPU(self):
        for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
            # Force execution on CPU even if a GPU kernel is available for the type.
            with ops.device("/device:CPU:0"):
                self._testRelu6(
                    np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

    def testNumbersGPU(self):
        if not test.is_gpu_available():
            self.skipTest("No GPU available")
        # Fix: use np.float32/np.float64 instead of np.float/np.double -- the
        # np.float alias was deprecated in NumPy 1.20 and removed in 1.24.
        # Also matches the dtype list used by ReluTest.testNumbersGPU.
        for t in [np.float16, np.float32, np.float64]:
            self._testRelu6(
                np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))

    @test_util.disable_xla("b/157978028: Does not yet pass with XLA")
    def testNaNPropagation(self):
        # NaNs must propagate (not be clamped) through relu6.
        for t in [np.float16, np.float32, np.float64]:
            self._testRelu6(np.array([-1, np.nan, 1, 7, np.nan]).astype(t))

    # The gradient test for ReLU6 is a bit tricky as the derivative is
    # not well defined at around zero and six and we want to avoid that
    # in terms of input values.
    def testGradientFloat32(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
                dtype=np.float32,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
            self.assertLess(err, 1e-4)

    def testGradientFloat64(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
                dtype=np.float64,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
            self.assertLess(err, 1e-10)
class LeakyReluTest(test.TestCase):
    """Tests for nn_ops.leaky_relu (max(x, alpha * x)) against NumPy."""

    def _npLeakyRelu(self, np_features, alpha=0.1):
        # NumPy reference implementation.
        return np.maximum(np_features, alpha * np_features)

    def testNpLeakyRelu(self):
        # Sanity-check the reference implementation itself.
        self.assertAllClose(
            np.array([[-0.09, 0.7, -0.05, 0.3, -0.01],
                      [0.1, -0.03, 0.5, -0.07, 0.9]]),
            self._npLeakyRelu(
                np.array([[-0.9, 0.7, -0.5, 0.3, -0.1], [0.1, -0.3, 0.5, -0.7,
                                                         0.9]]),
                alpha=0.1))

    def _testLeakyRelu(self, np_features, alpha):
        np_leaky_relu = self._npLeakyRelu(np_features, alpha)
        tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
        self.assertAllClose(np_leaky_relu, tf_leaky_relu)
        self.assertShapeEqual(np_leaky_relu, tf_leaky_relu)

    def testNumbersCPU(self):
        for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
            # Force execution on CPU even if a GPU kernel is available for the type.
            with ops.device("/device:CPU:0"):
                self._testLeakyRelu(
                    np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
                    alpha=0.2)

    def testNumbersGPU(self):
        if not test.is_gpu_available():
            self.skipTest("No GPU available")
        for t in [np.float16, np.float32, np.float64]:
            self._testLeakyRelu(
                np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
                alpha=0.1)

    def testNaNPropagation(self):
        # NaNs must propagate through leaky_relu.
        for t in [np.float16, np.float32, np.float64]:
            self._testLeakyRelu(np.array([-1, np.nan, 1, np.nan]).astype(t),
                                alpha=0.2)

    # The gradient test for Leaky ReLU is a bit tricky as the derivative is not
    # well defined at around zero and we want to avoid that in terms of input
    # values.
    def testGradientFloat32(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float32,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
            self.assertLess(err, 1e-4)

    def testGradientFloat64(self):
        with self.cached_session():
            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float64,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
            self.assertLess(err, 1e-10)

    def testGradGradFloat32(self):
        # Second-order gradient: differentiate the gradient of leaky_relu.
        with self.cached_session():

            def f(x):
                assert x.dtype == dtypes.float32
                with backprop.GradientTape() as tape:
                    tape.watch(x)
                    y = nn_ops.leaky_relu(x)
                return tape.gradient(y, x)

            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float32,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(f, [x]))
            self.assertLess(err, 1e-4)

    def testGradGradFloat64(self):
        with self.cached_session():

            def f(x):
                assert x.dtype == dtypes.float64
                with backprop.GradientTape() as tape:
                    tape.watch(x)
                    y = nn_ops.leaky_relu(x)
                return tape.gradient(y, x)

            x = np.asarray(
                [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
                dtype=np.float64,
                order="F")
            err = gradient_checker_v2.max_error(
                *gradient_checker_v2.compute_gradient(f, [x]))
            self.assertLess(err, 1e-10)

    def testGradientScalar(self):
        # One SGD step on leaky_relu(x, 0.05)^2 from x=-100 with lr=0.2:
        # grad = 2 * (-5) * 0.05 = -0.5, so x becomes -100 + 0.1 = -99.9.
        x = variables.Variable(-100.)

        def loss():
            return nn_ops.leaky_relu(x, 0.05)**2

        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2)
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(optimizer.minimize(loss))
        self.assertAllClose(x.read_value(), -99.9)

    def testUnexpectedAlphaValue(self):
        # alpha outside [0, 1] is unusual but must still compute max(x, a*x).
        self.assertAllClose(
            np.array([[-9.0, 0.7, -5.0, 0.3, -0.1], [0.1, -3.0, 0.5, -27.0, 0.9]]),
            nn_ops.leaky_relu(
                np.array([[-0.9, 0.7, -0.5, 0.3, -0.01],
                          [0.1, -0.3, 0.5, -2.7, 0.9]]),
                alpha=10))
        self.assertAllClose(
            np.array([[9.0, 0.7, 5.0, 0.3, 0.1], [0.1, 3.0, 0.5, 27.0, 0.9]]),
            nn_ops.leaky_relu(
                np.array([[-0.9, 0.7, -0.5, 0.3, -0.01],
                          [0.1, -0.3, 0.5, -2.7, 0.9]]),
                alpha=-10))
class EluTest(test.TestCase):
  """Tests for the ELU activation op and its first/second derivatives."""

  def _npElu(self, np_features):
    """NumPy reference: elu(x) = exp(x) - 1 for x < 0, x otherwise."""
    return np.where(np_features < 0, np.exp(np_features) - 1, np_features)

  def testNpElu(self):
    # Sanity-check the NumPy reference itself against precomputed values.
    inputs = np.array([[-0.9, 0.7, -0.5, 0.3, -0.1],
                       [0.1, -0.3, 0.5, -0.7, 0.9]])
    expected = np.array(
        [[-0.59343034025, 0.7, -0.39346934028, 0.3, -0.09516258196],
         [0.1, -0.25918177931, 0.5, -0.5034146962, 0.9]])
    self.assertAllClose(expected, self._npElu(inputs))

  def _testElu(self, np_features):
    # Compare the TF op against the NumPy reference on the same input.
    expected = self._npElu(np_features)
    actual = nn_ops.elu(np_features)
    self.assertAllCloseAccordingToType(expected, actual)
    self.assertShapeEqual(expected, actual)

  def testNumbersCPU(self):
    for dtype in [np.float16, np.float32, np.float64]:
      # Pin to CPU so the CPU kernel is exercised even on GPU machines.
      with ops.device("/device:CPU:0"):
        self._testElu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(dtype))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for dtype in [np.float16, np.float32, np.float64]:
      self._testElu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(dtype))

  def testNaNPropagation(self):
    # NaN inputs must come back as NaN for every supported float dtype.
    for dtype in [np.float16, np.float32, np.float64]:
      self._testElu(np.array([-1, np.nan, 1, np.nan]).astype(dtype))

  def testGradientFloat32(self):
    with self.cached_session():
      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.elu, [inputs]))
      self.assertLess(max_err, 1e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.elu, [inputs]))
      self.assertLess(max_err, 1e-6)

  def testGradGrad(self):
    # Spot-check d2(elu)/dx2 against the closed-form helper at a few points.
    with self.cached_session():

      def second_order_grad(x):
        with backprop.GradientTape(persistent=True) as tape:
          tape.watch(x)
          y = nn_ops.elu(x)
          dy = tape.gradient(y, x)
        return tape.gradient(dy, x)

      for point in [-1., -0.5, 0.5, 1.]:
        got = self.evaluate(second_order_grad(constant_op.constant(point)))
        want = _elu_grad_grad(point)
        self.assertLess(np.abs(got - want), 1e-4)

  def testGradGradFloat32(self):
    with self.cached_session():

      def first_order_grad(x):
        assert x.dtype == dtypes.float32
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.elu(x)
        return tape.gradient(y, x)

      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(first_order_grad, [inputs]))
      self.assertLess(max_err, 1e-4)

  def testGradGradFloat64(self):
    with self.cached_session():

      def first_order_grad(x):
        assert x.dtype == dtypes.float64
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.elu(x)
        return tape.gradient(y, x)

      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(first_order_grad, [inputs]))
      self.assertLess(max_err, 1e-6)
class SeluTest(test.TestCase):
  """Tests for the SELU activation op and its gradients."""

  def _npSelu(self, np_features):
    """NumPy reference SELU using the standard fixed scale/alpha constants."""
    scale = 1.0507009873554804934193349852946
    scale_alpha = 1.7580993408473768599402175208123
    return np.where(np_features < 0, scale_alpha * (np.exp(np_features) - 1),
                    scale * np_features)

  def testNpSelu(self):
    # Sanity-check the NumPy reference itself against precomputed values.
    inputs = np.array([[-0.9, 0.7, -0.5, 0.3, -0.1],
                       [0.1, -0.3, 0.5, -0.7, 0.9]])
    expected = np.array(
        [[-1.0433095, 0.73549069, -0.6917582, 0.3152103, -0.16730527],
         [0.1050701, -0.45566732, 0.5253505, -0.88505305, 0.9456309]])
    self.assertAllClose(expected, self._npSelu(inputs))

  def _testSelu(self, np_features):
    # Compare the TF op against the NumPy reference on the same input.
    expected = self._npSelu(np_features)
    actual = nn_ops.selu(np_features)
    self.assertAllCloseAccordingToType(expected, actual)
    self.assertShapeEqual(expected, actual)

  def testNumbers(self):
    for dtype in [np.float16, np.float32, np.float64]:
      self._testSelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(dtype))
      # Repeat pinned to CPU in case a GPU kernel would otherwise be picked.
      with ops.device("/device:CPU:0"):
        self._testSelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(dtype))

  def testGradientFloat32(self):
    with self.cached_session():
      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(
              nn_ops.selu, [inputs], delta=1.0 / 1024))
      self.assertLess(max_err, 1e-4)

  def testGradientFloat64(self):
    with self.cached_session():
      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(nn_ops.selu, [inputs]))
      self.assertLess(max_err, 1e-6)

  def testGradGradFloat32(self):
    with self.cached_session():

      def first_order_grad(x):
        assert x.dtype == dtypes.float32
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.selu(x)
        return tape.gradient(y, x)

      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(
              first_order_grad, [inputs], delta=1.0 / 1024))
      self.assertLess(max_err, 1e-4)

  def testGradGradFloat64(self):
    with self.cached_session():

      def first_order_grad(x):
        assert x.dtype == dtypes.float64
        with backprop.GradientTape() as tape:
          tape.watch(x)
          y = nn_ops.selu(x)
        return tape.gradient(y, x)

      inputs = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float64, order="F")
      max_err = gradient_checker_v2.max_error(
          *gradient_checker_v2.compute_gradient(first_order_grad, [inputs]))
      self.assertLess(max_err, 1e-6)
class CreluTest(test.TestCase):
  """Tests for crelu, which concatenates relu(x) with relu(-x)."""

  def testCreluShape(self):
    # crelu doubles the size of the concatenation axis (last by default).
    features = random_ops.random_normal([50, 5, 7, 10])
    result = nn_ops.crelu(features)
    self.assertEqual([50, 5, 7, 20], result.get_shape())

  def _testCrelu(self, np_features):
    # NumPy reference: relu of the input and of its negation, concatenated
    # along the last axis.
    pos_part = np.maximum(np_features, np.zeros_like(np_features))
    neg_part = np.maximum(-np_features, np.zeros_like(np_features))
    expected = np.concatenate((pos_part, neg_part),
                              len(np_features.shape) - 1)
    actual = nn_ops.crelu(np_features)
    self.assertAllClose(expected, actual)
    self.assertShapeEqual(expected, actual)

  def testNumbersCPU(self):
    for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
      # Pin to CPU so the CPU kernel is exercised even on GPU machines.
      with ops.device("/device:CPU:0"):
        self._testCrelu(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(dtype))

  def testNumbersGPU(self):
    if not test.is_gpu_available():
      self.skipTest("No GPU available")
    for dtype in [np.float16, np.float32, np.float64]:
      self._testCrelu(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(dtype))

  def testNumbersWithAxis0(self):
    actual = nn_ops.crelu(
        np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=0)
    expected = np.array([[0, 7, 0, 3, 0], [1, 0, 5, 0, 9], [9, 0, 5, 0, 1],
                         [0, 3, 0, 7, 0]])
    self.assertAllEqual(expected, actual)

  def testNumbersWithAxis1(self):
    actual = nn_ops.crelu(
        np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]), axis=1)
    expected = np.array([[0, 7, 0, 3, 0, 9, 0, 5, 0, 1],
                         [1, 0, 5, 0, 9, 0, 3, 0, 7, 0]])
    self.assertAllEqual(expected, actual)
# Standard entry point for running this test file directly.
if __name__ == "__main__":
  test.main()
|
|
import datetime
import django
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.db import connection, models
qn = connection.ops.quote_name
from taggit.models import TagBase, GenericTaggedItemBase, ItemBase
from .settings import CATEGORY_CHOICES
def get_queryset_and_model(queryset_or_model):
    """
    Normalize ``queryset_or_model`` into a ``(queryset, model)`` pair.

    A ``QuerySet`` is returned together with its ``model``; a ``Model``
    class is paired with a queryset built from its default manager.
    """
    # A QuerySet exposes ``model``; a Model class does not, so fall back
    # to the default manager (hasattr mirrors the AttributeError check).
    if hasattr(queryset_or_model, 'model'):
        return queryset_or_model, queryset_or_model.model
    return queryset_or_model._default_manager.all(), queryset_or_model
class ConceptModelManager(models.Manager):
    """
    Manager for ``Concept`` that provides tag-style lookups via raw SQL
    against the generic tagged-item table: intersection and union queries
    over a list of tags, plus per-model tag-usage reports with counts.
    """

    def get_intersection_by_model(self, queryset_or_model, tags):
        """
        Create a ``QuerySet`` containing instances of the specified
        model associated with *all* of the given list of tags.
        """
        tag_count = len(tags)
        queryset, model = get_queryset_and_model(queryset_or_model)

        if not tag_count:
            # No tags given -- nothing can match all of them.
            return model._default_manager.none()

        model_table = qn(model._meta.db_table)
        # This query selects the ids of all objects which have all the
        # given tags.
        query = """
        SELECT %(model_pk)s
        FROM %(model)s, %(tagged_item)s
        WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
          AND %(tagged_item)s.tag_id IN (%(tag_id_placeholders)s)
          AND %(model_pk)s = %(tagged_item)s.object_id
        GROUP BY %(model_pk)s
        HAVING COUNT(%(model_pk)s) = %(tag_count)s""" % {
            'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),
            'model': model_table,
            'tagged_item': qn(self.model._meta.db_table),
            'content_type_id': ContentType.objects.get_for_model(model).pk,
            'tag_id_placeholders': ','.join(['%s'] * tag_count),
            'tag_count': tag_count,
        }

        cursor = connection.cursor()
        cursor.execute(query, [tag.pk for tag in tags])
        object_ids = [row[0] for row in cursor.fetchall()]
        if len(object_ids) > 0:
            return queryset.filter(pk__in=object_ids)
        else:
            return model._default_manager.none()

    def get_union_by_model(self, queryset_or_model, tags):
        """
        Create a ``QuerySet`` containing instances of the specified
        model associated with *any* of the given list of tags.
        """
        tag_count = len(tags)
        queryset, model = get_queryset_and_model(queryset_or_model)

        if not tag_count:
            return model._default_manager.none()

        model_table = qn(model._meta.db_table)
        # This query selects the ids of all objects which have any of
        # the given tags.
        query = """
        SELECT %(model_pk)s
        FROM %(model)s, %(tagged_item)s
        WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
          AND %(tagged_item)s.tag_id IN (%(tag_id_placeholders)s)
          AND %(model_pk)s = %(tagged_item)s.object_id
        GROUP BY %(model_pk)s""" % {
            'model_pk': '%s.%s' % (model_table, qn(model._meta.pk.column)),
            'model': model_table,
            'tagged_item': qn(self.model._meta.db_table),
            'content_type_id': ContentType.objects.get_for_model(model).pk,
            'tag_id_placeholders': ','.join(['%s'] * tag_count),
        }

        cursor = connection.cursor()
        cursor.execute(query, [tag.pk for tag in tags])
        object_ids = [row[0] for row in cursor.fetchall()]
        if len(object_ids) > 0:
            return queryset.filter(pk__in=object_ids)
        else:
            return model._default_manager.none()

    def _get_usage(self, model, counts=False, min_count=None, extra_joins=None, extra_criteria=None, params=None):
        """
        Perform the custom SQL query for ``usage_for_model`` and
        ``usage_for_queryset``.

        Returns a list of tag instances; each carries a ``count``
        attribute when ``counts`` is True (implied by ``min_count``).
        """
        # Fix: the previous version appended to ``params`` and passed it to
        # cursor.execute even when the default ``None`` was used, raising
        # TypeError; likewise None joins/criteria would be interpolated as
        # the literal string "None" into the SQL.
        if params is None:
            params = []
        if extra_joins is None:
            extra_joins = ''
        if extra_criteria is None:
            extra_criteria = ''
        if min_count is not None:
            counts = True

        model_table = qn(model._meta.db_table)
        model_pk = '%s.%s' % (model_table, qn(model._meta.pk.column))
        # The three ``%%s`` slots are filled below with extra joins, extra
        # criteria and the optional HAVING clause (in that order).
        query = """
        SELECT DISTINCT %(tag)s.id, %(tag)s.name%(count_sql)s
        FROM
            %(tag)s
            INNER JOIN %(tagged_item)s
                ON %(tag)s.id = %(tagged_item)s.tag_id
            INNER JOIN %(model)s
                ON %(tagged_item)s.object_id = %(model_pk)s
            %%s
        WHERE %(tagged_item)s.content_type_id = %(content_type_id)s
            %%s
        GROUP BY %(tag)s.id, %(tag)s.name
        %%s
        ORDER BY %(tag)s.name ASC""" % {
            'tag': qn(self.model._meta.db_table),
            'count_sql': counts and (', COUNT(%s)' % model_pk) or '',
            'tagged_item': qn(ConceptItem._meta.db_table),
            'model': model_table,
            'model_pk': model_pk,
            'content_type_id': ContentType.objects.get_for_model(model).pk,
        }

        min_count_sql = ''
        if min_count is not None:
            min_count_sql = 'HAVING COUNT(%s) >= %%s' % model_pk
            params.append(min_count)

        cursor = connection.cursor()
        cursor.execute(query % (extra_joins, extra_criteria, min_count_sql), params)
        tags = []
        for row in cursor.fetchall():
            # Instantiate from (id, name); attach the count when requested.
            t = self.model(*row[:2])
            if counts:
                t.count = row[2]
            tags.append(t)
        return tags

    def usage_for_model(self, model, counts=False, min_count=None, filters=None):
        """
        Obtain a list of tags associated with instances of the given
        Model class.

        If ``counts`` is True, a ``count`` attribute will be added to
        each tag, indicating how many times it has been used against
        the Model class in question.

        If ``min_count`` is given, only tags which have a ``count``
        greater than or equal to ``min_count`` will be returned.
        Passing a value for ``min_count`` implies ``counts=True``.

        To limit the tags (and counts, if specified) returned to those
        used by a subset of the Model's instances, pass a dictionary
        of field lookups to be applied to the given Model as the
        ``filters`` argument.
        """
        if filters is None:
            filters = {}

        queryset = model._default_manager.filter()
        # NOTE(review): ``query.add_filter`` is a legacy internal Django
        # API; kept for compatibility with the Django versions this module
        # targets (see the pre-1.2 branches elsewhere in this file).
        for f in filters.items():
            queryset.query.add_filter(f)
        usage = self.usage_for_queryset(queryset, counts, min_count)

        return usage

    def usage_for_queryset(self, queryset, counts=False, min_count=None):
        """
        Obtain a list of tags associated with instances of a model
        contained in the given queryset.

        If ``counts`` is True, a ``count`` attribute will be added to
        each tag, indicating how many times it has been used against
        the Model class in question.

        If ``min_count`` is given, only tags which have a ``count``
        greater than or equal to ``min_count`` will be returned.
        Passing a value for ``min_count`` implies ``counts=True``.
        """
        if getattr(queryset.query, 'get_compiler', None):
            # Django 1.2+
            compiler = queryset.query.get_compiler(using='default')
            extra_joins = ' '.join(compiler.get_from_clause()[0][1:])
            where, params = queryset.query.where.as_sql(
                compiler.quote_name_unless_alias, compiler.connection
            )
        else:
            # Django pre-1.2
            extra_joins = ' '.join(queryset.query.get_from_clause()[0][1:])
            where, params = queryset.query.where.as_sql()

        if where:
            extra_criteria = 'AND %s' % where
        else:
            extra_criteria = ''
        return self._get_usage(queryset.model, counts, min_count, extra_joins, extra_criteria, params)
class Concept(TagBase):
    """An idea or other freeform categorization of something"""
    # Optional broad grouping; choices come from settings.CATEGORY_CHOICES.
    category = models.CharField(
        _('category'),
        max_length=20,
        blank=True, null=True,
        choices=CATEGORY_CHOICES,
        default='concept')
    # Set automatically on first save via auto_now_add.
    created = models.DateTimeField(_('created'),
        auto_now_add=True,
        editable=False)
    # Maintained by ConceptItem.save() whenever this concept is applied.
    last_tagged = models.DateTimeField(_('last time tagged'),
        blank=True, null=True, editable=False, db_index=True)
    substitute = models.ForeignKey('Concept',
        blank=True, null=True, verbose_name=_('substitute'),
        help_text=_("""Tag to use instead of this one. Moves current
        associations to the substitute tag and new association attempts
        are automatically swapped."""))
    enabled = models.BooleanField(_("Enabled"), default=True,
        help_text=_("""If unchecked, it will remove current associations and
        will not allow new associations."""))
    url = models.CharField(blank=True, max_length=255,
        help_text=_("A URL for more information regarding this concept."))
    # Geographic identifiers and extents for place-like concepts.
    woeid = models.IntegerField(_('where on earth id'), blank=True, null=True)
    geonamesid = models.IntegerField(_('GeoNames id'), blank=True, null=True)
    latitude = models.DecimalField(_('latitude'),
        max_digits=11, decimal_places=6, blank=True, null=True)
    longitude = models.DecimalField(_('longitude'),
        max_digits=11, decimal_places=6, blank=True, null=True)
    bbox_n = models.DecimalField(_('bounding box north'),
        max_digits=11, decimal_places=6, blank=True, null=True)
    bbox_s = models.DecimalField(_('bounding box south'),
        max_digits=11, decimal_places=6, blank=True, null=True)
    bbox_e = models.DecimalField(_('bounding box east'),
        max_digits=11, decimal_places=6, blank=True, null=True)
    bbox_w = models.DecimalField(_('bounding box west'),
        max_digits=11, decimal_places=6, blank=True, null=True)
    geometry = models.TextField(
        _('geometry'),
        blank=True, null=True)

    objects = ConceptModelManager()

    @property
    def items(self):
        """Reverse accessor for this concept's ConceptItem associations."""
        # The related_name pattern gained the app label in Django 1.2; the
        # "concepts" prefix is presumably this app's label -- TODO confirm.
        if django.VERSION < (1, 2):
            return self.conceptitem_items
        else:
            return self.concepts_conceptitem_items

    def name_with_sub(self):
        """
        Render the name, or name with indication what its substitute is
        """
        if self.substitute:
            return "%s → %s" % (self.name, self.substitute)
        elif not self.enabled:
            # Rendered struck-through in the admin (allow_tags below).
            return '<span style="text-decoration: line-through">%s</span>' % self.name
        else:
            return self.name
    name_with_sub.short_description = _("Name")
    name_with_sub.admin_order_field = "name"
    name_with_sub.allow_tags = True

    def save(self, *args, **kwargs):
        # NOTE(review): ``created`` uses auto_now_add, so this manual
        # assignment is presumably redundant -- confirm before removing.
        if not self.id:
            self.created = datetime.datetime.today()
        super(Concept, self).save(*args, **kwargs)
        # A substituted concept hands its associations to the substitute.
        if self.substitute:
            items = self.items.all()
            items.update(tag=self.substitute)
        # A disabled concept drops all current associations.
        if not self.enabled:
            self.items.all().delete()

    class Meta:
        verbose_name = _("Concept")
        verbose_name_plural = _("Concepts")
        ordering = ("name",)
class ConceptItemBase(ItemBase):
    # The related_name pattern for the tag FK changed in Django 1.2 to
    # include the app label; pick the form matching the running version.
    if django.VERSION < (1, 2):
        tag = models.ForeignKey(Concept, related_name="%(class)s_items")
    else:
        tag = models.ForeignKey(Concept, related_name="%(app_label)s_%(class)s_items")

    class Meta:
        abstract = True

    @classmethod
    def tags_for(cls, model, instance=None):
        """
        Return the Concepts applied to ``instance``, or (when ``instance``
        is None) every Concept that has at least one association.
        """
        if instance is not None:
            return cls.tag_model().objects.filter(**{
                '%s__content_object' % cls.tag_relname(): instance
            })
        return cls.tag_model().objects.filter(**{
            '%s__content_object__isnull' % cls.tag_relname(): False
        }).distinct()
class ConceptItem(GenericTaggedItemBase, ConceptItemBase):
    # When this association was created; mirrored onto the tag's
    # ``last_tagged`` field in save().
    added = models.DateTimeField(auto_now_add=True, db_index=True)
    weight = models.IntegerField(blank=True, null=True)

    def save(self, *args, **kwargs):
        """
        Add the date added and last_tagged to Tag
        """
        # NOTE(review): ``added`` uses auto_now_add, so this manual
        # assignment is presumably redundant -- confirm before removing.
        if not self.added:
            self.added = datetime.datetime.now()
        super(ConceptItem, self).save(*args, **kwargs)
        self.tag.last_tagged = self.added
        # Queryset update avoids re-triggering Concept.save() side effects
        # (substitution / disabled-cleanup).
        Concept.objects.filter(id=self.tag.id).update(last_tagged=self.added)
        # self.tag.save()

    class Meta:
        verbose_name = _("Concept Item")
        verbose_name_plural = _("Concept Items")
        ordering = ('id',)
# The association between concepts and a related item must be
# deleted when the item is deleted
def delete_listener(sender, instance, **kwargs):
    """Remove every concept association for ``instance`` when it is deleted."""
    content_type = ContentType.objects.get_for_model(sender)
    stale_items = ConceptItem.objects.filter(content_type=content_type,
                                             object_id=instance.id)
    stale_items.delete()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.