prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Repeat the following code:\n```python\n# coding=utf-8\n\nfrom rest_test import *\nimport api.resources.users.users\n\n__author__ = 'Glebov Boris'\n\n\nclass UserListTest(RestBaseTest):\n \"\"\"\n Test all case for GET: /api/v1/users/\n \"\"\"\n def test_users_list_success(self):\n ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# coding=utf-8\n\nfrom rest_test import *\nimport api.resources.users.users\n\n__author__ = 'Glebov Boris'\n\n\nclass UserListTest(RestBaseTest):\n \"\"\"\n Test all case for GET: /api/v1/users/\n \"\"\"\n def test_users_list_succe... | ```python
# coding=utf-8
from rest_test import *
import api.resources.users.users
__author__ = 'Glebov Boris'
class UserListTest(RestBaseTest):
"""
Test all case for GET: /api/v1/users/
"""
def test_users_list_success(self):
client_app = self.get_app_client()
r = client_app.get('/api/v1/users/')
data = json.loads(r.data)
self.assertEqual(data[u'status'], 200)
class UserSignInTest(RestBaseTest):
"""
Test all case for POST: /api/v1/users/signin/
"""
def test_users_signin_fail(self):
"""
Check what:
1. User's login not found,
OR
2. Password is incorrect
"""
client_app = self.get_app_client()
data = {
u'login': u'123',
u'password': u'123'
}
r = client_app.post('/api/v1/user/signin/', headers=self.headers, data=json.dumps(data))
response_data = json.loads(r.data)
self.assertEqual(4001, response_data[u'status'])
def test_users_signin_ok(self):
"""
Check what:
1. Users with login is exists
2. Password is correct
"""
us = ServiceLocator.resolve(ServiceLocator.USERS)
us.create(u'user1', u'user1@example.com', u'123', first_name=u'demo', last_name=u'demo')
client_app = self.get_app_client()
data = {
u'login': u'user1',
u'password': u'123'
}
r = client_app.post('/api/v1/user/signin/', headers=self.headers, data=json.dumps(data))
response_data = json.loads(r.data)
self.assertEqual(response_data[u'status'], 200)
self.assertIsNotNone(response_data[u'data'][u'auth_token'])
class UserSignUpTest(RestBaseTest):
"""
Test all case for POST: /api/v1/users/signup/
"""
def test_users_signup_fail_email_is_exists(self):
"""
Checks what we normal handled next cases:
1. Email is exists
"""
self.clear_db()
us = ServiceLocator.resolve(ServiceLocator.USERS)
us.create(u'user1', u'user1@example.com', u'123', first_name=u'demo', last_name=u'demo')
client_app = self.get_app_client()
data = {
u'login': u'user1',
u'email': u'user222@example.com',
u'password': u'123',
u'first_name': u'aa',
u'last_name': u'aa'
}
r = client_app.post('/api/v1/user/signup/', headers=self.headers, data=json.dumps(data))
response_data = json.loads(r.data)
self.assertEqual(response_data[u'status'], 4001)
self.assertEqual(response_data[u'errors'][u'error_type'], u'user_already_exists_error', u'Login already exists')
def test_users_signup_fail_login_is_exists(self):
"""
Checks what we normal handled next cases:
1. Login is exists
"""
self.clear_db()
us = ServiceLocator.resolve(ServiceLocator.USERS)
us.create(u'user1', u'user1@example.com', u'123', first_name=u'demo', last_name=u'demo')
client_app = self.get_app_client()
data = {
u'login': u'user2',
u'email': u'user1@example.com',
u'password': u'123',
u'first_name': u'aa',
u'last_name': u'aa'
}
r = client_app.post('/api/v1/user/signup/', headers=self.headers, data=json.dumps(data))
response_data = json.loads(r.data)
self.assertEqual(response_data[u'status'], 4001)
self.assertEqual(response_data[u'errors'][u'error_type'], u'email_already_exists', u'Email already exists')
def test_users_signup_ok(self):
"""
User signup is successfully
:return:
"""
self.clear_db()
client_app = self.get_app_client()
data = {
u'login': u'user1',
u'email': u'user1@example.com',
u'password': u'123',
u'first_name': u'aa',
u'last_name': u'aa'
}
r = client_app.post('/api/v1/user/signup/', headers=self.headers, data=json.dumps(data))
response_data = json.loads(r.data)
self.assertEqual(response_data[u'status'], 201)
us = ServiceLocator.resolve(ServiceLocator.USERS)
user = us.single(u'user1')
self.assertIsNotNone(user)
class UserCheckTest(RestBaseTest):
"""
Test all case for POST: /api/v1/users/check/
"""
def test_user_check_login_is_exists(self):
"""
Test case:
Login is exists
"""
data = {
u'login': u'user1',
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'login'], False)
def test_user_check_email_is_exists(self):
"""
Test case:
Email is exists
"""
data = {
u'email': u'user1@example.com',
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'email'], False)
def test_user_check_login_ok(self):
"""
Test case:
Login is not exists
"""
data = {
u'login': u'user2'
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'login'], True)
def test_user_check_email_ok(self):
"""
Test case:
Email is not exists
"""
data = {
u'email': u'user2@example.com'
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'email'], True)
def test_user_check_login_email_ok(self):
"""
Test case:
Login and Email is not exists
"""
data = {
u'login': u'user2',
u'email': u'user2@example.com'
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'login'], True)
self.assertEqual(response_data[u'data'][u'email'], True)
def test_user_check_login_email_fail(self):
"""
Test case:
Login and Email is not exists
"""
data = {
u'login': u'user1',
u'email': u'user1@example.com'
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'login'], False)
self.assertEqual(response_data[u'data'][u'email'], False)
def test_user_check_login_email_none(self):
"""
Test case:
Login and Email didn't send
"""
data = {
}
response_data = self._test_check(data)
self.assertEqual(response_data[u'status'], 200)
self.assertEqual(response_data[u'data'][u'login'], None)
self.assertEqual(response_data[u'data'][u'email'], None)
def _test_check(self, data):
self.clear_db()
us = ServiceLocator.resolve(ServiceLocator.USERS)
us.create(u'user1', u'user1@example.com', u'123', first_name=u'demo', last_name=u'demo')
client_app = self.get_app_client()
r = client_app.post('/api/v1/users/check/', headers=self.headers, data=json.dumps(data))
response_data = json.loads(r.data)
return response_data
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom techism2 import service\nfrom datetime import datetime, timedelta\nimport icalendar\nimport ti... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom techism2 import service\nfrom datetime import datetime, timedelta\nimport ical... | ```python
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from techism2 import service
from datetime import datetime, timedelta
import icalendar
import time
def ical(request):
ninety_days = datetime.utcnow() + timedelta(days=90)
event_list = service.get_event_query_set().filter(date_time_begin__lte=ninety_days).order_by('date_time_begin')
cal = icalendar.Calendar()
cal['prodid'] = icalendar.vText(u'-//Techism//Techism//DE')
cal['version'] = icalendar.vText(u'2.0')
cal['x-wr-calname'] = icalendar.vText(u'Techism')
cal['x-wr-caldesc'] = icalendar.vText(u'Techism - IT-Events in München')
for e in event_list:
event = icalendar.Event()
# TODO should we generate an UUID when creating the event?
uid = u'%s@techism.de' % (str(e.id))
event['uid'] = icalendar.vText(uid)
event['dtstamp'] = icalendar.vDatetime(datetime.utcnow())
# The sequence field must be incremented each time the event is modifed.
# The trick here is to subtract the create TS from the modify TS and
# use the difference as sequence.
sequence = 0
if e.date_time_created and e.date_time_modified:
createTimestamp = time.mktime(e.get_date_time_created_utc().timetuple())
modifyTimestamp = time.mktime(e.get_date_time_modified_utc().timetuple())
sequence = modifyTimestamp - createTimestamp
event['sequence'] = icalendar.vInt(sequence)
# created and last-modified
if e.date_time_created:
event['created'] = icalendar.vDatetime(e.get_date_time_created_utc())
if e.date_time_modified:
event['last-modified'] = icalendar.vDatetime(e.get_date_time_modified_utc())
# TENTATIVE, CONFIRMED, CANCELLED
event['status'] = icalendar.vText(u'CONFIRMED')
if e.title:
event['summary'] = icalendar.vText(e.title)
if e.description:
event['description'] = icalendar.vText(e.description)
if e.date_time_begin:
event['dtstart'] = icalendar.vDatetime(e.get_date_time_begin_utc())
if e.date_time_end:
event['dtend'] = icalendar.vDatetime(e.get_date_time_end_utc())
if e.url:
relative_url = reverse('event-show', args=[e.id])
absolute_url = request.build_absolute_uri(relative_url)
event['url'] = icalendar.vUri(absolute_url)
# geo value isn't used by iCal readers :-(
# maybe a trick is to add the geo coordinates to the location field using the following format:
# $latitude, $longitude ($name, $street, $city)
if e.location:
location = u'%s, %s, %s' % (e.location.name, e.location.street, e.location.city)
event['location'] = icalendar.vText(location)
if e.location and e.location.latitude and e.location.longitude:
event['geo'] = icalendar.vGeo((e.location.latitude, e.location.longitude))
cal.add_component(event)
response = HttpResponse(cal.as_string())
response['Content-Type'] = 'text/calendar; charset=UTF-8'
response['Cache-Control'] = 'no-cache, no-store, max-age=0, must-revalidate'
response['Pragma'] = 'no-cache'
response['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
return response
``` |
[
{
"content": "Here is the source code:\n```python\nimport hashlib\nimport pytest\n\nfrom django.contrib.auth.models import User\nfrom django.core import mail\n\nfrom factories import SecretaryFactory, AdministratorFactory\nfrom users.models import Employee\nfrom users.models import Administrator\nfrom users.mod... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport hashlib\nimport pytest\n\nfrom django.contrib.auth.models import User\nfrom django.core import mail\n\nfrom factories import SecretaryFactory, AdministratorFactory\nfrom users.models import Employee\nfrom users.models import Administrator... | ```python
import hashlib
import pytest
from django.contrib.auth.models import User
from django.core import mail
from factories import SecretaryFactory, AdministratorFactory
from users.models import Employee
from users.models import Administrator
from users.models import Secretary
from django.core.exceptions import ObjectDoesNotExist
@pytest.mark.django_db
class TestRegisterUsers:
def setup(self):
self.user1 = SecretaryFactory()
self.user2 = AdministratorFactory()
def test_index_get(self,client):
response = client.get('/users/')
assert response.status_code == 200
def test_register_user_get(self,client):
client.login(username=self.user2.user.username,password='test_password')
response = client.get('/users/register/')
assert response.status_code == 200
def test_register_user_secretary_post(self,client):
client.login(username=self.user2.user.username,password='test_password')
response = client.post('/users/register/',{'employee_type':'secretary',
'name':'Marcelo',
'phone_number':'32',
'email':'marcelo@gmail.com',
'password':'123456789',
'confirmPassword':'123456789'}, follow = True)
try:
recovery = Secretary.objects.get(user= User.objects.get(username='marcelo@gmail.com'))
assert True
except ObjectDoesNotExist:
assert False
def test_register_user_admin_post(self,client):
client.login(username=self.user2.user.username,password='test_password')
response = client.post('/users/register/',{'employee_type':'administrator',
'name':'Marco',
'phone_number':'32',
'email':'marco@gmail.com',
'password':'123456789',
'confirmPassword':'123456789'}, follow = True)
try:
recovery = Administrator.objects.get(user= User.objects.get(username='marco@gmail.com'))
assert True
except ObjectDoesNotExist:
assert False
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimp... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship inf... | ```python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import functools
import logging
import sys
from django.contrib.auth import get_user_model
from django.core.validators import ValidationError, validate_email
from django.db.models import Count
from allauth.account.models import EmailAddress
from allauth.account.utils import sync_user_email_addresses
from pootle.core.contextmanagers import keep_data
from pootle.core.models import Revision
from pootle.core.signals import update_data
from pootle_store.constants import FUZZY, UNTRANSLATED
from pootle_store.models import SuggestionState
logger = logging.getLogger(__name__)
def get_user_by_email(email):
"""Retrieves auser by its email address.
First it looks up the `EmailAddress` entries, and as a safety measure
falls back to looking up the `User` entries (these addresses are
sync'ed in theory).
:param email: address of the user to look up.
:return: `User` instance belonging to `email`, `None` otherwise.
"""
try:
return EmailAddress.objects.get(email__iexact=email).user
except EmailAddress.DoesNotExist:
try:
User = get_user_model()
return User.objects.get(email__iexact=email)
except User.DoesNotExist:
return None
def write_stdout(start_msg, end_msg="DONE\n", fail_msg="FAILED\n"):
def class_wrapper(f):
@functools.wraps(f)
def method_wrapper(self, *args, **kwargs):
sys.stdout.write(start_msg % self.__dict__)
try:
result = f(self, *args, **kwargs)
except Exception as e:
sys.stdout.write(fail_msg % self.__dict__)
logger.exception(e)
raise e
sys.stdout.write(end_msg % self.__dict__)
return result
return method_wrapper
return class_wrapper
class UserMerger(object):
def __init__(self, src_user, target_user):
"""Purges src_user from site reverting any changes that they have made.
:param src_user: `User` instance to merge from.
:param target_user: `User` instance to merge to.
"""
self.src_user = src_user
self.target_user = target_user
@write_stdout("Merging user: "
"%(src_user)s --> %(target_user)s...\n",
"User merged: %(src_user)s --> %(target_user)s \n")
def merge(self):
"""Merges one user to another.
The following are fields are updated (model: fields):
- units: submitted_by, commented_by, reviewed_by
- submissions: submitter
- suggestions: user, reviewer
"""
self.merge_submitted()
self.merge_commented()
self.merge_reviewed()
self.merge_submissions()
self.merge_suggestions()
self.merge_reviews()
@write_stdout(" * Merging units comments: "
"%(src_user)s --> %(target_user)s... ")
def merge_commented(self):
"""Merge commented_by attribute on units
"""
self.src_user.commented.update(commented_by=self.target_user)
@write_stdout(" * Merging units reviewed: "
"%(src_user)s --> %(target_user)s... ")
def merge_reviewed(self):
"""Merge reviewed_by attribute on units
"""
self.src_user.reviewed.update(reviewed_by=self.target_user)
@write_stdout(" * Merging suggestion reviews: "
"%(src_user)s --> %(target_user)s... ")
def merge_reviews(self):
"""Merge reviewer attribute on suggestions
"""
self.src_user.reviews.update(reviewer=self.target_user)
@write_stdout(" * Merging remaining submissions: "
"%(src_user)s --> %(target_user)s... ")
def merge_submissions(self):
"""Merge submitter attribute on submissions
"""
# Delete orphaned submissions.
self.src_user.submission_set.filter(unit__isnull=True).delete()
# Before we can save we first have to remove existing score_logs for
# src_user - they will be recreated on save for target_user
self.src_user.scorelog_set.all().delete()
# Update submitter on submissions
self.src_user.submission_set.update(submitter=self.target_user)
@write_stdout(" * Merging units submitted_by: "
"%(src_user)s --> %(target_user)s... ")
def merge_submitted(self):
"""Merge submitted_by attribute on units
"""
self.src_user.submitted.update(submitted_by=self.target_user)
@write_stdout(" * Merging suggestions: "
"%(src_user)s --> %(target_user)s... ")
def merge_suggestions(self):
"""Merge user attribute on suggestions
"""
# Update user and reviewer on suggestions
self.src_user.suggestions.update(user=self.target_user)
class UserPurger(object):
def __init__(self, user):
"""Purges user from site reverting any changes that they have made.
:param user: `User` to purge.
"""
self.user = user
@write_stdout("Purging user: %(user)s... \n", "User purged: %(user)s \n")
def purge(self):
"""Purges user from site reverting any changes that they have made.
The following steps are taken:
- Delete units created by user and without other submissions.
- Revert units edited by user.
- Revert reviews made by user.
- Revert unit comments by user.
- Revert unit state changes by user.
- Delete any remaining submissions and suggestions.
"""
stores = set()
with keep_data():
stores |= self.remove_units_created()
stores |= self.revert_units_edited()
stores |= self.revert_units_reviewed()
stores |= self.revert_units_commented()
stores |= self.revert_units_state_changed()
# Delete remaining submissions.
logger.debug("Deleting remaining submissions for: %s", self.user)
self.user.submission_set.all().delete()
# Delete remaining suggestions.
logger.debug("Deleting remaining suggestions for: %s", self.user)
self.user.suggestions.all().delete()
for store in stores:
update_data.send(store.__class__, instance=store)
@write_stdout(" * Removing units created by: %(user)s... ")
def remove_units_created(self):
"""Remove units created by user that have not had further
activity.
"""
stores = set()
# Delete units created by user without submissions by others.
for unit in self.user.get_units_created().iterator():
stores.add(unit.store)
# Find submissions by other users on this unit.
other_subs = unit.submission_set.exclude(submitter=self.user)
if not other_subs.exists():
unit.delete()
logger.debug("Unit deleted: %s", repr(unit))
return stores
@write_stdout(" * Reverting unit comments by: %(user)s... ")
def revert_units_commented(self):
"""Revert comments made by user on units to previous comment or else
just remove the comment.
"""
stores = set()
# Revert unit comments where self.user is latest commenter.
for unit in self.user.commented.iterator():
stores.add(unit.store)
# Find comments by other self.users
comments = unit.get_comments().exclude(submitter=self.user)
if comments.exists():
# If there are previous comments by others update the
# translator_comment, commented_by, and commented_on
last_comment = comments.latest('pk')
unit.translator_comment = last_comment.new_value
unit.commented_by_id = last_comment.submitter_id
unit.commented_on = last_comment.creation_time
logger.debug("Unit comment reverted: %s", repr(unit))
else:
unit.translator_comment = ""
unit.commented_by = None
unit.commented_on = None
logger.debug("Unit comment removed: %s", repr(unit))
# Increment revision
unit.save()
return stores
@write_stdout(" * Reverting units edited by: %(user)s... ")
def revert_units_edited(self):
"""Revert unit edits made by a user to previous edit.
"""
stores = set()
# Revert unit target where user is the last submitter.
for unit in self.user.submitted.iterator():
stores.add(unit.store)
# Find the last submission by different user that updated the
# unit.target.
edits = unit.get_edits().exclude(submitter=self.user)
if edits.exists():
last_edit = edits.latest("pk")
unit.target_f = last_edit.new_value
unit.submitted_by_id = last_edit.submitter_id
unit.submitted_on = last_edit.creation_time
logger.debug("Unit edit reverted: %s", repr(unit))
else:
# if there is no previous submissions set the target to "" and
# set the unit.submitted_by to None
unit.target_f = ""
unit.submitted_by = None
unit.submitted_on = unit.creation_time
logger.debug("Unit edit removed: %s", repr(unit))
# Increment revision
unit.revision = Revision.incr()
unit.save()
return stores
@write_stdout(" * Reverting units reviewed by: %(user)s... ")
def revert_units_reviewed(self):
"""Revert reviews made by user on suggestions to previous state.
"""
stores = set()
pending = SuggestionState.objects.get(name="pending")
# Revert reviews by this user.
for review in self.user.get_suggestion_reviews().iterator():
suggestion = review.suggestion
stores.add(suggestion.unit.store)
if suggestion.user_id == self.user.id:
# If the suggestion was also created by this user then remove
# both review and suggestion.
suggestion.delete()
logger.debug("Suggestion removed: %s", (suggestion))
elif suggestion.reviewer_id == self.user.id:
# If the suggestion is showing as reviewed by the user, then
# set the suggestion back to pending and update
# reviewer/review_time.
suggestion.state = pending
suggestion.reviewer = None
suggestion.review_time = None
suggestion.save()
logger.debug("Suggestion reverted: %s", (suggestion))
# Remove the review.
review.delete()
for unit in self.user.reviewed.iterator():
stores.add(unit.store)
unit.suggestion_set.filter(reviewer=self.user).update(
state=SuggestionState.objects.get(name="pending"),
reviewer=None)
revision = None
unit.reviewed_by = None
unit.reviewed_on = None
# Increment revision
revision = Revision.incr()
logger.debug("Unit reviewed_by removed: %s", repr(unit))
unit.revision = revision
unit.save()
return stores
@write_stdout(" * Reverting unit state changes by: %(user)s... ")
def revert_units_state_changed(self):
"""Revert unit edits made by a user to previous edit.
"""
stores = set()
# Delete orphaned submissions.
self.user.submission_set.filter(unit__isnull=True).delete()
for submission in self.user.get_unit_states_changed().iterator():
unit = submission.unit
stores.add(unit.store)
# We have to get latest by pk as on mysql precision is not to
# microseconds - so creation_time can be ambiguous
if submission != unit.get_state_changes().latest('pk'):
# If the unit has been changed more recently we don't need to
# revert the unit state.
submission.delete()
return
submission.delete()
other_submissions = (unit.get_state_changes()
.exclude(submitter=self.user))
if other_submissions.exists():
new_state = other_submissions.latest('pk').new_value
else:
new_state = UNTRANSLATED
if new_state != unit.state:
if unit.state == FUZZY:
unit.markfuzzy(False)
elif new_state == FUZZY:
unit.markfuzzy(True)
unit.state = new_state
# Increment revision
unit.revision = Revision.incr()
unit.save()
logger.debug("Unit state reverted: %s", repr(unit))
return stores
def verify_user(user):
"""Verify a user account without email confirmation
If the user has an existing primary allauth.EmailAddress set then this is
verified.
Otherwise, an allauth.EmailAddress is created using email set for
User.email.
If the user is already verified raises a ValueError
:param user: `User` to verify
"""
if not user.email:
raise ValidationError("You cannot verify an account with no email "
"set. You can set this user's email with "
"'pootle update_user_email %s EMAIL'"
% user.username)
# Ensure this user's email address is unique
try:
validate_email_unique(user.email, user)
except ValidationError:
raise ValidationError("This user's email is not unique. You can find "
"duplicate emails with 'pootle "
"find_duplicate_emails'")
# already has primary?
existing_primary = EmailAddress.objects.filter(user=user, primary=True)
if existing_primary.exists():
existing_primary = existing_primary.first()
if not existing_primary.verified:
existing_primary.verified = True
existing_primary.save()
return
else:
# already verified
raise ValueError("User '%s' is already verified" % user.username)
sync_user_email_addresses(user)
email_address = (EmailAddress.objects
.filter(user=user, email__iexact=user.email)
.order_by("primary")).first()
email_address.verified = True
email_address.primary = True
email_address.save()
def get_duplicate_emails():
"""Get a list of emails that occur more than once in user accounts.
"""
return (get_user_model().objects.hide_meta()
.values('email')
.annotate(Count('email'))
.filter(email__count__gt=1)
.values_list("email", flat=True))
def validate_email_unique(email, for_user=None):
"""Validates an email to ensure it does not already exist in the system.
:param email: Email address to validate for uniqueness.
:param for_user: Optionally check an email address is unique to this user
"""
existing_accounts = get_user_model().objects.filter(email=email)
existing_email = EmailAddress.objects.filter(email=email)
if for_user is not None:
existing_accounts = existing_accounts.exclude(pk=for_user.pk)
existing_email = existing_email.exclude(user=for_user)
if existing_accounts.exists() or existing_email.exists():
raise ValidationError("A user with that email address already exists")
def update_user_email(user, new_email):
"""Updates a user's email with new_email.
:param user: `User` to update email for.
:param new_email: Email address to update with.
"""
validate_email_unique(new_email)
validate_email(new_email)
user.email = new_email
user.save()
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/env python3\n# -*- mode: python -*-\n\n# This script is used to prepare the tarballs that a version 002 client\n# submits for further processing. It copies the tarballs and their MD5\n# sums to the archive (after chec... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- mode: python -*-\n\n# This script is used to prepare the tarballs that a version 002 client\n# submits for further processing. It copies the tarballs and their MD5\n# sums to the arc... | ```python
#!/usr/bin/env python3
# -*- mode: python -*-
# This script is used to prepare the tarballs that a version 002 client
# submits for further processing. It copies the tarballs and their MD5
# sums to the archive (after checking) and sets the state links, so
# that the dispatch script will pick them up and get the ball
# rolling. IOW, it does impedance matching between version 002 clients
# and the server scripts.
import os
import sys
import glob
import shutil
import selinux
import tempfile
from pathlib import Path
from pbench.common.exceptions import BadConfig
from pbench.common.logger import get_pbench_logger
from pbench.common.utils import md5sum
from pbench.server import PbenchServerConfig
from pbench.server.report import Report
from pbench.server.utils import quarantine
from pbench.server.database.models.tracker import Dataset, States, DatasetError
from pbench.server.database import init_db
_NAME_ = "pbench-server-prep-shim-002"
class Results:
def __init__(
self, nstatus="", ntotal=0, ntbs=0, nquarantined=0, ndups=0, nerrs=0,
):
self.nstatus = nstatus
self.ntotal = ntotal
self.ntbs = ntbs
self.nquarantined = nquarantined
self.ndups = ndups
self.nerrs = nerrs
def fetch_config_val(config, logger):
qdir = config.get("pbench-server", "pbench-quarantine-dir")
if not qdir:
logger.error("Failed: getconf.py pbench-quarantine-dir pbench-server")
return None, None
qdir = Path(qdir).resolve()
if not qdir.is_dir():
logger.error("Failed: {} does not exist, or is not a directory", qdir)
return None, None
# we are explicitly handling version-002 data in this shim
receive_dir_prefix = config.get("pbench-server", "pbench-receive-dir-prefix")
if not receive_dir_prefix:
logger.error("Failed: getconf.py pbench-receive-dir-prefix pbench-server")
return None, None
receive_dir = Path(f"{receive_dir_prefix}-002").resolve()
if not receive_dir.is_dir():
logger.error("Failed: {} does not exist, or is not a directory", receive_dir)
return None, None
return (qdir, receive_dir)
def qdirs_check(qdir_val, qdir, logger):
try:
os.makedirs(qdir)
except FileExistsError:
# directory already exists, ignore
pass
except Exception:
logger.exception(
"os.mkdir: Unable to create {} destination directory: {}", qdir_val, qdir,
)
return None
return qdir
def md5_check(tb, tbmd5, logger):
# read the md5sum from md5 file
try:
with tbmd5.open() as f:
archive_md5_hex_value = f.readline().split(" ")[0]
except Exception:
archive_md5_hex_value = None
logger.exception("Quarantine: Could not read {}", tbmd5)
# get hex value of the tarball's md5sum
try:
(_, archive_tar_hex_value) = md5sum(tb)
except Exception:
archive_tar_hex_value = None
logger.exception("Quarantine: Could not read {}", tb)
return (archive_md5_hex_value, archive_tar_hex_value)
def process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors):
    """Move fully-uploaded version-002 tarballs into the archive tree.

    For each ``*.tar.xz.md5`` under ``receive_dir``: create a Dataset
    tracking row, verify the MD5, copy the .md5 and move the tarball into
    ``config.ARCHIVE/<controller>/``, restore SELinux context, link the
    tarball into the controller's TODO subdirectory, and advance the
    Dataset to UPLOADED. Failures route the pair into the ``qdir_md5``,
    ``duplicates``, or ``errors`` quarantine trees.

    Returns a Results object with the per-category counts and the
    accumulated per-tarball status text.
    """
    # Check for results that are ready for processing: version 002 agents
    # upload the MD5 file as xxx.md5.check and they rename it to xxx.md5
    # after they are done with MD5 checking so that's what we look for.
    list_check = glob.glob(
        os.path.join(receive_dir, "**", "*.tar.xz.md5"), recursive=True
    )
    archive = config.ARCHIVE
    logger.info("{}", config.TS)
    # Sort for a deterministic processing (and status-report) order.
    list_check.sort()
    nstatus = ""
    ntotal = ntbs = nerrs = nquarantined = ndups = 0
    for tbmd5 in list_check:
        ntotal += 1
        # full pathname of tarball (strip the trailing ".md5")
        tb = Path(tbmd5[0:-4])
        tbmd5 = Path(tbmd5)
        # directory
        tbdir = tb.parent
        # resultname: get the basename foo.tar.xz and then strip the .tar.xz
        resultname = tb.name
        # The upload layout is <receive_dir>/<controller>/<result>.tar.xz,
        # so the parent directory name identifies the controller.
        controller = tbdir.name
        dest = archive / controller
        # Create a new dataset tracker in UPLOADING state, and add it to the
        # database.
        #
        # NOTE: Technically, this particular workflow has no "UPLOADING" as
        # the `pbench-server-prep-shim-002` command isn't invoked until the
        # tarball and MD5 has been entirely uploaded by the agent via `ssh`;
        # this method however can't be supported once we have authorized user
        # ownership, and the model fits the server `PUT` method where an
        # unexpected termination could leave a tarball in "Uploading" state.
        #
        # TODO: We have no way to identify an owner here, so assign it to
        # the arbitrary "pbench" user. This will go away when we drop this
        # component entirely in favor of PUT.
        try:
            dataset = Dataset.create(
                controller=controller, path=resultname, owner="pbench"
            )
        except DatasetError as e:
            logger.error(
                "Unable to create dataset {}>{}: {}", controller, resultname, str(e)
            )
            # TODO: Should we quarantine over this? Note it's not quite
            # straightforward, as quarantine() expects that the Dataset has
            # been created, so we'll get a cascade failure. Since prep-shim's
            # days are numbered, I'm inclined not to worry about it here.
            dataset = None
        # A same-named tarball already archived (both tarball and .md5
        # present) is treated as a duplicate, not re-verified.
        if all([(dest / resultname).is_file(), (dest / tbmd5.name).is_file()]):
            logger.error("{}: Duplicate: {} duplicate name", config.TS, tb)
            quarantine((duplicates / controller), logger, tb, tbmd5)
            ndups += 1
            continue
        # NOTE(review): md5_check() returns (md5-file value, computed value),
        # so these two names are swapped relative to the callee. The
        # equality/None check below is symmetric, and dataset.md5 is only
        # assigned after the two values are proven equal, so behavior is
        # unaffected -- but the naming is misleading.
        archive_tar_hex_value, archive_md5_hex_value = md5_check(tb, tbmd5, logger)
        if any(
            [
                archive_tar_hex_value != archive_md5_hex_value,
                archive_tar_hex_value is None,
                archive_md5_hex_value is None,
            ]
        ):
            logger.error("{}: Quarantined: {} failed MD5 check", config.TS, tb)
            logger.info("{}: FAILED", tb.name)
            logger.info("md5sum: WARNING: 1 computed checksum did NOT match")
            quarantine((qdir_md5 / controller), logger, tb, tbmd5)
            nquarantined += 1
            continue
        # Record the verified MD5 on the tracking row; failure to update
        # is logged but does not block archiving.
        if dataset:
            try:
                dataset.md5 = archive_md5_hex_value
                dataset.update()
            except DatasetError as e:
                logger.warn(
                    "Unable to update dataset {} with md5: {}", str(dataset), str(e)
                )
        # make the destination directory and its TODO subdir if necessary.
        try:
            os.makedirs(dest / "TODO")
        except FileExistsError:
            # directory already exists, ignore
            pass
        except Exception:
            logger.error("{}: Error in creating TODO directory.", config.TS)
            quarantine(os.path.join(errors, controller), logger, tb, tbmd5)
            nerrs += 1
            continue
        # First, copy the small .md5 file to the destination. That way, if
        # that operation fails it will fail quickly since the file is small.
        try:
            shutil.copy2(tbmd5, dest)
        except Exception:
            logger.error(
                "{}: Error in copying .md5 file to Destination path.", config.TS
            )
            try:
                os.remove(dest / tbmd5.name)
            except FileNotFoundError:
                logger.error(
                    "{}: Warning: cleanup of copy failure failed itself.", config.TS
                )
            quarantine((errors / controller), logger, tb, tbmd5)
            nerrs += 1
            continue
        # Next, mv the "large" tar ball to the destination. If the destination
        # is on the same device, the move should be quick. If the destination is
        # on a different device, the move will be a copy and delete, and will
        # take a bit longer. If it fails, the file will NOT be at the
        # destination.
        try:
            shutil.move(str(tb), str(dest))
        except Exception:
            logger.error(
                "{}: Error in moving tarball file to Destination path.", config.TS
            )
            try:
                os.remove(dest / resultname)
            except FileNotFoundError:
                logger.error(
                    "{}: Warning: cleanup of copy failure failed itself.", config.TS
                )
            quarantine((errors / controller), logger, tb, tbmd5)
            nerrs += 1
            continue
        # Restore the SELinux context properly
        try:
            selinux.restorecon(dest / tb.name)
            selinux.restorecon(dest / tbmd5.name)
        except Exception as e:
            # log it but do not abort
            logger.error("{}: Error: 'restorecon {}', {}", config.TS, dest / tb.name, e)
        # Now that we have successfully moved the tar ball and its .md5 to the
        # destination, we can remove the original .md5 file.
        try:
            os.remove(tbmd5)
        except Exception as exc:
            logger.error(
                "{}: Warning: cleanup of successful copy operation failed: '{}'",
                config.TS,
                exc,
            )
        try:
            os.symlink((dest / resultname), (dest / "TODO" / resultname))
        except Exception as exc:
            logger.error("{}: Error in creation of symlink. '{}'", config.TS, exc)
            # if we fail to make the link, we quarantine the (already moved)
            # tarball and .md5.
            # NOTE(review): tb and tbmd5 are absolute paths here, so
            # `dest / tb` resolves to tb itself under pathlib's absolute-path
            # join rule -- presumably intentional since the files were just
            # moved under dest; confirm.
            quarantine(
                (errors / controller), logger, (dest / tb), (dest / tbmd5),
            )
            nerrs += 1
            continue
        ntbs += 1
        try:
            if dataset:
                dataset.advance(States.UPLOADED)
        except Exception:
            logger.exception("Unable to finalize {}", dataset)
        nstatus = f"{nstatus}{config.TS}: processed {tb}\n"
        logger.info(f"{tb.name}: OK")
    return Results(
        nstatus=nstatus,
        ntotal=ntotal,
        ntbs=ntbs,
        nquarantined=nquarantined,
        ndups=ndups,
        nerrs=nerrs,
    )
def main(cfg_name):
    """Entry point for the version-002 prep shim.

    Loads the server configuration, sets up logging and database access,
    validates the quarantine/receive directories, processes all pending
    tarballs via process_tb(), and posts a summary status report.

    :param cfg_name: path of the pbench server config file (may be falsy)
    :return: 0 on success; 1 on bad config or unusable quarantine subdirs;
        2 when no config file is given or the top-level directories are
        missing
    """
    if not cfg_name:
        print(
            f"{_NAME_}: ERROR: No config file specified; set"
            " _PBENCH_SERVER_CONFIG env variable or use --config <file> on the"
            " command line",
            file=sys.stderr,
        )
        return 2
    try:
        config = PbenchServerConfig(cfg_name)
    except BadConfig as e:
        print(f"{_NAME_}: {e} (config file {cfg_name})", file=sys.stderr)
        return 1
    logger = get_pbench_logger(_NAME_, config)
    # We're going to need the Postgres DB to track dataset state, so setup
    # DB access.
    init_db(config, logger)
    qdir, receive_dir = fetch_config_val(config, logger)
    # fetch_config_val returns (None, None) on any failure, so testing both
    # with `and` is equivalent to testing either one.
    if qdir is None and receive_dir is None:
        return 2
    qdir_md5 = qdirs_check("quarantine", Path(qdir, "md5-002"), logger)
    duplicates = qdirs_check("duplicates", Path(qdir, "duplicates-002"), logger)
    # The following directory holds tarballs that are quarantined because
    # of operational errors on the server. They should be retried after
    # the problem is fixed: basically, move them back into the reception
    # area for 002 agents and wait.
    errors = qdirs_check("errors", Path(qdir, "errors-002"), logger)
    if qdir_md5 is None or duplicates is None or errors is None:
        return 1
    counts = process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors)
    result_string = (
        f"{config.TS}: Processed {counts.ntotal} entries,"
        f" {counts.ntbs} tarballs successful,"
        f" {counts.nquarantined} quarantined tarballs,"
        f" {counts.ndups} duplicately-named tarballs,"
        f" {counts.nerrs} errors."
    )
    logger.info(result_string)
    # prepare and send report
    with tempfile.NamedTemporaryFile(mode="w+t", dir=config.TMP) as reportfp:
        reportfp.write(f"{counts.nstatus}{result_string}\n")
        reportfp.seek(0)
        report = Report(config, _NAME_)
        report.init_report_template()
        try:
            # A report-posting failure is logged but does not change the
            # exit status -- the tarballs were already processed.
            report.post_status(config.timestamp(), "status", reportfp.name)
        except Exception as exc:
            logger.warning("Report post Unsuccesful: '{}'", exc)
    return 0
if __name__ == "__main__":
    # The config file location comes from the environment; main() returns
    # a shell-style exit status.
    sys.exit(main(os.environ.get("_PBENCH_SERVER_CONFIG")))
``` |
[
{
"content": "Return the code unaltered:\n```python\n# import sys\n# sys.path.append(\".\")\n\nfrom keras.layers import Input, Dense\nimport keras.optimizers\nfrom keras.models import Model\nimport sklearn.model_selection as mds\nimport numpy as np\nimport problem.load_data as load_data\nimport problem.train_MI... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# import sys\n# sys.path.append(\".\")\n\nfrom keras.layers import Input, Dense\nimport keras.optimizers\nfrom keras.models import Model\nimport sklearn.model_selection as mds\nimport numpy as np\nimport problem.load_data as load_data\nimport ... | ```python
# import sys
# sys.path.append(".")
from keras.layers import Input, Dense
import keras.optimizers
from keras.models import Model
import sklearn.model_selection as mds
import numpy as np
import problem.load_data as load_data
import problem.train_MINST as train_MINST
import matplotlib.pyplot as plt
import hyperLearn.sample as sp
############################################
## function to learn the RSM of the model ##
############################################
def learn_RSM(x, y):
    """Fit the response-surface model (RSM) mapping hyperparameter vectors
    to observed model performance.

    Builds a small fully-connected network (two hidden ReLU layers of
    20*dim units, one linear output) and trains it with SGD on MSE.
    Returns (validation_loss, fitted_model).

    NOTE(review): this uses the Keras 1.x API (Model(input=..., output=...),
    nb_epoch=, SGD(lr=...)) -- presumably matching the installed Keras
    version; confirm before upgrading.
    """
    # x_train, x_test, y_train, y_test = mds.train_test_split(x, y, test_size=0.10, random_state=0)
    # x_train, x_valid, y_train, y_valid = mds.train_test_split(x, y, test_size=0.10, random_state=0)
    # print('RSMdata',x_train,y_train)
    x_train = x
    y_train = y
    ## due to the size of the sets we use all data for training and validation, the RSM may overfit
    x_valid = x
    y_valid = y
    # One input unit per hyperparameter dimension.
    dim = x.shape[1]
    input = Input(shape=(dim,))
    # the network architecture : 2 hidden layers as indicated in the article
    network = Dense(20 * dim, activation="relu")(input)
    network = Dense(20 * dim, activation="relu")(network)
    network = Dense(1, activation="linear")(network)
    # print(x_train,y_train)
    model = Model(input=input, output=network)
    opt = keras.optimizers.SGD(lr=0.1)
    model.compile(optimizer=opt, loss="mse")
    loss = model.fit(x_train, y_train,
                     nb_epoch=100,
                     batch_size=20,
                     verbose=0,
                     shuffle=True,
                     validation_data=(x_valid, y_valid))
    # Validation loss on the (identical) validation split, plus the model.
    return model.evaluate(x_valid, y_valid, verbose=0), model
## function to add a value to the training set for the RSM
def add_train(old, new):
    """Append one (features, target) observation to the RSM training set.

    :param old: tuple (x, y) of the accumulated design matrix and target
        sequence, or (None, anything) when no data has been collected yet
    :param new: tuple (feature_vector, target_value) for the new observation
    :return: updated (x, y) with the new row appended
    """
    features = new[0]
    # Fixed: 'old[0] == None' broadcasts elementwise once old[0] is a numpy
    # matrix, making the 'if' raise "truth value is ambiguous"; identity
    # comparison is the correct (and intended) test.
    if old[0] is None:
        return np.matrix(features), [new[1]]
    else:
        # print(old[0].shape,np.matrix(to_train).shape)
        return np.append(old[0], np.matrix(features), axis=0), np.append(old[1], [new[1]], axis=0)
## main function
def learn_hyperparam():
    """Search MNIST hyperparameters by modeling performance with an RSM.

    Seeds the RSM with 5 random configurations, then for 200 accepted
    trials: perturbs the current configuration (Gaussian sampling),
    predicts its performance with the RSM, and stochastically decides
    whether to actually train it (via test_mod), mostly accepting
    promising predictions and rarely accepting unpromising ones.

    :return: (best_conf, best_model, max_acc) -- a 3-tuple.
    """
    data = load_data.load_data()
    x = None
    y = None
    # initialisation : 5 random training runs to seed the RSM dataset
    for i in range(5):
        s = sp.sample()
        print(s.get_MNIST())
        res = train_MINST.train_model_s(s.get_MNIST(), data)
        (x, y) = add_train((x, y), (s.get_RSM(), res[0]))
    (acc, RSM_model) = learn_RSM(x, y)
    # initialisation of loop variables
    hist_acc = []
    hist_res = []
    macc = []
    # alpha controls both the restart probability and the accept/reject
    # thresholds below.
    alpha = 0.001
    max_acc = max(y)
    best_model = []
    k = 0
    # first sample is randomly chosen
    s = sp.sample()
    best_conf = s
    # k is only advanced inside test_mod(), i.e. the loop runs until 200
    # configurations have actually been trained.
    while k < 200:
        print('n° test', k)
        # with a small probability we jump back to the best configuration
        # found so far
        if np.random.uniform(0, 1) < alpha * 0.001:
            s = best_conf
        # Gaussian sampling of the next solution around the current one
        s = s.gaussian_samp()
        # prediction of its performance by the RSM
        p = RSM_model.predict(np.matrix(s.get_RSM()))
        print('predict_loss ', p, 'acc', max_acc)
        # accept/reject: promising predictions are trained with probability
        # 1 - alpha, unpromising ones with probability alpha
        if p > max_acc:
            r = np.random.uniform(0, 1)
            if r > alpha:
                RSM_model, best_conf, best_model, k, max_acc, x, y = test_mod(RSM_model, best_conf, best_model, data,
                                                                              hist_acc, hist_res, k, macc, max_acc, s,
                                                                              x, y)
        else:
            r = np.random.uniform(0, 1)
            if r < alpha:
                RSM_model, best_conf, best_model, k, max_acc, x, y = test_mod(RSM_model, best_conf, best_model, data,
                                                                              hist_acc, hist_res, k, macc, max_acc, s,
                                                                              x, y)
    # plot the results
    # NOTE(review): the macc curve is plotted twice below -- presumably an
    # accidental duplicate; confirm.
    print('bestconf',best_conf)
    print('error of RSM',max_acc)
    plt.plot(range(len(macc)), macc)
    plt.plot(range(len(macc)), macc)
    plt.plot(range(len(hist_acc)), hist_acc)
    plt.show()
    return best_conf, best_model, max_acc
# function to test configuration and update paramters
# - train MNIST
# - compare the results
# - add solution to RSM dataset
# - train RSM
# - save configurations
def test_mod(RSM_model, best_conf, best_model, data, hist_acc, hist_res, k, macc, max_acc, s, x, y):
    """Train configuration s on MNIST, update the incumbent and the RSM.

    Trains the candidate, promotes it to best if it beats max_acc, appends
    the observation to the RSM training set, refits the RSM, records and
    saves the history curves, and advances the trial counter k.

    :return: (RSM_model, best_conf, best_model, k, max_acc, x, y) with all
        values refreshed for the caller's loop.
    """
    res = train_MINST.train_model_s(s.get_MNIST(), data)
    if res[0] > max_acc:
        max_acc = res[0]
        best_model = res[1]
        best_conf = s
    # Every trained configuration (good or bad) feeds the RSM dataset.
    (x, y) = add_train((x, y), (s.get_RSM(), res[0]))
    (acc, RSM_model) = learn_RSM(x, y)
    # macc/hist_res/hist_acc are the caller's lists, mutated in place.
    macc.append(max_acc)
    hist_res.append(res[0])
    hist_acc.append(acc)
    # Persist the history after every trial so a crash loses nothing.
    np.save('../hyperlearn/hist_macc',macc)
    np.save('../hyperlearn/hist_res',hist_res)
    np.save('../hyperlearn/hist_acc',hist_acc)
    print('RSM_acc', acc)
    k = k + 1
    return RSM_model, best_conf, best_model, k, max_acc, x, y
if __name__ == '__main__':
    res = learn_hyperparam()
    print('bestconf', res[0])
    # Fixed: learn_hyperparam() returns (best_conf, best_model, max_acc),
    # a 3-tuple, so res[3] raised IndexError; the accuracy is at index 2.
    print('error of RSM', res[2])
``` |
[
{
"content": "```python\n\"\"\"\r\nClasses for Various Statistical Distributions.\r\n\r\nReferences:\r\n - Regress+ A compendium of common probability distributions (version 2.3)\r\n by Michael P. McLaughlin (mpmcl@mitre.org)\r\n http://www.causascientia.org/math_stat/Dists/Compendium.pdf\r\n - Hand... | [
{
"content": "<|memory_start|>```python\n\"\"\"\r\nClasses for Various Statistical Distributions.\r\n\r\nReferences:\r\n - Regress+ A compendium of common probability distributions (version 2.3)\r\n by Michael P. McLaughlin (mpmcl@mitre.org)\r\n http://www.causascientia.org/math_stat/Dists/Compendium.p... | ```python
"""
Classes for Various Statistical Distributions.
References:
- Regress+ A compendium of common probability distributions (version 2.3)
by Michael P. McLaughlin (mpmcl@mitre.org)
http://www.causascientia.org/math_stat/Dists/Compendium.pdf
- Hand-book on statistical distributions for experimentalists
Internal report SUF-PFY/96-01. University of Stockholms
by Christian Walck (walck@physto.se)
Distributions:
- BetaDistribution(location, scale, p, q)
- PowerFunctionDistribution(shape)
- BinomialDistribution(success, trial)
- BernoulliDistribution(success)
- BradfordDistribution
- BurrDistribution
- CauchyDistribution(location=0.0, scale=1.0)
- LorentzDistribution (alias of CauchyDistribution)
- ChiDistribution
- HalfNormalDistribution(location, scale)
- MaxwellDistribution(scale)
- RayleighDistribution(scale)
- CosineDistribution(location=0.0, scale=1.0)
- DoubleGammaDistribution
- DoubleWeibullDistribution
- ExponentialDistribution(location=0.0, scale=1.0)
- NegativeExponentialDistribution (alias of ExponentialDistribution)
- ExtremeLBDistribution
- FDistribution
- FiskDistribution
- LogLogisticDistribution (alias of FiskDistribution)
- FoldedNormalDistribution
- GammaDistribution
- ChiSquareDistribution(df)
- ErlangDistribution(shape)
- FurryDistribution (alias of GammaDistribution)
- GenLogisticDistribution
- GeometricDistribution(success=0.5)
- GumbelDistribution(location, scale)
- FisherTippettDistribution (alias of GumbelDistribution)
- GompertzDistribution (alias of GumbelDistribution)
- LogWeibullDistribution (alias of GumbelDistribution)
- HyperbolicSecantDistribution
- HypergeometricDistribution
- InverseNormalDistribution
- WaldDistribution (alias of InverseNormalDistribution)
- LaplaceDistribution
- BilateralExponentialDistribution (alias of LaplaceDistribution)
- DoubleExponentialDistribution (alias of LaplaceDistribution)
- LogarithmicDistribution(shape)
- LogisticDistribution
- SechSquaredDistribution (alias of LogisticDistribution)
- LogNormalDistribution
- AntiLogNormalDistribution (alias of LogNormalDistribution)
- CobbDouglasDistribution (alias of LogNormalDistribution)
- NakagamiDistribution
- NegativeBinomialDistribution(success, target)
- PascalDistribution(success, target)
- PolyaDistribution (alias of NegativeBinomialDistribution)
- NormalDistribution()
- ParetoDistribution(location=1.0, shape=1.0)
- PoissonDistribution(expectation)
- RademacherDistribution()
- ReciprocalDistribution
- SemicircularDistribution(location=0.0, scale=1.0)
- TDistribution(location=0.0, scale=1.0, shape=2)
- TriangularDistribution
- UniformDistribution(location, scale)
- RectangularDistribution (alias of UniformDistribution)
- WeibullDistribution
- FrechetDistribution (alias of WeibullDistribution)
Copyright (c) Maurice H.T. Ling <mauriceling@acm.org>
Date created: 17th August 2005
"""
import math
import random
from .copadsexceptions import DistributionParameterError
from .copadsexceptions import DistributionFunctionError
from .copadsexceptions import NormalDistributionTypeError
from . import nrpy
from . import constants
class Distribution:
    """
    Abstract base class for all statistical distributions.

    Concrete distributions take wildly different parameters, so each
    subclass defines its own constructor signature (typically keyword
    arguments) instead of a standardized one; every method here is a
    stub that subclasses must override.

    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    """
    def __init__(self, **parameters):
        """
        Construct the probability distribution from keyword parameters.
        Must be overridden by subclasses.
        """
        raise NotImplementedError
    def CDF(self, x):
        """
        Cumulative Distribution Function: the cumulative probability (area
        under the probability curve) from -infinity or 0 up to x. Also
        known as the density function. Must be overridden by subclasses.
        """
        raise NotImplementedError
    def PDF(self, x):
        """
        Partial Distribution Function: the probability at the particular
        value x, or the area under the probability distribution from x-h
        to x+h for a continuous distribution. Must be overridden.
        """
        raise NotImplementedError
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Inverse of CDF(): given a probability, return the corresponding
        value on the x-axis. Must be overridden by subclasses.
        """
        raise NotImplementedError
    def mean(self):
        """
        The arithmetic mean of the sample. Must be overridden.
        """
        raise NotImplementedError
    def mode(self):
        """
        The mode of the sample, where a closed form exists. Must be
        overridden by subclasses.
        """
        raise NotImplementedError
    def kurtosis(self):
        """
        The kurtosis of the sample. Must be overridden by subclasses.
        """
        raise NotImplementedError
    def skew(self):
        """
        The skew of the sample. Must be overridden by subclasses.
        """
        raise NotImplementedError
    def variance(self):
        """
        The variance of the sample. Must be overridden by subclasses.
        """
        raise NotImplementedError
# ----------------------------------------------------------
# Tested Distributions
# ----------------------------------------------------------
class BetaDistribution(Distribution):
"""
Class for Beta Distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, location, scale, p, q):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location:
@param scale: upper bound
@param p: shape parameter. Although no upper bound but seldom exceed 10.
@param q: shape parameter. Although no upper bound but seldom exceed 10.
"""
self.location = float(location)
self.scale = float(scale)
self.p = float(p)
self.q = float(q)
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability.
"""
return nrpy.betai(self.p, self.q, (x - self.location)/
(self.scale - self.location))
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability
for particular value of x, or the area under probability
distribution from x-h to x+h for continuous distribution.
"""
n = (self.scale - self.location) ** (self.p + self.q - 1)
n = nrpy.gammln(self.p) * nrpy.gammln(self.q) * n
n = nrpy.gammln(self.p + self.q) / n
p = (x - self.location) ** (self.p - 1)
q = (self.scale - x) ** (self.q - 1)
return n * p * q
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability
value and returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
n = (self.location * self.q) + (self.scale * self.p)
return n / (self.p + self.q)
def mode(self):
"""Gives the mode of the sample."""
n = (self.location * (self.q - 1)) + (self.scale * \
(self.p - 1))
return n / (self.p + self.q - 2)
def kurtosis(self):
"""Gives the kurtosis of the sample."""
n = (self.p ** 2) * (self.q + 2) + \
(2 * (self.q ** 2)) + \
((self.p * self.q) * (self.q - 2))
n = n * (self.p + self.q + 1)
d = self.p * self.q * (self.p + self.q + 2) * \
(self.p + self.q + 3)
return 3 * ((n / d) - 1)
def skew(self):
"""Gives the skew of the sample."""
d = (self.p + self.q) ** 3
d = d * (self.p + self.q + 1) * (self.p + self.q + 2)
e = ((self.p + self.q) ** 2) * (self.p + self.q + 1)
e = (self.p * self.q) / e
e = e ** 1.5
return ((2 * self.p * self.q) * (self.q - self.q)) / (d * e)
def variance(self):
"""Gives the variance of the sample."""
n = self.p * self.q * ((self.scale - self.location) ** 2)
d = (self.p + self.q + 1) * ((self.p + self.q) ** 2)
return n / d
def moment(self, r):
"""Gives the r-th moment of the sample."""
return nrpy.beta(self.p + r,
self.q)/nrpy.beta(self.p, self.q)
def random(self):
"""Gives a random number based on the distribution."""
return random.betavariate(self.p, self.q)
class BinomialDistribution(Distribution):
"""
Class for Binomial Distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, success=0.5, trial=1000):
"""
Constructor method. The parameters are used to construct
the probability distribution.
@param success: probability of success; 0 <= success <= 1
@param trial: number of Bernoulli trials
"""
self.success = float(success)
self.trial = int(trial)
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability.
"""
return nrpy.cdf_binomial(x, self.trial, self.success)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution.
"""
x = int(x)
return nrpy.bico(self.trial, x) * \
(self.success ** x) * \
((1 - self.success) ** (self.trial - x))
def inverseCDF(self, probability, start=0, step=1):
"""
It does the reverse of CDF() method, it takes a probability
value and returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.success * self.trial
def mode(self):
"""Gives the mode of the sample."""
return int(self.success * (self.trial + 1))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return (1 - ((6 * self.success * (1 - self.success))) /
(self.trial * self.success * (1 - self.success)))
def skew(self):
"""Gives the skew of the sample."""
return (1 - self.success - self.success)/ \
((self.trial * self.success * (1 - self.success)) ** 0.5)
def variance(self):
"""Gives the variance of the sample."""
return self.mean() * (1 - self.success)
class CauchyDistribution(Distribution):
"""
Class for Cauchy Distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: the mean; default = 0.0
@param scale: spread of the distribution, S{lambda}; default = 1.0
"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return 0.5 + 1 / PI * math.atan((x - self.location) / self.scale)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return 1 / (PI * self.scale * \
(1 + (((x - self.location) / self.scale) ** 2)))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
raise DistributionFunctionError('Mean for Cauchy Distribution is \
undefined')
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location - self.scale
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return self.location + self.scale
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.5
def random(self, seed):
"""Gives a random number based on the distribution."""
while 1:
seed = self.loaction + (self.scale * math.tan(PI * (seed - 0.5)))
yield seed
class CosineDistribution(Distribution):
"""
Cosine distribution is sometimes used as a simple approximation to
Normal distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: the mean; default = 0.0
@param scale: spread of the distribution, S{lambda}; default = 1.0
"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
n = PI + (x - self.location) / self.scale + \
math.sin((x - self.location) / self.scale)
return n / PI2
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return (1 / (PI2 * self.scale)) * \
(1 + math.cos((x - self.location) / self.scale))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return -0.5938
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return (((PI * PI)/3) - 2) * (self.scale ** 2)
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location - (0.8317 * self.scale)
def quantile3(self):
"""Gives the 13rd quantile of the sample."""
return self.location + (0.8317 * self.scale)
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.5
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class ExponentialDistribution(Distribution):
"""
Exponential distribution is the continuous version of Geometric
distribution. It is also a special case of Gamma distribution where
shape = 1
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: position of the distribution, default = 0.0
@param scale: spread of the distribution, S{lambda}; default = 1.0"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return 1 - math.exp((self.location - x) / self.scale)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return (1/self.scale) * math.exp((self.location - x)/self.scale)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location + self.scale
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location + (self.scale * math.log10(2))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return 6.0
def skew(self):
"""Gives the skew of the sample."""
return 2.0
def variance(self):
"""Gives the variance of the sample."""
return self.scale * self.scale
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location + (self.scale * math.log10(1.333))
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return self.location + (self.scale * math.log10(4))
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.6321
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.0
def random(self):
"""Gives a random number based on the distribution."""
return random.expovariate(1/self.location)
class FDistribution(Distribution):
    """
    F (variance-ratio) distribution.
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, df1=1, df2=1):
        """
        Constructor method.
        @param df1: numerator degrees of freedom
        @param df2: denominator degrees of freedom
        """
        self.df1 = float(df1)
        self.df2 = float(df2)
    def CDF(self, x):
        """
        Cumulative probability from 0 up to x, computed via the regularized
        incomplete beta function of the transformed variate.
        """
        t = (self.df1 * x) / (self.df1 * x + self.df2)
        return nrpy.betai(self.df1 / 2.0, self.df2 / 2.0, t)
    def PDF(self, x):
        """
        Probability density at x.
        """
        x = float(x)
        num = ((x * self.df1) ** self.df1) * (self.df2 ** self.df2)
        den = (x * self.df1 + self.df2) ** (self.df1 + self.df2)
        norm = x * nrpy.beta(self.df1 / 2.0, self.df2 / 2.0)
        return math.sqrt(num / den) / norm
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Reverse of CDF(): walks up the x-axis in increments of step until
        the cumulative probability reaches the requested value; returns
        the (x, cumulative probability) pair found.
        """
        x = start
        area = self.CDF(x)
        if probability < area:
            return (x, area)
        while probability > area:
            x = x + step
            area = self.CDF(x)
        return (x, area)
    def mean(self):
        """Arithmetic mean: df2 / (df2 - 2); defined for df2 > 2."""
        return float(self.df2 / (self.df2 - 2))
class GammaDistribution(Distribution):
    """
    Gamma distribution.
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, location, scale, shape):
        """
        Constructor method.
        @param location: shift of the distribution
        @param scale: scale parameter
        @param shape: shape parameter"""
        self.location = float(location)
        self.scale = float(scale)
        self.shape = float(shape)
    def CDF(self, x):
        """
        Cumulative probability from 0 up to x, via the regularized lower
        incomplete gamma function of the standardized variate.
        """
        z = (x - self.location) / self.scale
        return nrpy.gammp(self.shape, z)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Reverse of CDF(): steps along the x-axis until the cumulative
        probability reaches the requested value; returns (x, cprob).
        """
        x = start
        area = self.CDF(x)
        if probability < area:
            return (x, area)
        while probability > area:
            x = x + step
            area = self.CDF(x)
        return (x, area)
    def mean(self):
        """Arithmetic mean: location + scale * shape."""
        return self.location + (self.scale * self.shape)
    def mode(self):
        """Mode: location + scale * (shape - 1)."""
        return self.location + (self.scale * (self.shape - 1))
    def kurtosis(self):
        """Excess kurtosis: 6 / shape."""
        return 6 / self.shape
    def skew(self):
        """Skewness: 2 / sqrt(shape)."""
        return 2 / math.sqrt(self.shape)
    def variance(self):
        """Variance: scale^2 * shape."""
        return self.scale * self.scale * self.shape
    def qmean(self):
        """Quantile of the arithmetic mean."""
        return nrpy.gammp(self.shape, self.shape)
    def qmode(self):
        """Quantile of the mode."""
        return nrpy.gammp(self.shape, self.shape - 1)
def ErlangDistribution(location, scale, shape):
    """
    Factory for the Erlang distribution: a Gamma distribution whose shape
    parameter is truncated to an integer.
    @param location: shift of the distribution
    @param scale: scale parameter
    @param shape: shape parameter (truncated to int)
    @status: Tested method
    @since: version 0.2
    """
    return GammaDistribution(location, scale, int(shape))
def FurryDistribution(location, scale, shape):
    """
    Factory for the Furry distribution, which is simply the Gamma
    distribution under another name.
    @param location: shift of the distribution
    @param scale: scale parameter
    @param shape: shape parameter
    @status: Tested method
    @since: version 0.2
    """
    return GammaDistribution(location, scale, shape)
class ChiSquareDistribution(GammaDistribution):
    """
    Chi-square distribution: a Gamma distribution with location = 0,
    scale = 2 and shape equal to half the degrees of freedom.
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, df=2):
        """
        Constructor method.
        @param df: degrees of freedom"""
        GammaDistribution.__init__(self, 0, 2, float(df) / 2.0)
class GeometricDistribution(Distribution):
    """
    Geometric distribution - the discrete analogue of the Exponential
    distribution.
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, success=0.5):
        """
        Constructor method.
        @param success: probability of success; 0 <= success <= 1;
        default = 0.5
        """
        self.prob = float(success)
    def CDF(self, x):
        """
        Cumulative probability up to x trials.  The first term PDF(1) is
        always included; further terms are accumulated up to int(x).
        """
        total = self.PDF(1)
        for k in range(2, int(x) + 1):
            total += self.PDF(k)
        return total
    def PDF(self, x):
        """
        Probability of the first success occurring on trial x:
        p * (1 - p)^(x - 1).
        """
        failure = 1 - self.prob
        return self.prob * (failure ** (x - 1))
    def inverseCDF(self, probability, start=1, step=1):
        """
        Reverse of CDF(): walks x upward until the cumulative probability
        reaches the requested value; returns (x, cprob).
        """
        x = start
        area = self.CDF(x)
        if probability < area:
            return (x, area)
        while probability > area:
            x = x + step
            area = self.CDF(x)
        return (x, area)
    def mean(self):
        """Arithmetic mean: 1 / p."""
        return 1/self.prob
    def mode(self):
        """Mode: always the first trial."""
        return 1.0
    def variance(self):
        """Variance: (1 - p) / p^2."""
        return (1 - self.prob) / (self.prob ** 2)
class HypergeometricDistribution(Distribution):
    """
    Class for Hypergeometric distribution
    @see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
    Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
    Logarithmic, Semicircular, Triangular, and Weibull. The Python
    Papers Source Codes 5: 2.
    @status: Tested method
    @since: version 0.4
    """
    def __init__(self, sample_size,
                 population_size=100,
                 population_success=50):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param sample_size: sample size (not more than population size)
        @type sample_size: integer
        @param population_size: population size; default = 100
        @type population_size: integer
        @param population_success: number of successes in the population
        (cannot be more than population size); default = 50
        @type population_success: integer"""
        if population_success > population_size:
            raise AttributeError('population_success cannot be more \
                                 than population_size')
        elif sample_size > population_size:
            raise AttributeError('sample_size cannot be more \
                                 than population_size')
        else:
            self.psize = int(population_size)
            self.psuccess = int(population_success)
            self.ssize = int(sample_size)
    def CDF(self, sample_success):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value (sample_success, an integer that is not more than sample
        size) on the x-axis where y-axis is the probability."""
        if sample_success > self.ssize:
            raise AttributeError('sample_success cannot be more \
                                 than sample_size')
        else:
            # Sums PDF over 1..sample_success; note PDF(0) is never
            # included in the running total.
            return sum([self.PDF(n) for n in range(1, sample_success+1)])
    def PDF(self, sample_success):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x (sample_success, an integer that is not more
        than sample size), or the area under probability distribution from
        x-h to x+h for continuous distribution."""
        if sample_success > self.ssize:
            raise AttributeError('sample_success cannot be more \
                                 than sample_size')
        else:
            # C(psuccess, k) * C(psize-psuccess, ssize-k) / C(psize, ssize)
            sample_success = int(sample_success)
            numerator = nrpy.bico(self.psuccess, sample_success)
            numerator = numerator * nrpy.bico(self.psize-self.psuccess,
                                              self.ssize-sample_success)
            denominator = nrpy.bico(self.psize, self.ssize)
            return float(numerator)/float(denominator)
    def inverseCDF(self, probability, start=1, step=1):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding value (as an integer) on the x-axis."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while (probability > cprob):
            start = start + step
            cprob = self.CDF(start)
        # print start, cprob
        return (int(start), cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample: n * (K / N)."""
        return self.ssize * (float(self.psuccess)/float(self.psize))
    def mode(self):
        """Gives the mode of the sample: (n + 1)(K + 1) / (N + 2)."""
        temp = (self.ssize + 1) * (self.psuccess + 1)
        return float(temp)/float(self.psize + 2)
    # def kurtosis(self):
    #     """Gives the kurtosis of the sample."""
    #     raise DistributionFunctionError
    # def skew(self):
    #     """Gives the skew of the sample."""
    #     raise DistributionFunctionError
    def variance(self):
        """Gives the variance of the sample."""
        # mean * (1 - K/N) * finite-population correction (N - n)/(N - 1)
        t1 = float(self.psize-self.psuccess)/float(self.psize)
        t2 = float(self.psize-self.ssize)/float(self.psize-1)
        return self.mean() * t1 * t2
    # def quantile1(self):
    #     """Gives the 1st quantile of the sample."""
    #     raise DistributionFunctionError
    # def quantile3(self):
    #     """Gives the 3rd quantile of the sample."""
    #     raise DistributionFunctionError
    # def qmean(self):
    #     """Gives the quantile of the arithmetic mean of the sample."""
    #     raise DistributionFunctionError
    # def qmode(self):
    #     """Gives the quantile of the mode of the sample."""
    #     raise DistributionFunctionError
##    def random(self, seed):
##        """Gives a random number based on the distribution."""
##        while 1:
##            func
##            yield seed
class LogarithmicDistribution(Distribution):
    """
    Class for Logarithmic Distribution.
    @see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
    Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
    Logarithmic, Semicircular, Triangular, and Weibull. The Python
    Papers Source Codes 5: 2.
    @status: Tested method
    @since: version 0.4
    """
    def __init__(self, shape):
        """Constructor method. The parameters are used to construct the
        probability distribution.
        @param shape: the spread of the distribution; 0 < shape < 1"""
        self.shape = shape
    def CDF(self, x):
        """
        Cummulative Distribution Function: the sum of PDF over the support
        1 .. int(x).
        Bug fix: the original loop ran over range(int(x)), i.e. 0 .. x-1;
        the support of the logarithmic distribution starts at 1 and PDF(0)
        divides by zero, so any x >= 1 raised ZeroDivisionError.  The sum
        now runs over 1 .. int(x) inclusive."""
        summation = 0.0
        for i in range(1, int(x) + 1):
            summation = summation + self.PDF(i)
        return summation
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x (x >= 1), or the area under probability
        distribution from x-h to x+h for continuous distribution."""
        # NOTE(review): the standard logarithmic-distribution PDF uses the
        # natural log; log10 is kept because mean() and variance() use it
        # consistently - confirm against the source paper.
        return (-1 * (self.shape ** x)) / (math.log10(1 - self.shape) * x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while (probability > cprob):
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        return (-1 * self.shape) / ((1 - self.shape) * \
                math.log10(1 - self.shape))
    def mode(self):
        """Gives the mode of the sample (always the first support point)."""
        return 1.0
    def variance(self):
        """Gives the variance of the sample."""
        n = (-1 * self.shape) * (self.shape + math.log10(1 - self.shape))
        d = ((1 - self.shape) ** 2) * math.log10(1 - self.shape) * \
            math.log10(1 - self.shape)
        return n / d
class NormalDistribution(Distribution):
    """
    Class for standardized normal distribution (mean 0, standard deviation
    1; area under the curve = 1).
    @see: Ling, MHT. 2009. Ten Z-Test Routines from Gopal Kanji's 100
    Statistical Tests. The Python Papers Source Codes 1:5
    @status: Tested method
    @since: version 0.1
    """
    def __init__(self):
        # Bug fix: the attribute was previously named ``self.mean``, which
        # shadowed the mean() method (the instance attribute wins over the
        # class method), making mean() and mode() uncallable on instances.
        # The private name follows the convention already used by
        # PoissonDistribution and TDistribution.
        self._mean = 0.0
        self.stdev = 1.0
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability.
        Computed via the complementary error function.
        """
        return 1.0 - 0.5 * nrpy.erfcc(x/SQRT2)
    def PDF(self, x):
        """
        Calculates the density (probability) at x by the formula
        f(x) = 1/(sqrt(2 pi) sigma) e^-((x^2/(2 sigma^2))
        where mu is the mean of the distribution and sigma the standard
        deviation.
        @param x: probability at x
        """
        return (1/(math.sqrt(PI2) * self.stdev)) * \
            math.exp(-(x ** 2/(2 * self.stdev**2)))
    def inverseCDF(self, probability, start = -10.0,
                   end = 10.0, error = 10e-8):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis, together with the
        cumulative probability, by recursive bisection.
        @param probability: probability under the curve from -infinity
        @param start: lower boundary of calculation (default = -10)
        @param end: upper boundary of calculation (default = 10)
        @param error: error between the given and calculated probabilities
        (default = 10e-8)
        @return: Returns a tuple (start, cprob) where 'start' is the standard
        deviation for the area under the curve from -infinity to the given
        'probability' (+/- step). 'cprob' is the calculated area under the
        curve from -infinity to the returned 'start'.
        """
        # check for tolerance
        if abs(self.CDF(start)-probability) < error:
            return (start, self.CDF(start))
        # case 1: lower than -10 standard deviations
        if probability < self.CDF(start):
            return self.inverseCDF(probability, start-5, start, error)
        # case 2: between -10 to 10 standard deviations (bisection method)
        if probability > self.CDF(start) and \
        probability < self.CDF((start+end)/2):
            return self.inverseCDF(probability, start, (start+end)/2, error)
        if probability > self.CDF((start+end)/2) and \
        probability < self.CDF(end):
            return self.inverseCDF(probability, (start+end)/2, end, error)
        # case 3: higher than 10 standard deviations
        if probability > self.CDF(end):
            return self.inverseCDF(probability, end, end+5, error)
    def mean(self):
        """Gives the arithmetic mean (0.0 for the standard normal)."""
        return self._mean
    def mode(self):
        """Gives the mode (equals the mean)."""
        return self._mean
    def kurtosis(self):
        """Gives the excess kurtosis (0.0)."""
        return 0.0
    def skew(self):
        """Gives the skew (0.0; the distribution is symmetric)."""
        return 0.0
    def variance(self):
        """Gives the variance (stdev squared)."""
        return self.stdev * self.stdev
    def random(self):
        """Gives a random number based on the distribution."""
        return random.gauss(self._mean, self.stdev)
class PoissonDistribution(Distribution):
    """
    Class for Poisson Distribution. Poisson distribution is binomial
    distribution with very low success - that is, for rare events.
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, expectation=0.001):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param expectation: mean success probability; S{lambda}
        """
        self._mean = float(expectation)
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability.
        """
        return nrpy.cdf_poisson(x + 1, self._mean)
    def PDF(self, x):
        """
        Partial Distribution Function: probability of exactly x events,
        e^(-lambda) * lambda^x / x!.
        Bug fix: the original computed math.exp(-1 ** self._mean); unary
        minus binds looser than **, so that expression is -(1 ** mean) == -1
        and the exponential factor was e^-1 regardless of lambda.
        """
        return (math.exp(-self._mean) *
                (self._mean ** x)) / nrpy.factrl(x)
    def inverseCDF(self, probability, start=0.001, step=1):
        """
        It does the reverse of CDF() method, it takes a probability value and
        the corresponding value on the x-axis.
        """
        cprob = self.CDF(start)
        if probability < cprob:
            return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample (lambda)."""
        return self._mean
    def mode(self):
        """Gives the mode of the sample (floor of lambda)."""
        return int(self._mean)
    def variance(self):
        """Gives the variance of the sample (equals lambda)."""
        return self._mean
class SemicircularDistribution(Distribution):
    """
    Semicircular distribution centred on ``location`` with radius ``scale``.
    @see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
    Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
    Logarithmic, Semicircular, Triangular, and Weibull. The Python
    Papers Source Codes 5: 2.
    @status: Tested method
    @since: version 0.4
    """
    def __init__(self, location=0.0, scale=1.0):
        """
        Constructor method.
        @param location: mean of the distribution, default = 0.0
        @param scale: spread of the distribution, default = 1.0"""
        self.location = location
        self.scale = scale
    def CDF(self, x):
        """
        Cumulative probability from the lower edge of the semicircle up
        to x, on the standardized variate t = (x - location) / scale.
        """
        t = (x - self.location) / self.scale
        return 0.5 + (1 / PI) * (t * math.sqrt(1 - (t ** 2)) + math.asin(t))
    def PDF(self, x):
        """
        Probability density at x: a semicircle scaled to unit area.
        """
        t = (x - self.location) / self.scale
        return (2 / (self.scale * PI)) * math.sqrt(1 - t ** 2)
    def inverseCDF(self, probability, start=-10.0, step=0.01):
        """
        Reverse of CDF(); the scan is clamped so it begins no lower than
        the left edge of the support (-scale).
        """
        x = start
        if x < -1 * self.scale:
            x = -1 * self.scale
        area = self.CDF(x)
        if probability < area:
            return (x, area)
        while probability > area:
            x = x + step
            area = self.CDF(x)
        return (x, area)
    def mean(self):
        """Arithmetic mean: the centre of the semicircle."""
        return self.location
    def mode(self):
        """Mode (equals the mean)."""
        return self.location
    def kurtosis(self):
        """Excess kurtosis: -1."""
        return -1.0
    def skew(self):
        """Skew: 0 (the distribution is symmetric)."""
        return 0.0
    def variance(self):
        """Variance: scale^2 / 4."""
        return 0.25 * (self.scale ** 2)
    def quantile1(self):
        """First quartile."""
        return self.location - (0.404 * self.scale)
    def quantile3(self):
        """Third quartile."""
        return self.location + (0.404 * self.scale)
    def qmean(self):
        """Quantile of the arithmetic mean."""
        return 0.5
    def qmode(self):
        """Quantile of the mode."""
        return 0.5
    # random() is intentionally not implemented (as in the original).
class TDistribution(Distribution):
    """
    Class for Student's t-distribution.
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, location=0.0, scale=1.0, shape=2):
        """Constructor method. The parameters are used to construct
        the probability distribution.
        @param location: centre of the distribution; default = 0.0
        @param scale: spread of the distribution; default = 1.0
        @param shape: degrees of freedom; default = 2"""
        self._mean = float(location)
        self.stdev = float(scale)
        self.df = float(shape)
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability.
        """
        # Standardize x, then exploit the symmetry of the t-distribution:
        # betai gives the two-tail area, which is halved per side.
        t = (x - self._mean) / self.stdev
        a = nrpy.betai(self.df / 2.0, 0.5, self.df / (self.df + (t * t)))
        if t > 0:
            return 1 - 0.5 * a
        else:
            return 0.5 * a
    def PDF(self, x):
        """
        Calculates the density (probability) at x with n-th degrees of freedom
        as
        M{f(x) = S{Gamma}((n+1)/2) /
        (sqrt(n * pi) S{Gamma}(n/2)) (1 + x^2/n)^-((n+1)/2)}
        for all real x. It has mean 0 (for n > 1) and variance n/(n-2)
        (for n > 2)."""
        # NOTE(review): the formula above uses Gamma, but nrpy.gammln is
        # conventionally the *log*-gamma function - confirm that the ratio
        # a / b below receives the values this formula expects.
        a = nrpy.gammln((self.df + 1) / 2)
        b = math.sqrt(math.pi * self.df) * nrpy.gammln(self.df / 2) * \
            self.stdev
        c = 1 + ((((x - self._mean) / self.stdev) ** 2) / self.df)
        return (a / b) * (c ** ((-1 - self.df) / 2))
    def inverseCDF(self, probability, start = -10.0,
                   end = 10.0, error = 10e-8):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis, together with the
        cumulative probability, by recursive bisection.
        @param probability: probability under the curve from -infinity
        @param start: lower boundary of calculation (default = -10)
        @param end: upper boundary of calculation (default = 10)
        @param error: error between the given and calculated probabilities
        (default = 10e-8)
        @return: Returns a tuple (start, cprob) where 'start' is the standard
        deviation for the area under the curve from -infinity to the given
        'probability' (+/- step). 'cprob' is the calculated area under the
        curve from -infinity to the returned 'start'.
        """
        # check for tolerance
        if abs(self.CDF(start)-probability) < error:
            return (start, self.CDF(start))
        # case 1: lower than -10 standard deviations
        if probability < self.CDF(start):
            return self.inverseCDF(probability, start-10, start, error)
        # case 2: between -10 to 10 standard deviations (bisection method)
        if probability > self.CDF(start) and \
        probability < self.CDF((start+end)/2):
            return self.inverseCDF(probability, start, (start+end)/2, error)
        if probability > self.CDF((start+end)/2) and \
        probability < self.CDF(end):
            return self.inverseCDF(probability, (start+end)/2, end, error)
        # case 3: higher than 10 standard deviations
        if probability > self.CDF(end):
            return self.inverseCDF(probability, end, end+10, error)
        # cprob = self.CDF(start)
        # if probability < cprob:
        #     return (start, cprob)
        # while probability > cprob:
        #     start = start + step
        #     cprob = self.CDF(start)
        # return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        return self._mean
    def mode(self):
        """Gives the mode of the sample (equals the mean)."""
        return self._mean
    def kurtosis(self):
        """Gives the kurtosis of the sample."""
        # NOTE(review): as in PDF, this uses nrpy.gammln where the textbook
        # kurtosis formula uses Gamma itself - confirm.
        a = ((self.df - 2) ** 2) * nrpy.gammln((self.df / 2) - 2)
        return 3 * ((a / (4 * nrpy.gammln(self.df / 2))) - 1)
    def skew(self):
        """Gives the skew of the sample (0; symmetric distribution)."""
        return 0.0
    def variance(self):
        """Gives the variance of the sample: df/(df-2) * stdev^2."""
        return (self.df / (self.df - 2)) * self.stdev * self.stdev
class TriangularDistribution(Distribution):
    """
    Class for Triangular Distribution.
    @see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
    Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
    Logarithmic, Semicircular, Triangular, and Weibull. The Python
    Papers Source Codes 5: 2.
    @status: Tested method
    @since: version 0.4
    """
    def __init__(self, upper_limit, peak, lower_limit=0):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param upper_limit: upper limit of the distrbution
        @type upper_limit: float
        @param peak: peak of the distrbution, which has to be between
        the lower and upper limits of the distribution
        @type peak: float
        @param lower_limit: lower limit of the distrbution,
        default = 0
        @type lower_limit: float"""
        self.lower_limit = lower_limit
        if upper_limit < self.lower_limit:
            raise AttributeError
        else:
            self.upper_limit = upper_limit
        if peak > upper_limit:
            raise AttributeError
        if peak < lower_limit + 0.001:
            raise AttributeError
        else:
            # NOTE(review): this attribute shadows the mode() method below
            # (the instance attribute wins), so instances expose the peak
            # as ``self.mode`` rather than a callable - kept as-is for
            # backward compatibility.
            self.mode = peak
    def CDF(self, x):
        """
        Cummulative Distribution Function for x between the lower limit
        and the peak; values outside that range raise AttributeError."""
        if x < self.lower_limit:
            raise AttributeError
        if x > self.mode:
            raise AttributeError
        else:
            return (( x - self.lower_limit) ** 2) / \
                ((self.upper_limit - self.lower_limit) * \
                (self.mode - self.lower_limit))
    def PDF(self, x):
        """
        Partial Distribution Function for x between the lower limit and
        the peak (the rising edge of the triangle only)."""
        if x < self.lower_limit:
            raise AttributeError
        if x > self.mode:
            raise AttributeError
        else:
            return ((2 * (x - self.lower_limit)) / \
                ((self.upper_limit - self.lower_limit) * \
                (self.mode - self.lower_limit)))
    def inverseCDF(self, probability, start=0, step=0.01):
        """
        It does the reverse of CDF() method; note that ``start`` is always
        reset to the lower limit of the distribution before scanning."""
        start = self.lower_limit
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while (probability > cprob):
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean: (lower + upper + peak) / 3."""
        return (float(self.lower_limit + self.upper_limit + self.mode) / 3)
    def mode(self):
        """Gives the mode of the sample (shadowed by the ``mode``
        attribute set in __init__; see the note there)."""
        return (self.mode)
    def kurtosis(self):
        """Gives the excess kurtosis: -3/5."""
        return ((-3)*(5 ** - 1))
    def skew(self):
        """Gives the skew of the sample.
        Bug fix: the original referenced ``self.self.upper_limit`` (a typo
        that raised AttributeError on every call); it now reads
        ``self.upper_limit``."""
        # NOTE(review): the denominator differs from the textbook
        # 5*(l^2+u^2+m^2-lu-lm-um)^(3/2) form - confirm against the paper.
        return (math.sqrt(2) * (self.lower_limit + self.upper_limit - 2 * \
            self.mode) * (2 * self.lower_limit - self.upper_limit - self.mode) \
            * (self.lower_limit - 2 * self.upper_limit + self.mode)) \
            / (self.lower_limit ** 2 + self.upper_limit ** 2 + self.mode ** 2 - \
            self.lower_limit * self.upper_limit + self.mode ** 2 - self.lower_limit * \
            (self.upper_limit - self.mode))
    def variance(self):
        """Gives the variance: (l^2 + u^2 + m^2 - lu - lm - um) / 18."""
        return (self.lower_limit ** 2 + self.upper_limit ** 2 + self.mode ** 2\
            - (self.lower_limit * self.upper_limit) - \
            (self.lower_limit * self.mode) - (self.upper_limit * self.mode))\
            *(18 ** -1)
    def quantile1(self):
        """Gives the 1st quantile of the sample."""
        if ((self.mode - self.lower_limit) * \
            (self.upper_limit - self.lower_limit) ** -1) > 0.25:
            return self.lower_limit + (0.5 * math.sqrt((self.upper_limit - \
                self.lower_limit) * (self.mode - self.lower_limit)))
        else:
            return self.upper_limit - ((0.5) * math.sqrt (3 * (self.upper_limit -\
                self.lower_limit) * (self.upper_limit - self.mode)))
    def quantile3(self):
        """Gives the 3rd quantile of the sample."""
        if ((self.mode - self.lower_limit) * \
            (self.upper_limit - self.lower_limit) ** -1) > 0.75:
            return self.lower_limit + (0.5 * math.sqrt(3 * (self.upper_limit - \
                self.lower_limit) * (self.mode - self.lower_limit)))
        else:
            return self.upper_limit - ((0.5) * math.sqrt ((self.upper_limit -\
                self.lower_limit) * (self.upper_limit - self.mode)))
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample."""
        # NOTE(review): the first branch multiplies where the textbook
        # formula divides ((u+m-2l)^2 / (9(u-l)(m-l))) - confirm.
        if self.mode > ((self.lower_limit + self.upper_limit) * 0.5):
            return ((self.upper_limit + self.mode - 2 * self.lower_limit) ** 2)\
                * (9 * (self.upper_limit - self.lower_limit) * (self.mode - \
                self.lower_limit))
        else:
            return (self.lower_limit ** 2 + (5 * self.lower_limit * \
                self.upper_limit) - (5 * (self.upper_limit ** 2)) - \
                (7 * self.lower_limit * self.mode) + (5 * self.upper_limit * \
                self.mode) + self.mode ** 2)
    def qmode(self):
        """Gives the quantile of the mode of the sample."""
        return (self.mode - self.lower_limit) * (self.upper_limit \
            - self.lower_limit) ** - 1
class UniformDistribution(Distribution):
    """
    Class for the continuous Uniform distribution on [location, scale].
    @see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
    Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
    Papers Source Codes 1:4
    @status: Tested method
    @since: version 0.2
    """
    def __init__(self, location, scale):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param location: lower bound of the distribution
        @param scale: upper bound of the distribution
        """
        self.location = float(location)
        self.scale = float(scale)
    def CDF(self, x):
        """
        Cummulative Distribution Function: the fraction of the interval
        [location, scale] that lies below x.
        """
        return (x - self.location) / (self.scale - self.location)
    def PDF(self, x=None):
        """
        Partial Distribution Function: the density is constant over the
        interval, so x does not affect the result.
        Improvement: accepts an optional x argument so the signature is
        consistent with the PDF(x) methods of the sibling distribution
        classes; existing no-argument callers are unaffected.
        """
        return 1.0 / (self.scale - self.location)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        the corresponding value on the x-axis.
        """
        cprob = self.CDF(start)
        if probability < cprob:
            return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        return (self.location + self.scale) / 2.0
    def median(self):
        """Gives the median of the sample (same as the mean)."""
        return (self.location + self.scale) / 2
    def kurtosis(self):
        """Gives the excess kurtosis of the sample (-6/5)."""
        return -1.2
    def skew(self):
        """Gives the skew of the sample (0; symmetric distribution)."""
        return 0.0
    def variance(self):
        """Gives the variance of the sample: (b - a)^2 / 12."""
        return ((self.scale - self.location) ** 2) / 12
    def quantile1(self):
        """Gives the 1st quantile of the sample."""
        return ((3 * self.location) + self.scale) / 4
    def quantile3(self):
        """Gives the 3rd quantile of the sample."""
        return (self.location + (3 * self.scale)) / 4
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample."""
        return 0.5
    def random(self, lower, upper):
        """Gives a random number uniformly drawn from [lower, upper]."""
        return random.uniform(lower, upper)
class WeiBullDistribution(Distribution):
    """
    Class for Weibull distribution, parameterized here as
    CDF(x) = 1 - exp(-(x / location) ** scale); ``location`` is therefore
    the Weibull scale (alpha) and ``scale`` the Weibull shape (beta).
    @see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
    Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
    Logarithmic, Semicircular, Triangular, and Weibull. The Python
    Papers Source Codes 5: 2.
    @status: Tested method
    @since: version 0.4
    """
    def __init__(self, location=1.0, scale=1.0):
        """Constructor method. The parameters are used to construct the
        probability distribution.
        @param location: position of the distribution (Weibull scale,
        alpha), default = 1.0
        @param scale: shape of the distribution (Weibull shape, beta),
        default = 1.0"""
        self.location = location
        self.scale = scale
    def CDF(self, x):
        """
        Cummulative Distribution Function from 0 up to x."""
        power = -1 * ((float(x) / self.location) ** self.scale)
        return 1 - (math.e ** power)
    def PDF(self, x):
        """
        Partial Distribution Function at x; 0 for negative x (outside the
        support)."""
        if x < 0:
            return 0
        else:
            power = -1 * ((float(x) / self.location) ** self.scale)
            t3 = math.e ** power
            t2 = (float(x) / self.location) ** (self.scale - 1)
            t1 = self.scale / self.location
            return t1 * t2 * t3
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while (probability > cprob):
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    # def mean(self):
    #     """Gives the arithmetic mean of the sample."""
    #     return self.location * nrpy.gammln(1 + 1/self.scale)
    def median(self):
        """Gives the median of the sample."""
        return self.location * (math.log(2, math.e) ** (1/float(self.scale)))
    def mode(self):
        """Gives the mode of the sample (0 when the shape is <= 1)."""
        if self.scale > 1:
            t = ((self.scale - 1) / float(self.scale))
            return self.location * (t ** (1/float(self.scale)))
        else:
            return 0
    def random(self):
        """Gives a random number based on the distribution.
        Bug fix: the original called random.weibullvariate(self.scale,
        self.shape), but this class has no ``shape`` attribute, so every
        call raised AttributeError.  random.weibullvariate(alpha, beta)
        expects (scale, shape), which under this class's naming is
        (self.location, self.scale)."""
        return random.weibullvariate(self.location, self.scale)
def FrechetDistribution(**parameters):
    """
    Frechet distribution is an alias of Weibull distribution.
    Bug fix: the original returned WeibullDistribution(**parameters), but
    the class in this module is spelled WeiBullDistribution (capital B),
    so every call raised NameError."""
    return WeiBullDistribution(**parameters)
# ----------------------------------------------------------
# Untested Distributions
# ----------------------------------------------------------
def AntiLogNormalDistribution(**parameters):
    """
    Factory for the Anti-Lognormal distribution, which is simply the
    Lognormal distribution under another name."""
    return LogNormalDistribution(**parameters)
class BernoulliDistribution(Distribution):
    """
    Bernoulli distribution: the special case of the Binomial distribution
    with a single trial.  All operations delegate to an internal
    BinomialDistribution instance.
    """
    def __init__(self, success):
        """Constructor method.
        @param success: probability of success; 0 <= success <= 1"""
        self.distribution = BinomialDistribution(success, trial = 1)
    def CDF(self, x):
        """Cumulative probability from -infinity or 0 up to x."""
        return self.distribution.CDF(x)
    def PDF(self, x):
        """Probability at exactly x."""
        return self.distribution.PDF(x)
    def inverseCDF(self, probability, start = 0, step = 1):
        """Reverse of CDF(): maps a probability back to an x-value."""
        return self.distribution.inverseCDF(probability, start, step)
    def mean(self):
        """Arithmetic mean of the sample."""
        return self.distribution.mean()
    def mode(self):
        """Mode of the sample."""
        return self.distribution.mode()
    def kurtosis(self):
        """Kurtosis of the sample."""
        return self.distribution.kurtosis()
    def skew(self):
        """Skew of the sample."""
        return self.distribution.skew()
    def variance(self):
        """Variance of the sample."""
        return self.distribution.variance()
    # def random(self):
    #     """Gives a random number based on the distribution."""
    #     return self.distribution.random()
def BilateralExponentialDistribution(**parameters):
    """
    Bilateral Exponential distribution is an alias of Laplace distribution.
    @param parameters: keyword arguments forwarded unchanged to
        LaplaceDistribution's constructor
    @return: a LaplaceDistribution instance"""
    return LaplaceDistribution(**parameters)
class BradfordDistribution(Distribution):
    """Class for Bradford distribution.

    Parameterized by location (lower bound), scale (upper bound) and
    shape. self.k caches ln(shape + 1), the normalizing constant used
    throughout the density and moment formulae."""
    def __init__(self, location, scale, shape):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param location: lower bound
        @param scale: upper bound
        @param shape: shape factor"""
        self.location = location
        self.scale = scale
        self.shape = shape
        # Natural logarithm, NOT log10: only with k = ln(shape + 1) does
        # the PDF integrate to 1, and only then do the moment formulae
        # below agree with the inverse-CDF transform used in random().
        self.k = math.log(self.shape + 1)
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability."""
        r = ((self.shape * (x - self.location)) / (self.scale - self.location))
        return math.log(1 + r) / self.k
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution
        from x-h to x+h for continuous distribution."""
        r = (self.shape * (x - self.location)) + self.scale - self.location
        return self.shape / (self.k * r)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding (x, CDF(x)) tuple, stepping up the
        x-axis from 'start' in units of 'step'."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        r = self.shape * (self.scale - self.location)
        r = r + (((self.shape + 1) * self.location - self.scale) * self.k)
        return r / (self.shape * self.k)
    def mode(self):
        """Gives the mode of the sample; the density is maximal at the
        lower bound."""
        return self.location
    def kurtosis(self):
        """Gives the kurtosis of the sample."""
        d = ((self.shape * (self.k - 2)) + (2 * self.k)) ** 2
        d = 3 * self.shape * d
        n = ((self.k * ((3 * self.k) - 16)) + 24)
        n = (self.shape ** 3) * (self.k - 3) * n
        n = n + ((self.k - 4) * (self.k - 3) * (12 * self.k * (self.k **2)))
        n = n + (6 * self.k * (self.k **2)) * ((3 * self.k) - 14)
        return (n + (12 * (self.k ** 3))) / d
    def skew(self):
        """Gives the skew of the sample."""
        r = 12 * (self.shape ** 2)
        r = r - (9 * self.k * self.shape * (self.shape + 2))
        r = r + ((2 * self.k * self.k) * ((self.shape * (self.shape + 3)) + 3))
        d = self.shape * (((self.k - 2) * self.shape) + (2 * self.k))
        d = math.sqrt(d)
        d = d * ((3 * self.shape * (self.k - 2)) + (6 * self.k))
        return r / d
    def variance(self):
        """Gives the variance of the sample."""
        r = (self.scale - self.location) ** 2
        r = r * (self.shape * (self.k - 2) + (2 * self.k))
        return r / (2 * self.shape * self.k * self.k)
    def quantile1(self):
        """Gives the 1st quantile of the sample (inverse CDF at 0.25)."""
        r = (self.location * (self.shape + 1)) - self.scale
        r = r + ((self.scale - self.location) * ((self.shape + 1)** 0.25))
        return r / self.shape
    def quantile3(self):
        """Gives the 3rd quantile of the sample (inverse CDF at 0.75)."""
        r = (self.location * (self.shape + 1)) - self.scale
        r = r + ((self.scale - self.location) * ((self.shape + 1)** 0.75))
        return r / self.shape
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample:
        ln(shape / ln(shape + 1)) / ln(shape + 1)."""
        return math.log(self.shape / self.k) / self.k
    def qmode(self):
        """Gives the quantile of the mode of the sample."""
        return 0.0
    def random(self, seed):
        """Generator of random numbers based on the distribution, via the
        inverse-CDF transform; 'seed' must lie in (0, 1)."""
        while 1:
            r = self.location * (self.shape + 1) - self.scale
            r = r + ((self.scale - self.location)*((self.shape + 1) ** seed))
            seed = r / self.shape
            yield seed
class BurrDistribution(Distribution):
    """
    Burr distribution is the generalization of Fisk distribution. Burr
    distribution with D = 1 becomes Fisk distribution.
    """
    def __init__(self, location, scale, C, D):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param location:
        @param scale:
        @param C: shape
        @param D: shape"""
        self.location = location
        self.scale = scale
        self.C = C
        self.D = D
        # Moment constant used by variance(). Uses the TRUE gamma
        # function (nrpy.gammln returns the LOG of gamma, so it must be
        # exponentiated first) and float divisions so an integer C does
        # not truncate 1/C and 2/C.
        self.k = (self._gamma(self.D) * \
                  self._gamma(1 - (2.0/self.C)) * \
                  self._gamma((2.0/self.C) + self.D)) - \
                 ((self._gamma(1 - (1.0/self.C)) ** 2) * \
                  (self._gamma((1.0/self.C) + self.D) ** 2))
    def _gamma(self, x):
        """True gamma function recovered from nrpy's log-gamma."""
        return math.exp(nrpy.gammln(x))
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability."""
        return (1+(((x - self.location)/self.scale)**(-self.C)))**(-self.D)
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution
        from x-h to x+h for continuous distribution."""
        r = (1+(((x - self.location)/self.scale)**(-self.C)))**(-self.D - 1)
        r = r * ((self.C * self.D)/self.scale)
        return r * (((x - self.location)/self.scale)**(-self.C - 1))
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding (x, CDF(x)) tuple, stepping up the
        x-axis from 'start' in units of 'step'."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample:
        location + scale * Gamma(1 - 1/C) * Gamma(1/C + D) / Gamma(D)."""
        r = self._gamma(1 - (1.0/self.C)) * self._gamma((1.0/self.C) + self.D)
        return self.location + ((r * self.scale) / self._gamma(self.D))
    def mode(self):
        """Gives the mode of the sample."""
        if ((self.C * self.D) < 1): return self.location
        else:
            r = (((self.C * self.D)-1)/(self.C + 1)) ** (1.0/self.C)
            return self.location + (self.scale * r)
#    def kurtosis(self):
#        """Gives the kurtosis of the sample."""
#        raise DistributionFunctionError
#    def skew(self):
#        """Gives the skew of the sample."""
#        raise DistributionFunctionError
    def variance(self):
        """Gives the variance of the sample."""
        return (self.k * (self.scale ** 2)) / (self._gamma(self.D) ** 2)
#    def quantile1(self):
#        """Gives the 1st quantile of the sample."""
#        raise DistributionFunctionError
#    def quantile3(self):
#        """Gives the 3rd quantile of the sample."""
#        raise DistributionFunctionError
#    def qmean(self):
#        """Gives the quantile of the arithmetic mean of the sample."""
#        raise DistributionFunctionError
    def qmode(self):
        """Gives the quantile of the mode of the sample."""
        if ((self.C * self.D) < 1): return 0.0
        else:
            return (1 + ((self.C+1)/((self.C*self.D) - 1.0))) ** (-1*self.D)
    def random(self, seed):
        """Generator of random numbers based on the distribution, via the
        inverse-CDF transform; 'seed' must lie in (0, 1)."""
        while 1:
            r = ((1/(seed ** (1.0/self.D))) - 1) ** (-1.0/self.C)
            seed = self.location + self.scale * r
            yield seed
class ChiDistribution(Distribution):
    """Class for Chi distribution.

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written."""
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
def CobbDouglasDistribution(**parameters):
    """
    Cobb-Douglas distribution is an alias of Lognormal distribution.
    @param parameters: keyword arguments forwarded unchanged to
        LogNormalDistribution's constructor
    @return: a LogNormalDistribution instance"""
    return LogNormalDistribution(**parameters)
def DoubleExponentialDistribution(**parameters):
    """
    Double Exponential distribution is an alias of Laplace distribution.
    @param parameters: keyword arguments forwarded unchanged to
        LaplaceDistribution's constructor
    @return: a LaplaceDistribution instance"""
    return LaplaceDistribution(**parameters)
class DoubleGammaDistribution(Distribution):
    """
    Double Gamma distribution is the signed version of Gamma distribution.
    """
    def __init__(self, location, scale, shape):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param location: centre of the (symmetric) distribution
        @param scale:
        @param shape:"""
        self.location = location
        self.scale = scale
        self.shape = shape
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability.

        nrpy.gammp is the regularized lower incomplete gamma function;
        symmetry about 'location' gives the two half-branches."""
        r = nrpy.gammp(self.shape ,abs((x - self.location)/self.scale))
        if x > self.location: return 0.5 + (0.5 * r)
        else: return 0.5 - (0.5 * r)
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution
        from x-h to x+h for continuous distribution."""
        r = math.exp(-1 * abs((x - self.location)/self.scale))
        r = r * (abs((x - self.location)/self.scale) ** (self.shape -1))
        # Normalize by the TRUE gamma function: nrpy.gammln returns the
        # LOG of gamma, so it must be exponentiated before use.
        return r / (2 * self.scale * math.exp(nrpy.gammln(self.shape)))
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding (x, CDF(x)) tuple, stepping up the
        x-axis from 'start' in units of 'step'."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample; equals the centre of
        symmetry."""
        return self.location
    def skew(self):
        """Gives the skew of the sample; zero by symmetry."""
        return 0.0
    def variance(self):
        """Gives the variance of the sample: shape*(shape+1)*scale^2."""
        return self.shape * (self.shape + 1) * (self.scale ** 2)
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample."""
        return 0.5
#    def random(self):
#        """Gives a random number based on the distribution."""
#        raise DistributionFunctionError
class DoubleWeibullDistribution(Distribution):
    """
    Double Weibull distribution is the signed version of Weibull distribution.

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written.
    """
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
class ExtremeLBDistribution(Distribution):
    """Class for Extreme LB distribution.

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written."""
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
class FiskDistribution(Distribution):
    """Class for Fisk distribution.

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written."""
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
def FisherTippettDistribution(location, scale):
    """
    Fisher-Tippett distribution is an alias of Gumbel distribution.
    @param location: S{eta}
    @param scale: S{theta}
    @return: a GumbelDistribution constructed with the same parameters"""
    return GumbelDistribution(location, scale)
class FoldedNormalDistribution(Distribution):
    """Class for Folded Normal distribution.

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written."""
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
class GenLogisticDistribution(Distribution):
    """
    Generalized Logistic distribution is a generalization of Logistic
    distribution. It becomes Logistic distribution when shape = 1

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written.
    """
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
def GompertzDistribution(location, scale):
    """
    Gompertz distribution is an alias of Gumbel distribution.
    @param location: S{eta}
    @param scale: S{theta}
    @return: a GumbelDistribution constructed with the same parameters"""
    return GumbelDistribution(location, scale)
class GumbelDistribution(Distribution):
    """Class for Gumbel Distribution (extreme-value type I);
    CDF(x) = exp(-exp(-(x - location)/scale))."""
    def __init__(self, location, scale):
        """Constructor method. The parameters are used to construct the
        probability distribution.
        @param location: S{eta}
        @param scale: S{theta}"""
        self.location = location
        self.scale = scale
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability."""
        return math.exp(-1 * math.exp((self.location - x) / self.scale))
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution from
        x-h to x+h for continuous distribution."""
        return (1/self.scale) * math.exp((self.location - x) / self.scale) * \
            self.CDF(x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding (x, CDF(x)) tuple, stepping up the
        x-axis from 'start' in units of 'step'."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample:
        location + gamma*scale (Euler-Mascheroni constant)."""
        return self.location + (GAMMA * self.scale)
    def mode(self):
        """Gives the mode of the sample."""
        return self.location
    def median(self):
        """Gives the median: location - scale*ln(ln 2). Natural logs --
        log10 here would not invert the exp(-exp(...)) CDF."""
        return self.location - self.scale * math.log(math.log(2))
    def kurtosis(self):
        """Gives the (excess) kurtosis of the sample: 12/5."""
        return 2.4
    def skew(self):
        """Gives the skew of the sample."""
        return 1.1395
    def variance(self):
        """Gives the variance of the sample: (pi*scale)^2 / 6."""
        return ((PI * self.scale) ** 2) / 6.0
    def quantile1(self):
        """Gives the 1st quantile: location - scale*ln(ln 4)."""
        return self.location - self.scale * math.log(math.log(4))
    def quantile3(self):
        """Gives the 3rd quantile: location - scale*ln(ln(4/3))."""
        return self.location - self.scale * math.log(math.log(4.0/3.0))
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample:
        exp(-exp(-gamma)) ~ 0.5704."""
        return 0.5704
    def qmode(self):
        """Gives the quantile of the mode of the sample: exp(-1)."""
        return 0.3679
    def random(self, seed):
        """Generator of random numbers via the inverse-CDF transform
        x = location - scale*ln(-ln u); 'seed' must lie in (0, 1)."""
        while 1:
            seed = self.location - \
                   (self.scale * math.log(-1 * math.log(seed)))
            yield seed
class HalfNormalDistribution(Distribution):
    """
    Half Normal distribution is a special case of Chi distribution where
    shape (also degrees of freedom) = 1, and Folded Normal distribution
    where location = 0

    Every operation is delegated to an internal ChiDistribution.
    """
    def __init__(self, **parameters):
        """Constructor method. The parameters are used to construct the
        probability distribution.
        @param parameters: must contain 'location' and 'scale' keys
        @raise DistributionParameterError: if either key is missing"""
        # NOTE(review): ChiDistribution's constructor is commented out in
        # this file, so this call may not accept these keyword
        # arguments -- confirm before relying on this class.
        try: self.distribution = ChiDistribution(location =
                                    parameters['location'],
                                    scale = parameters['scale'],
                                    shape = 1)
        except KeyError:
            raise DistributionParameterError('Halfnormal distribution \
        requires location and scale parameters')
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability.
        Delegates to the underlying ChiDistribution."""
        return self.distribution.CDF(x)
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution from
        x-h to x+h for continuous distribution.
        Delegates to the underlying ChiDistribution."""
        return self.distribution.PDF(x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis.
        Delegates to the underlying ChiDistribution."""
        return self.distribution.inverseCDF(probability, start, step)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        return self.distribution.mean()
    def mode(self):
        """Gives the mode of the sample."""
        return self.distribution.mode()
    def kurtosis(self):
        """Gives the kurtosis of the sample."""
        return self.distribution.kurtosis()
    def skew(self):
        """Gives the skew of the sample."""
        return self.distribution.skew()
    def variance(self):
        """Gives the variance of the sample."""
        return self.distribution.variance()
class HyperbolicSecantDistribution(Distribution):
    """Class for Hyperbolic Secant Distribution."""
    def __init__(self, location, scale):
        """
        Constructor method. The parameters are used to construct the
        probability distribution.
        @param location: centre (mean, median and mode)
        @param scale:"""
        self.location = location
        self.scale = scale
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability.

        F(x) = (2/pi) * arctan(exp((x - location)/scale)); the previous
        1/tan form was a typo for arctan and did not yield a valid CDF."""
        return (2 / PI) * \
            math.atan(math.exp((x - self.location) / self.scale))
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution
        from x-h to x+h for continuous distribution."""
        # 'self.scale', not 'math.scale' (which does not exist and raised
        # AttributeError).
        return (1 / math.cosh((x - self.location) / self.scale)) / \
            (PI * self.scale)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding (x, CDF(x)) tuple, stepping up the
        x-axis from 'start' in units of 'step'."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while probability > cprob:
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        return self.location
    def mode(self):
        """Gives the mode of the sample."""
        return self.location
    def kurtosis(self):
        """Gives the (excess) kurtosis of the sample."""
        return 2.0
    def skew(self):
        """Gives the skew of the sample; zero by symmetry."""
        return 0.0
    def variance(self):
        """Gives the variance of the sample: (pi*scale)^2 / 4."""
        return 0.25 * ((PI * self.scale) ** 2)
#    def quantile1(self):
#        """Gives the 1st quantile of the sample."""
#        raise DistributionFunctionError
#    def quantile3(self):
#        """Gives the 3rd quantile of the sample."""
#        raise DistributionFunctionError
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample."""
        return 0.5
    def qmode(self):
        """Gives the quantile of the mode of the sample."""
        return 0.5
#    def random(self):
#        """Gives a random number based on the distribution."""
#        raise DistributionFunctionError
class LaplaceDistribution(Distribution):
    """Class for Laplace distribution.

    Only the generic inverseCDF() search is implemented; the
    constructor, CDF, PDF, moment and random-number methods (previously
    present only as commented-out stubs) have yet to be written."""
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        Performs the reverse of CDF(): steps along the x-axis from
        'start' in units of 'step' until the cumulative probability
        reaches 'probability'; returns an (x, CDF(x)) tuple.

        NOTE(review): relies on a CDF() method this class does not yet
        define."""
        cprob = self.CDF(start)
        while probability > cprob:
            start += step
            cprob = self.CDF(start)
        return (start, cprob)
class LogisticDistribution(Distribution):
    """
    Class for Logistic Distribution.

    Only the generic inverseCDF() search is implemented here; CDF, PDF,
    the moment methods (mean, mode, kurtosis, skew, variance), the
    quantile methods and random() are not implemented for this
    distribution.
    """
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding value on the x-axis."""
        cumulative = self.CDF(start)
        # Scan rightwards until the cumulative probability reaches the
        # requested one; the loop body is skipped if it already has.
        while cumulative < probability:
            start = start + step
            cumulative = self.CDF(start)
        return (start, cumulative)
def LogLogisticDistribution(**kwargs):
    """
    Log-Logistic distribution is an alias of Fisk distribution; all
    keyword arguments are passed through unchanged."""
    return FiskDistribution(**kwargs)
class LogNormalDistribution(Distribution):
    """
    Class for Log-Normal Distribution: the distribution of a variable
    whose natural logarithm is normally distributed.
    """
    def __init__(self, a, b):
        """Constructor method. The parameters are used to construct the
        probability distribution.

        @param a: location, the mean of the underlying normal
            distribution (S{mu})
        @param b: scale, the standard deviation of the underlying normal
            distribution (S{sigma}); must not be negative"""
        self.location = a
        self.scale = b
        # Fixed: the original tested ((b ** 2) < 0), which can never be
        # true; the intent is to reject a negative scale parameter.
        if b < 0:
            raise AttributeError
    # CDF and PDF are not implemented for this distribution.
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis."""
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while (probability > cprob):
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample.

        For a log-normal distribution, mean = exp(mu + (sigma ** 2) / 2).
        The original expression, exp(location - (scale ** 2) * location),
        did not match this formula."""
        return math.exp(self.location + (self.scale ** 2) / 2.0)
    def random(self):
        """Gives a random number based on the distribution."""
        # Fixed: the standard-library function is random.lognormvariate;
        # random.lognormalvariate does not exist (AttributeError at
        # runtime in the original).
        return random.lognormvariate(self.location, self.scale)
def LogWeibullDistribution(location, scale):
    """
    Log-Weibull distribution is an alias of Gumbel distribution; the
    parameters are forwarded unchanged.

    @param location: S{eta}
    @param scale: S{theta}
    """
    return GumbelDistribution(location, scale)
def LorentzDistribution(**kwargs):
    """
    Lorentz distribution is an alias of Cauchy distribution; all keyword
    arguments are passed through unchanged."""
    return CauchyDistribution(**kwargs)
class MaxwellDistribution(Distribution):
    """
    Maxwell distribution is a special case of Chi distribution with
    location = 0 and shape (degrees of freedom) = 3; every operation is
    delegated to the underlying ChiDistribution object.
    """
    def __init__(self, scale):
        """
        Constructor method.

        @param scale: scale parameter of the underlying Chi distribution"""
        self.distribution = ChiDistribution(0, scale, 3)
    def CDF(self, x):
        """Cumulative probability from -infinity (or 0) up to x."""
        return self.distribution.CDF(x)
    def PDF(self, x):
        """Probability (density) at the given value of x."""
        return self.distribution.PDF(x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """Reverse of CDF(): returns (x, CDF(x)) for the x-value whose
        cumulative probability reaches the given probability."""
        return self.distribution.inverseCDF(probability, start, step)
    def mean(self):
        """Arithmetic mean of the distribution."""
        return self.distribution.mean()
    def mode(self):
        """Mode of the distribution."""
        return self.distribution.mode()
    def kurtosis(self):
        """Kurtosis of the distribution."""
        return self.distribution.kurtosis()
    def skew(self):
        """Skew of the distribution."""
        return self.distribution.skew()
    def variance(self):
        """Variance of the distribution."""
        return self.distribution.variance()
    # random() is not implemented for this distribution.
class NakagamiDistribution(Distribution):
    """
    Class for Nakagami Distribution.

    Only the generic inverseCDF() search is implemented here; CDF, PDF,
    the moment methods, the quantile methods and random() are not
    implemented for this distribution.
    """
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding value on the x-axis."""
        accumulated = self.CDF(start)
        # Advance along the x-axis in fixed increments until the
        # cumulative probability is no longer below the requested one.
        while accumulated < probability:
            start = start + step
            accumulated = self.CDF(start)
        return (start, accumulated)
class NegativeBinomialDistribution(Distribution):
    """Class for Negative Binomial Distribution."""
    def __init__(self, success, target):
        """Constructor method. The parameters are used to construct the
        probability distribution.

        @param success: probability of success; 0 <= success <= 1
        @param target: a constant, target number of successes"""
        self.success = success
        self.target = target
    def CDF(self, x):
        """
        Cummulative Distribution Function: the sum of the point
        probabilities PDF(0) .. PDF(x - 1)."""
        return sum((self.PDF(i) for i in range(x)), 0.0)
    def PDF(self, x):
        """
        Partial Distribution Function: the probability that the target
        number of successes is reached on exactly the x-th trial."""
        arrangements = nrpy.bico(x - 1, self.target - 1)
        win = self.success ** self.target
        lose = (1 - self.success) ** (x - self.target)
        return arrangements * win * lose
    def inverseCDF(self, probability, start=0, step=1):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis."""
        cumulative = self.CDF(start)
        # Walk up the support until the cumulative probability reaches
        # the requested value (skipped if already reached at start).
        while cumulative < probability:
            start = start + step
            cumulative = self.CDF(start)
        return (start, cumulative)
    def mean(self):
        """Arithmetic mean of the distribution: target / success."""
        return self.target / self.success
    def mode(self):
        """Mode of the distribution."""
        return int((self.success + self.target - 1) / self.success)
    # random() is not implemented for this distribution.
def NegativeExponentialDistribution(**kwargs):
    """
    Negative-exponential distribution is an alias of Exponential
    distribution; all keyword arguments are passed through unchanged."""
    return ExponentialDistribution(**kwargs)
class ParetoDistribution(Distribution):
    """Class for Pareto Distribution."""
    def __init__(self, location=1.0, scale=1.0):
        """Constructor method. The parameters are used to construct the
        probability distribution.

        @param location: the minimum (and mode) of the support; default = 1.0
        @param scale: the shape parameter, S{lambda}; default = 1.0"""
        self.location = location
        self.scale = scale
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability."""
        return 1 - (self.location / x) ** self.scale
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution from
        x-h to x+h for continuous distribution."""
        # Fixed: the original referenced self.shape, an attribute that is
        # never assigned (AttributeError at runtime); the shape parameter
        # is stored in self.scale.
        return (self.scale * (self.location ** self.scale)) / \
            (x ** (self.scale + 1))
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value and
        returns the corresponding value on the x-axis."""
        # NOTE(review): the default start=0.0 makes CDF(0) divide by
        # zero; callers should pass start >= location. Left unchanged to
        # preserve the existing interface.
        cprob = self.CDF(start)
        if probability < cprob: return (start, cprob)
        while (probability > cprob):
            start = start + step
            cprob = self.CDF(start)
        return (start, cprob)
    def mean(self):
        """Gives the arithmetic mean of the sample.

        mean = (location * scale) / (scale - 1); defined for scale > 1."""
        return (self.location * self.scale) / (self.scale - 1)
    def mode(self):
        """Gives the mode of the sample (the minimum of the support)."""
        return self.location
    def median(self):
        """Gives the median of the sample."""
        return self.location * (2 ** (1.0 / self.scale))
    def kurtosis(self):
        """Gives the excess kurtosis of the sample; defined for scale > 4.

        excess kurtosis = 6(a^3 + a^2 - 6a - 2) / (a(a - 3)(a - 4))."""
        # Fixed: the original numerator used +6a; the standard Pareto
        # excess-kurtosis formula has -6a. The denominator already
        # expands a(a - 3)(a - 4) correctly.
        n = 6 * (self.scale ** 3 + self.scale ** 2 - 6 * self.scale - 2)
        d = self.scale * (self.scale ** 2 - 7 * self.scale + 12)
        return n / d
    def skew(self):
        """Gives the skew of the sample; defined for scale > 3."""
        n = 2 * (self.scale + 1) * math.sqrt(self.scale - 2)
        d = (self.scale - 3) * math.sqrt(self.scale)
        return n / d
    def variance(self):
        """Gives the variance of the sample; defined for scale > 2."""
        n = (self.location ** 2) * self.scale
        d = (self.scale - 2) * ((self.scale - 1) ** 2)
        return n / d
    def quantile1(self):
        """Gives the 1st quantile of the sample."""
        # Use the exact ratio 4/3 instead of the truncated constant 1.333.
        return self.location * ((4.0 / 3.0) ** (1.0 / self.scale))
    def quantile3(self):
        """Gives the 3rd quantile of the sample."""
        return self.location * (4 ** (1.0 / self.scale))
    def qmean(self):
        """Gives the quantile of the arithmetic mean of the sample."""
        return 1 - (((self.scale - 1) / self.scale) ** self.scale)
    def qmode(self):
        """Gives the quantile of the mode of the sample."""
        return 0.0
    def random(self):
        """Gives a random number based on the distribution."""
        # Fixed: random.paretovariate samples a Pareto with minimum 1.0;
        # scale the sample by location so it respects this
        # distribution's minimum.
        return self.location * random.paretovariate(self.scale)
class PascalDistribution(Distribution):
    """
    Class for Pascal Distribution: the form of the Negative Binomial
    Distribution in which the 'target' number of successes is an
    integer. All operations are delegated to an underlying
    NegativeBinomialDistribution object.
    """
    def __init__(self, success, target):
        """Constructor method.

        @param success: probability of success; 0 <= success <= 1
        @param target: a constant, target number of successes (truncated
            to an integer)"""
        self.distribution = NegativeBinomialDistribution(success, int(target))
    def CDF(self, x):
        """Cumulative probability from -infinity (or 0) up to x."""
        return self.distribution.CDF(x)
    def PDF(self, x):
        """Probability for the particular value of x."""
        return self.distribution.PDF(x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """Reverse of CDF(): returns (x, CDF(x)) for the x-value whose
        cumulative probability reaches the given probability."""
        return self.distribution.inverseCDF(probability, start, step)
    def mean(self):
        """Arithmetic mean of the distribution."""
        return self.distribution.mean()
    def mode(self):
        """Mode of the distribution."""
        return self.distribution.mode()
    def kurtosis(self):
        """Kurtosis of the distribution."""
        return self.distribution.kurtosis()
    def skew(self):
        """Skew of the distribution."""
        return self.distribution.skew()
    def variance(self):
        """Variance of the distribution."""
        return self.distribution.variance()
    # random() is not implemented for this distribution.
def PolyaDistribution(success, target):
    """
    Polya distribution is an alias of Negative Binomial distribution; the
    parameters are forwarded unchanged.

    @param success: probability of success; 0 <= success <= 1
    @param target: a constant, target number of successes
    """
    return NegativeBinomialDistribution(success, target)
class PowerFunctionDistribution(Distribution):
    """
    Class for Power Function Distribution: a Beta Distribution on [0, 1]
    with parameters (shape, 1). All operations are delegated to the
    underlying BetaDistribution object.
    """
    def __init__(self, shape):
        """Constructor method.

        @param shape: shape parameter of the underlying Beta distribution
        """
        self.distribution = BetaDistribution(0, 1, shape, 1)
    def CDF(self, x):
        """Cumulative probability from -infinity (or 0) up to x."""
        return self.distribution.CDF(x)
    def PDF(self, x):
        """Probability (density) at the given value of x."""
        return self.distribution.PDF(x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """Reverse of CDF(): returns (x, CDF(x)) for the x-value whose
        cumulative probability reaches the given probability."""
        return self.distribution.inverseCDF(probability, start, step)
    def mean(self):
        """Arithmetic mean of the distribution."""
        return self.distribution.mean()
    def mode(self):
        """Mode of the distribution."""
        return self.distribution.mode()
    def kurtosis(self):
        """Kurtosis of the distribution."""
        return self.distribution.kurtosis()
    def skew(self):
        """Skew of the distribution."""
        return self.distribution.skew()
    def variance(self):
        """Variance of the distribution."""
        return self.distribution.variance()
    # random() is not implemented for this distribution.
class RademacherDistribution(Distribution):
    """Class for Rademacher Distribution, which takes the value -1 with
    probability 0.5 and the value 1 with probability 0.5."""
    def __init__(self):
        """Constructor method; the distribution has no parameters."""
        pass
    def CDF(self, x):
        """
        Cummulative Distribution Function, which gives the cummulative
        probability (area under the probability curve) from -infinity or 0 to
        a give x-value on the x-axis where y-axis is the probability."""
        if x < -1:
            return 0.0
        elif x < 1:
            # Fixed: the original condition (x > -1 and x < 1) excluded
            # x == -1, so CDF(-1) fell through to 1.0; the cumulative
            # probability P(X <= -1) is 0.5.
            return 0.5
        else:
            return 1.0
    def PDF(self, x):
        """
        Partial Distribution Function, which gives the probability for the
        particular value of x, or the area under probability distribution
        from x-h to x+h for continuous distribution."""
        if x == -1 or x == 1: return 0.5
        else: return 0.0
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding value on the x-axis."""
        if probability == 0.0: return (-1.0001, 0.0)
        if probability == 1.0: return (1.0, 1.0)
        else: return (0.999, 0.5)
    def mean(self):
        """Gives the arithmetic mean of the sample."""
        return 0
    def skew(self):
        """Gives the skew of the sample."""
        return 0
    def variance(self):
        """Gives the variance of the sample."""
        return 1
    # random() is not implemented for this distribution.
class RayleighDistribution(Distribution):
    """
    Rayleigh distribution is a special case of Chi distribution with
    location = 0 and shape (degrees of freedom) = 2; every operation is
    delegated to the underlying ChiDistribution object.
    """
    def __init__(self, scale):
        """Constructor method.

        @param scale: scale parameter of the underlying Chi distribution"""
        self.distribution = ChiDistribution(0, scale, 2)
    def CDF(self, x):
        """Cumulative probability from -infinity (or 0) up to x."""
        return self.distribution.CDF(x)
    def PDF(self, x):
        """Probability (density) at the given value of x."""
        return self.distribution.PDF(x)
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """Reverse of CDF(): returns (x, CDF(x)) for the x-value whose
        cumulative probability reaches the given probability."""
        return self.distribution.inverseCDF(probability, start, step)
    def mean(self):
        """Arithmetic mean of the distribution."""
        return self.distribution.mean()
    def mode(self):
        """Mode of the distribution."""
        return self.distribution.mode()
    def kurtosis(self):
        """Kurtosis of the distribution."""
        return self.distribution.kurtosis()
    def skew(self):
        """Skew of the distribution."""
        return self.distribution.skew()
    def variance(self):
        """Variance of the distribution."""
        return self.distribution.variance()
    # random() is not implemented for this distribution.
class ReciprocalDistribution(Distribution):
    """
    Class for Reciprocal Distribution.

    Only the generic inverseCDF() search is implemented here; CDF, PDF,
    the moment methods, the quantile methods and random() are not
    implemented for this distribution.
    """
    def inverseCDF(self, probability, start=0.0, step=0.01):
        """
        It does the reverse of CDF() method, it takes a probability value
        and returns the corresponding value on the x-axis."""
        cp = self.CDF(start)
        # March along the x-axis until the cumulative probability is at
        # least the requested one; nothing happens if it already is.
        while cp < probability:
            start += step
            cp = self.CDF(start)
        return (start, cp)
def RectangularDistribution(**kwargs):
    """
    Rectangular distribution is an alias of Uniform distribution; all
    keyword arguments are passed through unchanged."""
    return UniformDistribution(**kwargs)
def SechSquaredDistribution(**kwargs):
    """
    Sech-squared distribution is an alias of Logistic distribution; all
    keyword arguments are passed through unchanged."""
    return LogisticDistribution(**kwargs)
def WaldDistribution(**kwargs):
    """
    Wald distribution is an alias of Inverse Normal distribution; all
    keyword arguments are passed through unchanged."""
    return InverseNormalDistribution(**kwargs)
#class DummyDistribution(Distribution):
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
# def inverseCDF(self, probability, start=0.0, step=0.01):
# """
# It does the reverse of CDF() method, it takes a probability value
# and returns the corresponding value on the x-axis."""
# cprob = self.CDF(start)
# if probability < cprob: return (start, cprob)
# while (probability > cprob):
# start = start + step
# cprob = self.CDF(start)
# # print start, cprob
# return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
## def random(self, seed):
## """Gives a random number based on the distribution."""
## while 1:
## func
## yield seed
``` |
[
{
"content": "Repeat the following code:\n```python\n\"\"\"\nContains the house price model.\n\nDON'T USE THIS MODEL! Use the HousePriceModel in house_price_model_2.py.\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model i... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"\nContains the house price model.\n\nDON'T USE THIS MODEL! Use the HousePriceModel in house_price_model_2.py.\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklear... | ```python
"""
Contains the house price model.
DON'T USE THIS MODEL! Use the HousePriceModel in house_price_model_2.py.
"""
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import RidgeCV
# Constants
BASE_DATE = pd.to_datetime('20140101', format='%Y%m%d', errors='ignore')
TO_TYPE = 'category'
# Note: It is expected that the following environment variables will be set so
# that the house price model will be able to locate its training data:
#
# SALES_DATA_PATH: The path of the sales data training file, e.g.: "~/directory"
# SALES_DATA_FILE: The name of the sales data training file, e.g.: "File.csv"
#
# os.environ["SALES_DATA_PATH"] = '~/UW Data Science/DATA 515A/Project'
# os.environ["SALES_DATA_FILE"] = 'Merged_Data_excel.csv' # 'KingCountyHomeSalesData.csv'
# Construct the sales data path, and read the sales data.
SALES_DATA_PATH = os.path.join(os.environ['SALES_DATA_PATH'], os.environ['SALES_DATA_FILE'])
SALES_DATA = pd.read_csv(SALES_DATA_PATH, parse_dates=['date'])
# Data cleansing plan:
#
# id: Discard
# date: Convert to integer; make categorical
# price: No conversion
# bedrooms: No conversion
# bathrooms: No conversion
# sqft_living: No conversion
# sqft_lot: No conversion
# floors: Make categorical
# waterfront: Make categorical
# view: Make categorical
# condition: Make categorical
# grade: Make categorical
# sqft_above: No conversion
# sqft_basement: No conversion
# yr_built: Make categorical
# yr_renovated: Copy over yr_built if missing; make categorical
# zipcode: Make categorical
# lat: No conversion
# long: No conversion
# sqft_living15 No conversion
# sqft_lot15 No conversion
# list_price No conversion
def construct_models():
"""
Constructs a ridge regression model, and a random forest model for housing
price data.
:return: A ridge regression model, and a random forest model for housing
price data
"""
return train_models(create_model_data_frame(SALES_DATA))
def create_model_data_frame(source):
"""
Creates a data frame suitable for constructing a model.
:param source: The source data frame
:return: A data frame suitable for constructing a model
"""
# Create an empty data frame. Get the date series from the source.
my_model_data = pd.DataFrame()
sales_date = source['date']
# Extract the sales date as an integer.
my_model_data['sale_day'] =\
(sales_date - get_base_date()).astype('timedelta64[D]').astype(int) + 1
# Extract the sale day-of-week as an integer, and the sale day in month.
my_model_data['sale_day_of_week'] = sales_date.dt.dayofweek.astype(TO_TYPE)
my_model_data['sale_day_in_month'] = sales_date.dt.day.astype(TO_TYPE)
# Extract common features as numeric, or categorical values.
# create_model_feature(my_model_data, source, 'price', False)
create_model_feature(my_model_data, source, 'price', False)
create_model_feature(my_model_data, source, 'bedrooms', False)
create_model_feature(my_model_data, source, 'bathrooms', False)
create_model_feature(my_model_data, source, 'sqft_living', False)
create_model_feature(my_model_data, source, 'sqft_lot', False)
create_model_feature(my_model_data, source, 'floors', True)
create_model_feature(my_model_data, source, 'waterfront', True)
create_model_feature(my_model_data, source, 'view', True)
create_model_feature(my_model_data, source, 'condition', True)
create_model_feature(my_model_data, source, 'grade', True)
create_model_feature(my_model_data, source, 'sqft_above', False)
create_model_feature(my_model_data, source, 'sqft_basement', False)
create_model_feature(my_model_data, source, 'yr_built', True)
# Use 'year built' in place of 'year renovated' if year renovated is zero
# in the source.
field_name = 'yr_renovated'
my_model_data[field_name] = pd.Categorical(np.where(
source[field_name] == 0,
source['yr_built'].astype(TO_TYPE),
source[field_name].astype(TO_TYPE)))
# Extract more common features as numeric, or categorical values.
create_model_feature(my_model_data, source, 'zipcode', True)
create_model_feature(my_model_data, source, 'lat', False)
create_model_feature(my_model_data, source, 'long', False)
create_model_feature(my_model_data, source, 'sqft_living15', False)
create_model_feature(my_model_data, source, 'sqft_lot15', False)
my_model_data['list_price'] = source['List price']
# Return the completed model data frame.
return my_model_data
def create_model_feature(destination, source, name, to_categorical=False):
"""
Creates a feature in a destination data frame.
:param destination: The destination data frame
:param source: The source data frame
:param name: The name of the feature to copy
:param to_categorical: True if the feature should be converted to
categorical, false otherwise
:return: None
"""
if to_categorical:
destination[name] = source[name].astype(TO_TYPE)
else:
destination[name] = source[name]
return None
def get_base_date():
"""
Gets the base date as a reference for day of sale.
:return: The base date as a reference for day of sale
"""
return BASE_DATE
def train_models(my_model_data):
"""
Trains a ridge regression model, and a random forest model, and returns
them.
:param my_model_data: The model data on which to train
:return: A ridge regression model, and a random forest model
"""
# Construct the ridge regression model.
my_ridge_model = RidgeCV(alphas=(0.1, 1.0, 10.0),
fit_intercept=True,
normalize=True,
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=True)
# Construct the random forest model.
my_forest_model = RandomForestRegressor()
# Divide the model data into predictor and response.
response_field = 'price'
predictors = my_model_data.ix[:, response_field != my_model_data.columns]
response = my_model_data[response_field]
# Fit the models, and return them.
my_ridge_model.fit(X=predictors, y=response)
my_forest_model.fit(X=predictors, y=response)
return my_ridge_model, my_forest_model
``` |
[
{
"content": "```python\n###############################################################################\r\n##\r\n## Copyright 2011-2013 Tavendo GmbH\r\n##\r\n## Licensed under the Apache License, Version 2.0 (the \"License\");\r\n## you may not use this file except in compliance with the License.\r\n## You... | [
{
"content": "<|memory_start|>```python\n###############################################################################\r\n##\r\n## Copyright 2011-2013 Tavendo GmbH\r\n##\r\n## Licensed under the Apache License, Version 2.0 (the \"License\");\r\n## you may not use this file except in compliance with the Lic... | ```python
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from _version import __version__
version = __version__ # backward compat.
import util
import useragent
import flashpolicy
import httpstatus
import utf8validator
import xormasker
import compress
import websocket
## disable import, since it leads to reactor import
## https://twistedmatrix.com/trac/ticket/6849
#import resource
import prefixmap
import wamp
``` |
[
{
"content": "Here is a code file:\n```python\n# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urllib_parse,\n compat_urllib_request,\n)\n\n\nclass StreamcloudIE(InfoExtractor):\n IE_NAME = 'streamcloud.eu'\n ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urllib_parse,\n compat_urllib_request,\n)\n\n\nclass StreamcloudIE(InfoExtractor):\n IE_NAME = 'stre... | ```python
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
class StreamcloudIE(InfoExtractor):
IE_NAME = 'streamcloud.eu'
_VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'
_TEST = {
'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
'md5': '6bea4c7fa5daaacc2a946b7146286686',
'info_dict': {
'id': 'skp9j99s4bpz',
'ext': 'mp4',
'title': 'ananse test video \'/\\ ä ↭',
},
'skip': 'Only available from the EU'
}
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'http://streamcloud.eu/%s' % video_id
orig_webpage = self._download_webpage(url, video_id)
fields = re.findall(r'''(?x)<input\s+
type="(?:hidden|submit)"\s+
name="([^"]+)"\s+
(?:id="[^"]+"\s+)?
value="([^"]*)"
''', orig_webpage)
post = compat_urllib_parse.urlencode(fields)
self._sleep(12, video_id)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
req = compat_urllib_request.Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
title = self._html_search_regex(
r'<h1[^>]*>([^<]+)<', webpage, 'title')
video_url = self._search_regex(
r'file:\s*"([^"]+)"', webpage, 'video URL')
thumbnail = self._search_regex(
r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n###\n# Copyright (c) 2011, Rodrigo Dias Cruz\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n###\n# Copyright (c) 2011, Rodrigo Dias Cruz\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Re... | ```python
###
# Copyright (c) 2011, Rodrigo Dias Cruz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
class Spam(callbacks.Plugin):
"""
Add the help for "@plugin help Spam" here
This should describe *how* to use this plugin.
"""
threaded = True
def spam(self, irc, msg, args):
"""
List the machines reserved by user
"""
# no args
if len(msg) == 0:
return
# no channel
channel = irc.state.channels.get(msg.args[0], None)
if channel is None:
return
# get channel users
users = list(channel.users)
users.sort()
# reply to them
irc.reply('%s: %s' % (', '.join(users), ' '.join(args)), prefixNick=False)
# spam()
Class = Spam
``` |
[
{
"content": "Return the code unaltered:\n```python\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Series\nimport pandas.util.testing as tm\n\n\n@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])\ndef test_duplicated_with_misspelled_column_name(subset):\n # GH 19730\n df = D... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Series\nimport pandas.util.testing as tm\n\n\n@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])\ndef test_duplicated_with_misspelled_column_name(subset):\n # GH 1... | ```python
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas.util.testing as tm
@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])
def test_duplicated_with_misspelled_column_name(subset):
# GH 19730
df = DataFrame({'A': [0, 0, 1],
'B': [0, 0, 1],
'C': [0, 0, 1]})
with pytest.raises(KeyError):
df.duplicated(subset)
with pytest.raises(KeyError):
df.drop_duplicates(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
# gh-21524
# Given the wide dataframe with a lot of columns
# with different (important!) values
data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000)
for i in range(100)}
df = DataFrame(data).T
result = df.duplicated()
# Then duplicates produce the bool Series as a result and don't fail during
# calculation. Actual values doesn't matter here, though usually it's all
# False in this case
assert isinstance(result, Series)
assert result.dtype == np.bool
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True])),
('last', Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True]))
])
def test_duplicated_keep(keep, expected):
df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']})
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True])),
('last', Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True]))
])
def test_duplicated_nan_none(keep, expected):
df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object)
result = df.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A'])
def test_duplicated_subset(subset, keep):
df = DataFrame({'A': [0, 1, 1, 2, 0],
'B': ['a', 'b', 'b', 'c', 'a'],
'C': [np.nan, 3, 3, None, np.nan]})
if subset is None:
subset = list(df.columns)
elif isinstance(subset, str):
# need to have a DataFrame, not a Series
# -> select columns with singleton list, not string
subset = [subset]
expected = df[subset].duplicated(keep=keep)
result = df.duplicated(keep=keep, subset=subset)
tm.assert_series_equal(result, expected)
def test_drop_duplicates():
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.loc[[]]
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.loc[[0, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.loc[[0]]
tm.assert_frame_equal(result, expected)
# consider everything
df2 = df.loc[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
tm.assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
# GH 11376
df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
tm.assert_frame_equal(df.drop_duplicates(), expected)
df = DataFrame([[1, 0], [0, 2]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = DataFrame([[-2, 0], [0, -4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = DataFrame([[-x, x], [0, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = DataFrame([[-x, x], [x, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert df.duplicated(keep=keep).sum() == 0
def test_duplicated_on_empty_frame():
# GH 25184
df = DataFrame(columns=['a', 'b'])
dupes = df.duplicated('a')
result = df[dupes]
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_with_duplicate_column_names():
# GH17836
df = DataFrame([
[1, 2, 5],
[3, 4, 6],
[3, 4, 7]
], columns=['a', 'a', 'b'])
result0 = df.drop_duplicates()
tm.assert_frame_equal(result0, df)
result1 = df.drop_duplicates('a')
expected1 = df[:2]
tm.assert_frame_equal(result1, expected1)
def test_drop_duplicates_for_take_all():
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
tm.assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple():
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.loc[[]] # empty df
assert len(result) == 0
tm.assert_frame_equal(result, expected)
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('df', [
DataFrame(),
DataFrame(columns=[]),
DataFrame(columns=['A', 'B', 'C']),
DataFrame(index=[]),
DataFrame(index=['A', 'B', 'C'])
])
def test_drop_duplicates_empty(df):
# GH 20516
result = df.drop_duplicates()
tm.assert_frame_equal(result, df)
result = df.copy()
result.drop_duplicates(inplace=True)
tm.assert_frame_equal(result, df)
def test_drop_duplicates_NA():
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': range(8),
})
# single column
result = df.drop_duplicates('A')
expected = df.loc[[0, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.loc[[1, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.loc[[0, 2, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.loc[[1, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.loc[[6]]
tm.assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': range(8),
})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.loc[[3, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.loc[[0, 1, 2, 4]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.loc[[1, 3, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.loc[[1]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all():
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
tm.assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace():
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': range(8),
})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.loc[[6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.loc[[]]
result = df
tm.assert_frame_equal(result, expected)
assert len(df) == 0
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.loc[[0, 1, 2, 3]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.loc[[0, 5, 6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.loc[[0]]
result = df
tm.assert_frame_equal(result, expected)
# consider everything
orig2 = orig.loc[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
tm.assert_frame_equal(result, expected)
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nimport logging\nfrom collections import UserDict\nimport pymysql\nimport re\n\nlog = logging.getLogger('tyggbot')\n\n\nclass Emote:\n def __init__(self):\n self.id = -1 # An ID of -1 means the emote will be inserted on sync\n self.... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nimport logging\nfrom collections import UserDict\nimport pymysql\nimport re\n\nlog = logging.getLogger('tyggbot')\n\n\nclass Emote:\n def __init__(self):\n self.id = -1 # An ID of -1 means the emote will be inserted on syn... | ```python
import logging
from collections import UserDict
import pymysql
import re
log = logging.getLogger('tyggbot')
class Emote:
def __init__(self):
self.id = -1 # An ID of -1 means the emote will be inserted on sync
self.emote_id = None
self.code = None # This value will be inserted when the update_emotes script is called, if necessary.
self.tm = 0
self.tm_record = 0
self.count = 0
self.needs_sync = False
self.regex = None
@classmethod
def load(cls, cursor, emote_id):
emote = cls()
emote.emote_id = emote_id
emote.regex = None
emote.needs_sync = True
return emote
@classmethod
def load_from_row(cls, row):
emote = cls()
emote.id = row['id']
emote.emote_id = row['emote_id']
emote.code = row['code']
if not emote.emote_id:
emote.regex = re.compile('(?<![^ ]){0}(?![^ ])'.format(re.escape(emote.code)))
emote.count = row['count']
emote.tm_record = row['tm_record']
return emote
def add(self, count, reactor):
self.count += count
self.tm += count
self.needs_sync = True
if self.tm > self.tm_record:
self.tm_record = self.tm
reactor.execute_delayed(60, self.reduce, (count, ))
def reduce(self, count):
self.tm -= count
def sync(self, cursor):
if self.id == -1:
cursor.execute('INSERT INTO `tb_emote` (`emote_id`, `code`, `tm_record`, `count`) VALUES (%s, %s, %s, %s)',
(self.emote_id, self.code, self.tm_record, self.count))
self.id = cursor.lastrowid
else:
cursor.execute('UPDATE `tb_emote` SET `tm_record`=%s, `count`=%s WHERE `id`=%s',
(self.tm_record, self.count, self.id))
class EmoteManager(UserDict):
def __init__(self, sqlconn):
UserDict.__init__(self)
self.sqlconn = sqlconn
self.custom_data = []
def get_cursor(self):
self.sqlconn.ping()
return self.sqlconn.cursor(pymysql.cursors.DictCursor)
def get_normal_cursor(self):
self.sqlconn.ping()
return self.sqlconn.cursor()
def sync(self):
self.sqlconn.autocommit(False)
cursor = self.get_normal_cursor()
for emote in [emote for k, emote in self.data.items() if emote.needs_sync]:
emote.sync(cursor)
cursor.close()
self.sqlconn.autocommit(True)
def load(self):
self.data = {}
self.custom_data = []
cursor = self.get_cursor()
cursor.execute('SELECT * FROM `tb_emote`')
for row in cursor:
emote = Emote.load_from_row(row)
self.add_to_data(emote)
cursor.close()
def add_to_data(self, emote):
if emote.emote_id:
self.data[emote.emote_id] = emote
if emote.code:
self.data[emote.code] = emote
else:
self.custom_data.append(emote)
if emote.code:
self.data['custom_' + emote.code] = emote
def __getitem__(self, key):
if key not in self.data:
try:
# We can only dynamically add emotes that are ID-based
value = int(key)
except ValueError:
return None
log.info('Adding new emote with ID {0}'.format(value))
emote = Emote.load(self.get_cursor(), value)
self.add_to_data(emote)
return self.data[key]
def find(self, key):
try:
emote_id = int(key)
except ValueError:
emote_id = None
if emote_id:
return self.data[emote_id]
else:
key = str(key)
if key in self.data:
return self.data[key]
else:
for emote in self.custom_data:
if emote.code == key:
return emote
return None
``` |
[
{
"content": "```python\nfrom __future__ import absolute_import\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\n\nclass SetupStack(object):\n def __init__(self):\n self._on_teardown = []\n\n def add_teardown(self, teardown):\n self._on_teardown.append(teardown)\n\n def tear... | [
{
"content": "<|memory_start|>```python\nfrom __future__ import absolute_import\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\n\nclass SetupStack(object):\n def __init__(self):\n self._on_teardown = []\n\n def add_teardown(self, teardown):\n self._on_teardown.append(teardown)... | ```python
from __future__ import absolute_import
import os
import shutil
import tempfile
import unittest
class SetupStack(object):
def __init__(self):
self._on_teardown = []
def add_teardown(self, teardown):
self._on_teardown.append(teardown)
def tear_down(self):
for func in reversed(self._on_teardown):
func()
class TearDownConvenience(object):
def __init__(self, setup_stack=None):
self._own_setup_stack = setup_stack is None
if setup_stack is None:
setup_stack = SetupStack()
self._setup_stack = setup_stack
# only call this convenience method if no setup_stack was supplied to c'tor
def tear_down(self):
assert self._own_setup_stack
self._setup_stack.tear_down()
class TempDirMaker(TearDownConvenience):
def make_temp_dir(self, dir_=None):
temp_dir = tempfile.mkdtemp(
prefix="tmp-%s-" % self.__class__.__name__, dir=dir_)
def tear_down():
shutil.rmtree(temp_dir)
self._setup_stack.add_teardown(tear_down)
return temp_dir
class MonkeyPatcher(TearDownConvenience):
Unset = object()
def monkey_patch(self, obj, name, value):
orig_value = getattr(obj, name)
setattr(obj, name, value)
def reverse_patch():
setattr(obj, name, orig_value)
self._setup_stack.add_teardown(reverse_patch)
def _set_environ(self, env, name, value):
if value is self.Unset:
try:
del env[name]
except KeyError:
pass
else:
env[name] = value
def monkey_patch_environ(self, name, value, env=os.environ):
orig_value = env.get(name, self.Unset)
self._set_environ(env, name, value)
def reverse_patch():
self._set_environ(env, name, orig_value)
self._setup_stack.add_teardown(reverse_patch)
class FixtureFactory(object):
def __init__(self):
self._setup_stack = SetupStack()
self._context_managers = {}
self._fixtures = {}
def register_context_manager(self, name, context_manager):
self._context_managers[name] = context_manager
def get_fixture(self, name, add_teardown):
context_manager = self._context_managers[name]
fixture = context_manager.__enter__()
add_teardown(lambda: context_manager.__exit__(None, None, None))
return fixture
def get_cached_fixture(self, name):
fixture = self._fixtures.get(name)
if fixture is None:
fixture = self.get_fixture(name, self._setup_stack.add_teardown)
self._fixtures[name] = fixture
return fixture
def tear_down(self):
self._setup_stack.tear_down()
class TestCase(unittest.TestCase):
def setUp(self):
self._setup_stack = SetupStack()
self._monkey_patcher = MonkeyPatcher(self._setup_stack)
def tearDown(self):
self._setup_stack.tear_down()
def register_context_manager(self, name, context_manager):
return self.fixture_factory.register_context_manager(name,
context_manager)
def get_fixture(self, name):
return self.fixture_factory.get_fixture(name, self.add_teardown)
def get_cached_fixture(self, name):
return self.fixture_factory.get_cached_fixture(name)
def add_teardown(self, *args, **kwds):
self._setup_stack.add_teardown(*args, **kwds)
def make_temp_dir(self, *args, **kwds):
return TempDirMaker(self._setup_stack).make_temp_dir(*args, **kwds)
def monkey_patch(self, *args, **kwds):
return self._monkey_patcher.monkey_patch(*args, **kwds)
def monkey_patch_environ(self, *args, **kwds):
return self._monkey_patcher.monkey_patch_environ(*args, **kwds)
def assert_contains(self, container, containee):
self.assertTrue(containee in container,
"%r not in %r" % (containee, container))
def assert_less_than(self, got, expected):
self.assertTrue(got < expected, "%r >= %r" % (got, expected))
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python3\nimport numpy as np\nimport numpy.random\nfrom time import time\n\n# web mercator projection functions\n# ---------------------------------\ndef linear_lat(lat, atanh = np.arctanh, sin = np.sin, radians = np.radians):\n return atanh(... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python3\nimport numpy as np\nimport numpy.random\nfrom time import time\n\n# web mercator projection functions\n# ---------------------------------\ndef linear_lat(lat, atanh = np.arctanh, sin = np.sin, radians = np.radians):\n ... | ```python
#!/usr/bin/env python3
import numpy as np
import numpy.random
from time import time
# web mercator projection functions
# ---------------------------------
def linear_lat(lat, atanh = np.arctanh, sin = np.sin, radians = np.radians):
return atanh(sin(radians(lat)))
def inv_linear_lat(ll, asin = np.arcsin, tanh = np.tanh, degrees = np.degrees):
return degrees(asin(tanh(ll)))
def lng_to_x(w, lng_min, lng_max, lng):
return (lng - lng_min) * (w / (lng_max - lng_min))
def lat_to_y(h, lat_min, lat_max, lat):
return (linear_lat(lat) - linear_lat(lat_min)) * (h / (linear_lat(lat_max) - linear_lat(lat_min)))
def x_to_lng(w, lng_min, lng_max, x):
return x * ((lng_max - lng_min)/w) + lng_min
def y_to_lat(h, lat_min, lat_max, y):
return inv_linear_lat(y * ((linear_lat(lat_max) - linear_lat(lat_min))/h) + linear_lat(lat_min))
# heatmap data generation
# -----------------------
class HeatMap:
def __init__(self, lnglat, width, height, westlng, eastlng, southlat, northlat):
# compute pixel bounds of the map
x = np.append(np.arange(0, width, 5), width)
y = np.append(np.arange(0, height, 5), height)
# project pixel bounds coordinates (x, y -> lng, lat)
edgelng = x_to_lng(width, westlng, eastlng, x)
centerlng = x_to_lng(width, westlng, eastlng, (x[1:] + x[:-1])/2)
edgelat = y_to_lat(height, southlat, northlat, y)
centerlat = y_to_lat(height, southlat, northlat, (y[1:] + y[:-1])/2)
# prepare computation parameters
self.bins = edgelng, edgelat
self.range = (westlng, eastlng), (southlat, northlat)
self.iterator = lnglat.chunks()
self.heatmap = None
# prepare compression parameters
scalelat = (edgelat[1:] - edgelat[:-1]).min() / 2
self.approx_centerlat = numpy.rint((centerlat - centerlat[0]) / scalelat)
scalelng = edgelng[1] - edgelng[0] # longitude is linear
self.approx_centerlng = numpy.rint((centerlng - centerlng[0]) / scalelng)
self.scales = dict(lat=scalelat, lng=scalelng)
self.offsets = dict(lat=centerlat[0], lng=centerlng[0])
# stream status parameters
self.done = False
def compute(self, time_credit):
# make histogram:
# - create a pixel grid
# - given a tuple (lng, lat) increment the corresponding pixel
deadline = time() + time_credit
deadline_reached = False
for chunk in self.iterator:
lng, lat = chunk.columns
chunk_heatmap = np.histogram2d(lng, lat, bins=self.bins, range=self.range)[0]
if self.heatmap is None:
self.heatmap = chunk_heatmap.T
else:
self.heatmap += chunk_heatmap.T
if time() > deadline:
deadline_reached = True
break
if not deadline_reached:
# we left the loop because of the end of iteration
self.done = True
# get sparse matrix representation: (lat, lng, intensity) tuples.
# in order to lower network usage, we will transfer this data in a
# compressed form: lng & lat values will be transfered as integers
# together with a scaling factor and an offset to be applied.
def compressed_form(self):
# count number of points
count = int(self.heatmap.sum())
if count == 0:
# if no points, return empty data
data = dict(lat = [], lng = [], val = [])
else:
# apply threshold and
# compute approximated sparse matrix data
nonzero_xy = ((self.heatmap / self.heatmap.max()) > 0.05).nonzero()
nonzero_x = nonzero_xy[1]
nonzero_y = nonzero_xy[0]
data = dict(
lat = self.approx_centerlat[nonzero_y].astype(int).tolist(),
lng = self.approx_centerlng[nonzero_x].astype(int).tolist(),
val = self.heatmap[nonzero_xy].astype(int).tolist()
)
return dict(
data = data,
scales = self.scales,
offsets = self.offsets,
count = count,
done = self.done
)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 10 15:25:02 2016\n\n@author: jessime\n\"\"\"\n\nimport pygame\nimport minesweeper_game.events as events\n\nclass Controller():\n\n def __init__(self, ev_manager, model):\n self.ev_manager = ev_manager\n self.model = mo... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 10 15:25:02 2016\n\n@author: jessime\n\"\"\"\n\nimport pygame\nimport minesweeper_game.events as events\n\nclass Controller():\n\n def __init__(self, ev_manager, model):\n self.ev_manager = ev_manager\n ... | ```python
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 10 15:25:02 2016
@author: jessime
"""
import pygame
import minesweeper_game.events as events
class Controller():
def __init__(self, ev_manager, model):
self.ev_manager = ev_manager
self.model = model
self.ev_manager.register(self)
self.key_event_checks = [self.check_arrows,
self.check_others,
self.check_state]
def check_arrows(self, message, event):
"""Move player on board if arrow key has been pressed."""
if event.key == pygame.K_LEFT:
message = events.TryChangePos('left')
elif event.key == pygame.K_RIGHT:
message = events.TryChangePos('right')
elif event.key == pygame.K_UP:
message = events.TryChangePos('up')
elif event.key == pygame.K_DOWN:
message = events.TryChangePos('down')
return message
def check_others(self, message, event):
if event.key == pygame.K_ESCAPE:
message = events.UserQuit()
elif event.key == pygame.K_SPACE:
message = events.TryButtonPress(self.model.pos)
elif event.key == pygame.K_f:
message = events.TryFlagToggle(self.model.pos)
elif event.key == pygame.K_a:
message = events.CountUnflagged()
return message
def check_state(self, message, event):
if event.key == pygame.K_b:
message = events.CheckBoard()
elif event.key == pygame.K_m:
message = events.CheckPlayer()
return message
def notify(self, event):
if isinstance(event, events.LoopEnd):
for pygame_event in pygame.event.get():
message = None
if pygame_event.type == pygame.KEYUP:
for func in self.key_event_checks:
message = func(message, pygame_event)
if message:
self.ev_manager.post(message)
break
``` |
[
{
"content": "```python\nimport pytest\nfrom tri_struct import merged\n\nfrom tri_form import Field, Form\nfrom tri_form.compat import render_to_string, format_html, field_defaults_factory, render_template, Template\nfrom .compat import RequestFactory, SafeText\n\n\ndef test_render_to_string():\n assert rend... | [
{
"content": "<|memory_start|>```python\nimport pytest\nfrom tri_struct import merged\n\nfrom tri_form import Field, Form\nfrom tri_form.compat import render_to_string, format_html, field_defaults_factory, render_template, Template\nfrom .compat import RequestFactory, SafeText\n\n\ndef test_render_to_string():\... | ```python
import pytest
from tri_struct import merged
from tri_form import Field, Form
from tri_form.compat import render_to_string, format_html, field_defaults_factory, render_template, Template
from .compat import RequestFactory, SafeText
def test_render_to_string():
assert render_to_string(
template_name='tri_form/non_editable.html',
request=RequestFactory().get('/'),
context=dict(
field=dict(
id=SafeText('<a b c><d><e>'),
rendered_value=SafeText('<a b c><d><e>'),
),
)
).strip() == '<span id="<a b c><d><e>"><a b c><d><e></span>'
def test_format_html():
assert format_html('<{a}>{b}{c}', a='a', b=format_html('<b>'), c='<c>') == '<a><b><c>'
def test_format_html2():
assert render_template(RequestFactory().get('/'), Template('{{foo}}'), dict(foo=format_html('<a href="foo">foo</a>'))) == '<a href="foo">foo</a>'
def test_format_html3():
assert render_template(RequestFactory().get('/'), Template('{{foo}}'), dict(foo=format_html('{}', format_html('<a href="foo">foo</a>')))) == '<a href="foo">foo</a>'
def test_format_html4():
actual = render_template(
RequestFactory().get('/'),
Template('{{foo}}'),
dict(
foo=Form(fields=[Field(name='foo')]),
)
)
print(actual)
assert '<input type="text" value="" name="foo" id="id_foo"' in actual
def test_format_html5():
actual = Form(fields=[Field(name='foo')], request=RequestFactory().get('/')).render()
print(actual)
assert type(actual) == SafeText
def test_format_html6():
form = Form(fields=[Field(name='foo')], request=RequestFactory().get('/'))
actual = form.fields_by_name.foo.render()
print(actual)
assert type(actual) == SafeText
def test_render_template():
actual = render_template(RequestFactory().get('/'), Template('{{foo}}'), dict(foo=1))
print(actual)
assert type(actual) == SafeText
@pytest.mark.django
def test_field_defaults_factory():
from django.db import models
base = dict(parse_empty_string_as_none=True, required=True, display_name=None)
assert field_defaults_factory(models.CharField(null=False, blank=False)) == merged(base, dict(parse_empty_string_as_none=False))
assert field_defaults_factory(models.CharField(null=False, blank=True)) == merged(base, dict(parse_empty_string_as_none=False, required=False))
assert field_defaults_factory(models.CharField(null=True, blank=False)) == merged(base, dict(required=False))
assert field_defaults_factory(models.CharField(null=True, blank=True)) == merged(base, dict(required=False))
@pytest.mark.django
def test_field_defaults_factory_boolean():
from django.db import models
django_null_default = not models.BooleanField().null
base = dict(parse_empty_string_as_none=django_null_default, display_name=None)
assert field_defaults_factory(models.BooleanField(null=False, blank=False)) == merged(base, dict(parse_empty_string_as_none=False))
assert field_defaults_factory(models.BooleanField(null=False, blank=True)) == merged(base, dict(parse_empty_string_as_none=False))
assert field_defaults_factory(models.BooleanField(null=True, blank=False)) == base
assert field_defaults_factory(models.BooleanField(null=True, blank=True)) == base
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\n#!/usr/bin/env python\n'''\nUse Pexpect to change the logging buffer size(logging buffered <size>).\nVerify this change by examining the output of 'show run'.\n'''\n\nimport pexpect\nimport time\nfrom getpass import getpass\n\ndef login(ssh_... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n#!/usr/bin/env python\n'''\nUse Pexpect to change the logging buffer size(logging buffered <size>).\nVerify this change by examining the output of 'show run'.\n'''\n\nimport pexpect\nimport time\nfrom getpass import getpass\n... | ```python
#!/usr/bin/env python
'''
Use Pexpect to change the logging buffer size(logging buffered <size>).
Verify this change by examining the output of 'show run'.
'''
import pexpect
import time
from getpass import getpass
def login(ssh_conn):
'''
Handle sending password
'''
password = getpass()
ssh_conn.sendline(password)
ssh_conn.expect('#')
def find_prompt(ssh_conn):
'''
Find the current prompt
Pexpect is non-greedy which is problematic
'''
ssh_conn.send('\n')
time.sleep(1)
ssh_conn.expect('#')
prompt = ssh_conn.before + ssh_conn.after
return prompt.strip()
def disable_paging(ssh_conn,pattern='#',cmd='terminal length 0'):
'''
Disable the paging of output i.e. --More--
'''
ssh_conn.sendline(cmd)
ssh_conn.expect(pattern)
def main():
ip_addr = '50.76.53.27'
username = 'pyclass'
#password = '88newclass'
port = 8022
ssh_conn = pexpect.spawn('ssh -l {} {} -p {}'.format(username, ip_addr, port))
ssh_conn.timeout = 3
login(ssh_conn)
prompt = find_prompt(ssh_conn)
disable_paging(ssh_conn,prompt)
ssh_conn.sendline('config t')
ssh_conn.expect('#')
ssh_conn.sendline('logging buffered 188888')
ssh_conn.expect('#')
ssh_conn.sendline('end')
ssh_conn.expect(prompt)
ssh_conn.sendline('show run | inc logging buffer')
ssh_conn.expect(prompt)
print '\n>>>>'
print ssh_conn.before
print '>>>>\n'
if __name__ == "__main__":
main()
``` |
[
{
"content": "Here is the script:\n```python\n# -*- coding: utf-8 -*-\n\n# This file is part of Tumulus.\n#\n# Copyright (C) 2013 OKso (http://okso.me)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# This file is part of Tumulus.\n#\n# Copyright (C) 2013 OKso (http://okso.me)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as pub... | ```python
# -*- coding: utf-8 -*-
# This file is part of Tumulus.
#
# Copyright (C) 2013 OKso (http://okso.me)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
See reference: http://www.javascriptkit.com/domref/elementproperties.shtml
'''
from .element import Element, EmptyElement
class Tag(object):
def __init__(self, tagname, element=Element):
self.tagname = tagname
self.element = element
def __call__(self, *inner, **kwargs):
return self.element(self.tagname, components=inner, attributes=kwargs)
class EmptyTag(Tag):
def __call__(self, *inner, **kwargs):
return EmptyElement(self.tagname, attributes=kwargs)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# Canal para series.ly\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#------------------------------------------------------... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# Canal para series.ly\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#--------------------------------------... | ```python
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para series.ly
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import os
import urllib2
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "seriesly"
__category__ = "S,A"
__type__ = "generic"
__title__ = "Series.ly"
__language__ = "ES"
__creationdate__ = "20111119"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
"""Handler para library_service"""
def episodios(item):
# Obtiene de nuevo los tokens
episode_list = serie_seasons(item)
#episode_list = serie_capitulos(item)
for episode in episode_list:
episode.extra = item.extra
return episode_list
"""Handler para launcher (library)"""
def findvideos(item):
return multiple_links(item)
"""Handler para launcher (library)"""
def play(item):
return links(item)
def mainlist(item):
logger.info("[seriesly.py] mainlist")
itemlist = []
if config.get_setting("serieslyaccount")!="true":
itemlist.append( Item( channel=__channel__ , title="Habilita tu cuenta en la configuración..." , action="openconfig" , url="" , folder=False ) )
else:
auth_token, user_token = getCredentials()
if user_token:
extra_params = '%s|%s' % ( auth_token, user_token )
itemlist.append( Item(channel=__channel__, title="Buscar", action="search") )
itemlist.append( Item(channel=__channel__, title="Mis series", action="categorias", extra=extra_params, url="series" ) )
itemlist.append( Item(channel=__channel__, title="Mis pelis", action="categorias", extra=extra_params, url="movies" ) )
itemlist.append( Item(channel=__channel__, title="Mis documentales", action="categorias", extra=extra_params, url="documentaries" ) )
itemlist.append( Item(channel=__channel__, title="Mis tvshows", action="categorias", extra=extra_params, url="tvshows" ) )
itemlist.append( Item(channel=__channel__, title="Mis listas", action="listasMenu", extra=extra_params, url="listas" ) )
else:
itemlist.append( Item( channel=__channel__ , title="Cuenta incorrecta, revisa la configuración..." , action="" , url="" , folder=False ) )
return itemlist
def openconfig(item):
if "xbmc" in config.get_platform() or "boxee" in config.get_platform():
config.open_settings( )
return []
def getCredentials():
auth_token,user_token = perform_login()
return [auth_token,user_token]
def categorias(item):
itemlist = []
if item.url=="movies":
itemlist.append( Item(channel=__channel__, title="Vistas", action="categoria", url='Vistas', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Favoritas", action="categoria", url='Favouritas', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Pendientes", action="categoria", url='Pendientes', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Peliculas Mas Vistas", action="mas_vistas", url=item.url ) )
itemlist.append( Item(channel=__channel__, title="Peliculas Por Categorias", action="menu_cat", url=item.url, extra="cat" ) )
#itemlist.append( Item(channel=__channel__, title="Manolito caramierda", action="manolito", url=item.url) )
elif item.url=="series":
itemlist.append( Item(channel=__channel__, title="Viendo", action="categoria", url='Viendo', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Finalizadas", action="categoria", url='Finalizada', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Pendientes", action="categoria", url='Pendiente', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Series Mas Vistas", action="mas_vistas" , url=item.url ) )
itemlist.append( Item(channel=__channel__, title="Series Por Categorias", action="menu_cat", url=item.url, extra="cat" ) )
elif item.url=="documentaries":
itemlist.append( Item(channel=__channel__, title="Vistos", action="categoria", url='Vistas', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Favoritas", action="categoria", url='Favoritas', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Pendientes", action="categoria", url='Pendientes', extra=item.url) )
itemlist.append( Item(channel=__channel__,title="Documentales mas vistos",action="mas_vistas",url=item.url))
elif item.url=="tvshows":
itemlist.append( Item(channel=__channel__, title="Viendo", action="categoria", url='Viendo', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Finalizadas", action="categoria", url='Finalizada', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Pendientes", action="categoria", url='Pendiente', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Tvshows Mas Vistos", action="mas_vistas" , url=item.url ) )
itemlist.append( Item(channel=__channel__, title="Tvshows Por Categorias", action="menu_cat", url=item.url, extra="cat" ) )
itemlist.append( Item(channel=__channel__, title="Buscar", action="search", url=item.url) )
return itemlist
def menu_cat(item):
itemlist=[]
categorias=get_constant("categorias")
for c in categorias:
itemlist.append( Item(channel=__channel__, title=categorias[c], action="search_cat", extra=c, url=item.url, plot="1") )
itemlist = sorted( itemlist , key=lambda item: item.title)
return itemlist
def categoria(item):
logger.info("[seriesly.py] categoria")
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
# Extrae las entradas (carpetas)
post = 'auth_token=%s&user_token=%s' % ( qstr(auth_token), qstr(user_token) )
tipo=item.extra
# Videos Usuario (Filtradas por categoria)
url='http://api.series.ly/v2/user/media/%s'%tipo
data = scrapertools.cache_page(url, post=post)
List = load_json(data)
if "error" in List:
if List["error"]!=0:
error_message(List["error"])
return []
else:
return []
if List[tipo] == None : List[tipo] = []
logger.info("hay %d %s" % (len(List[tipo]), tipo ))
cat_filter = item.url
itemlist = []
default = '2'
if tipo=="series" or tipo =="tvshows":
if item.url == 'Pendiente' : cat_filter = 2
elif item.url == 'Viendo' : cat_filter = 1
elif item.url == 'Finalizada' : cat_filter = 3
if tipo=="movies" or tipo=="documentaries":
if item.url == 'Favouritas' : cat_filter = 2
elif item.url == 'Vistas' : cat_filter = 1
elif item.url == 'Pendientes' : cat_filter = 3
for movieItem in List[tipo]:
# Añade al listado de XBMC filtrando por categoria
status = movieItem['status']
if status == cat_filter:
itemlist.append( generate_item(movieItem, tipo, auth_token))
itemlist = sorted( itemlist , key=lambda item: item.title)
return itemlist
def manolito(item):
itemlist = []
itemlist.append( Item(channel=__channel__, title="Si, po toma", action="categoria", url='Pendientes', extra=item.url) )
itemlist.append( Item(channel=__channel__, title="Nunca lo sabremos", action="mas_vistas", url=item.url ) )
itemlist.append( Item(channel=__channel__, title="O noooh", action="menu_cat", url=item.url, extra="cat" ) )
itemlist.append( Item(channel=__channel__, title="Manolito caramierda de nuevo", action="manolito", url=item.url) )
return itemlist
def serie_seasons(item):
"""
Show list of seasons
"""
logger.info("[seriesly.py serie_seasons")
print "# Series Seasons #"
# Get tokens again
auth_token, user_token = getCredentials()
post = 'auth_token=%s&user_token=%s' % ( qstr(auth_token), qstr(user_token) )
serieInfo = load_json(scrapertools.cache_page(item.url, post=post))
if "error" in serieInfo and serieInfo["error"]:
error_message(serieInfo["error"])
return []
if serieInfo == None:
serieInfo = {}
if (not serieInfo.has_key('seasons_episodes')) or serieInfo['seasons_episodes'] == None:
serieInfo['seasons_episodes'] = []
seasonList = []
for i in serieInfo["seasons_episodes"]:
this_season = serieInfo['seasons_episodes'][i][0]
seasonList.append(
Item(
channel=__channel__,
action = 'serie_capitulos',
title = 'Temporada %s' % this_season['season'],
url = item.url,
thumbnail = item.thumbnail,
plot = "",
show = item.show,
extra = item.extra
)
)
seasonList = sorted( seasonList , key=lambda item: item.title)
print "Extra pasado es:"
print item.extra
return seasonList
def serie_capitulos(item):
print "# Serie Capitulos #"
print "Temporada: ", item.title[-1]
logger.info('[seriesly.py] serie_capitulos')
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
season_number = item.title[-1]
# Extrae las entradas (carpetas)
post = 'auth_token=%s&user_token=%s' % ( qstr(auth_token), qstr(user_token) )
serieInfo = load_json(scrapertools.cache_page(item.url, post=post))
# ~ Todos los videos tienen error=0
if "error" in serieInfo:
if serieInfo["error"]!=0:
error_message(serieInfo["error"])
return []
else:
return []
if serieInfo == None:
serieInfo = {}
if (not serieInfo.has_key('seasons_episodes')) or serieInfo['seasons_episodes'] == None:
serieInfo['seasons_episodes'] = []
# Juntamos todos los episodios con enlaces en una sola lista
episodeList=[]
print "All episodes in a single list"
for i in serieInfo["seasons_episodes"]:
if str(serieInfo['seasons_episodes'][i][0]['season']) == str(season_number):
for j in serieInfo["seasons_episodes"][i]:
if j['haveLinks']: episodeList.append(j)
logger.info('[seriesly serie_capitulos] hay %d capitulos' % len(episodeList))
itemlist = []
for episode in episodeList:
if episode.has_key('watched'):
viewed = episode['watched']
if viewed == False : episode['estado'] = ' [Pendiente]'
elif viewed == True : episode['estado'] = ' [Visto]'
else : episode['estado'] = ' [?]'
else:
episode['estado'] = ''
# Añadimos un 0 al principo de la temporada y capitulo para su ordenacion
episode["episode"] = str(episode["episode"])
if len(episode["episode"])==1:
episode["episode"]="0"+episode["episode"]
episode["season"] = str(episode["season"])
if len(episode["season"])==1:
episode["season"]="0"+episode["season"]
itemlist.append(
Item(channel=__channel__,
action = 'multiple_links',
title = '%(season)sx%(episode)s %(title)s%(estado)s' % episode,
url = 'http://api.series.ly/v2/media/episode/links?&idm=%(idm)s&mediaType=5' % episode,
thumbnail = item.thumbnail,
plot = "",
show = item.show,
extra = item.extra
)
)
itemlist = sorted( itemlist , key=lambda item: item.title)
if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"):
itemlist.append( Item(channel='seriesly', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="serie_capitulos###", show=item.show) )
return itemlist
def mas_vistas(item):
logger.info("[seriesly.py] mas_vistas")
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
post = 'auth_token=%s&user_token=%s&limit=100' % ( qstr(auth_token), qstr(user_token) )
# Extrae las entradas (carpetas)
tipo=item.url
url="http://api.series.ly/v2/media/most_seen/"+tipo
topInfo = load_json(scrapertools.cache_page(url, post=post))
if "error" in topInfo:
if topInfo["error"]!=0:
error_message(topInfo["error"])
return []
else:
return []
if topInfo == None : topInfo = {}
if topInfo[tipo] == None : topInfo[tipo] = []
logger.info("hay %d videos" % len(topInfo[tipo]))
itemlist = []
for movieItem in topInfo[tipo]:
# Añade al listado de XBMC
itemlist.append( generate_item(movieItem, tipo, auth_token))
return itemlist
def listasMenu(item):
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
post = 'auth_token=%s&user_token=%s' % ( qstr(auth_token), qstr(user_token) )
# Extrae las entradas (carpetas)
url="http://api.series.ly/v2/user/media/lists"
listasInfo = load_json(scrapertools.cache_page(url, post=post))
ownList=[]
followList=[]
if len(listasInfo["own"]) >=1 :
for lista in listasInfo["own"]:
logger.info(str(lista))
if "last_medias" in lista:
ownList.append({"id_list":lista["id_list"],
"title": lista["title"],
"medias_num": lista["medias_num"] })
"""title=lista["title"]
ownList[title]=[]
for element in lista["last_medias"]:
video={ 'idm': element["idm"],
'seasons': 0,
'episodes':0,
'poster':{"large":element["img"]},
'name': element["name"],
'mediaType':get_constant("mediaType")[int(element["mediaType"])],
'auth_token':auth_token
}
ownList[title].append(video)
"""
if len(listasInfo["following"]) >=1 :
for lista in listasInfo["following"]:
logger.info(str(lista))
if "last_medias" in lista:
followList.append({"id_list":lista["id_list"],
"title": lista["title"],
"medias_num": lista["medias_num"] })
import json
itemlist=[]
itemlist.append( Item(channel=__channel__, title="Propias", action="listas", url='Viendo', extra=json.dumps(ownList)) )
itemlist.append( Item(channel=__channel__, title="Siguiendo", action="listas", url='Finalizada', extra=json.dumps(followList)) )
return itemlist
def listas(item):
logger.info("[seriesly.py] listas_vistas")
import urllib
import json
d=urllib.unquote(item.extra)
listaDict=load_json(d)
itemlist=[]
for element in listaDict:
logger.info(element)
title=element["title"]
itemlist.append( Item(channel=__channel__, title=title, action="lista", url='Viendo', extra=json.dumps(element)) )
return itemlist
def lista(item):
import urllib
import json
d=urllib.unquote(item.extra)
listaDict=load_json(str(d))
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
post = 'auth_token=%s&id_list=%s' % ( qstr(auth_token), qstr(listaDict["id_list"]) )
# Extrae las entradas (carpetas)
url="http://api.series.ly/v2/media/list"
lista = load_json(scrapertools.cache_page(url, post=post))
logger.info(str(lista))
itemlist=[]
for element in lista["list_info"]["medias"]:
logger.info(str(element))
video={ 'idm': element["idm"],
'seasons': 0,
'episodes':0,
'poster':{"large":element["img"]},
'name': element["name"],
'mediaType':get_constant("mediaType")[int(element["mediaType"])],
'auth_token':auth_token
}
itemlist.append(generate_item(video, video["mediaType"], auth_token))
return itemlist
def generate_item(video , tipo, auth_token):
logger.info('video')
logger.info(str(video))
if tipo == "None": return Item()
if 'name' not in video: return Item()
if tipo=="series" or tipo == "tvshows":
url = 'http://api.series.ly/v2/media/full_info?auth_token='+ auth_token+'&idm=%s&mediaType=%s' %(video["idm"],get_constant("mediaType")[tipo])
#Si la serie no tiene temporada, al abrirla tampoco tiene capitulos
if "seasons" not in video:
return Item()
if "seasons"==0:
action = 'serie_seasons'
title = '%(name)s Serie' % video
else:
action = 'serie_seasons'
title = '%(name)s (%(seasons)d Temporadas) (%(episodes)d Episodios)' % video
elif tipo =="movies" or tipo == "documentaries":
url = 'http://api.series.ly/v2/media/episode/links?auth_token='+ auth_token+'&idm=%s&mediaType=%s' %(video["idm"],get_constant("mediaType")[tipo])
if "year" not in video: video["year"]= ""
title = '%(name)s (%(year)s)' % video
action= 'multiple_links'
if "plot_es" not in video : video["plot_es"]= " "
logger.info(action)
item=Item(channel=__channel__,
action = action,
title = title,
url = url,
thumbnail = video['poster']["large"],
plot = video["plot_es"],
show = video['name'],
extra = ""
)
return item
def multiple_links(item):
logger.info("[seriesly.py] multiple_links")
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
# Extrae las entradas (carpetas)
post = 'auth_token=%s&user_token=%s' % ( qstr(auth_token), qstr(user_token) )
data = scrapertools.cache_page(item.url+"&"+post)
linkList = load_json(data)
if "error" in linkList:
if linkList["error"]!=0:
error_message(linkList["error"])
return []
else:
return []
if linkList == None : linkList = []
logger.info("hay %d videos" % len(linkList))
tipoList=["streaming","direct_download"]
itemlist = []
for tipo in tipoList:
if tipo in linkList:
for link in linkList[tipo]:
if "quality" not in link:
link["quality"]= ""
if link['subtitles']!="":
linktitle = '%(host)s - %(lang)s (sub %(subtitles)s) %(quality)s' % link
else:
linktitle = '%(host)s - %(lang)s %(quality)s' % link
itemlist.append(
Item(channel=__channel__,
action = "links",
title = linktitle,
url = link['video_url']+"?"+post,
thumbnail = item.thumbnail,
plot = "",
extra=link["date_created"]
)
)
return itemlist
def links(item):
itemlist = []
try:
count = 0
exit = False
while(not exit and count < 5):
#A veces da error al intentar acceder
try:
logger.info(str(item.url))
page = urllib2.urlopen(item.url)
urlvideo = "\"" + page.geturl() + "\""
logger.info(str(page.read()))
logger.info(item.url)
exit = True
except:
import traceback
logger.info(traceback.format_exc())
count = count + 1
logger.info("urlvideo="+urlvideo)
for video in servertools.findvideos(urlvideo) :
#scrapedtitle = title.strip() + " " + match[1] + " " + match[2] + " " + video[0]
scrapedtitle = scrapertools.htmlclean(video[0])
scrapedurl = video[1]
server = video[2]
itemlist.append( Item(channel=__channel__, action="play" , title=scrapedtitle, url=scrapedurl, thumbnail=item.thumbnail, plot="", server=server, extra="", category=item.category, fanart=item.thumbnail, folder=False))
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return itemlist
def search_cat(item,):
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
post = 'auth_token=%s' % ( qstr(auth_token) )
logger.info(str(item.url))
#busqueda general
url="http://api.series.ly/v2/media/browse"
post="auth_token="+auth_token+"&order=most_viewed"
#busquda por tipo
if item.url != "" :
mediaType=get_constant("mediaType")[item.url]
post=post+"&mediaType=%s" % mediaType
#busqueda por genero
if item.extra != "":
post=post+"&genre="+item.extra
#paginacion
if item.plot != "":
post=post+"&page="+item.plot
plot=int(item.plot)+1
item.plot=str(plot)
# Extrae las entradas (carpetas)
serieList = load_json(scrapertools.cache_page(url, post))
if "error" in serieList:
if serieList["error"]!=0:
error_message(serieList["error"])
return []
else:
return []
if serieList == None : serieList = []
logger.info("hay %d series" % len(serieList))
itemlist = []
for serieItem in serieList['results']["medias"]:
logger.info(str(serieItem))
tipo=get_constant("mediaType")[serieItem["mediaType"]]
itemlist.append(generate_item(serieItem, tipo, auth_token))
#Añadimos Pagina Siguiente
if len(itemlist)>0:
itemlist.append( Item(channel=__channel__, title="Pagina Siguiente", action="search_cat", extra=item.extra, url=item.url, plot=item.plot ))
return itemlist
def search(item,texto="", categoria="*"):
item.channel="seriesly"
res = search_videos(item, texto)
return res
def search_videos(item, texto=None):
logger.info("[seriesly.py] search")
# Obtiene de nuevo los tokens
auth_token, user_token = getCredentials()
post = 'auth_token=%s' % ( qstr(auth_token) )
logger.info(str(item.url))
#Añadido por culpa del if de la paginacion
if texto is None:
query=""
else:
query=texto
#busqueda general
url="http://api.series.ly/v2/search"
post="auth_token="+auth_token+"&order=votes_num&onlyTitle=true&q="+query
#busquda por tipo
if item.url != "" :
mediaType=get_constant("mediaType")[item.url]
post=post+"&filter=%s" % mediaType
#busqueda por genero
if item.extra != "":
post=post+"&genere="+item.extra
#paginacion
if item.plot != "":
post=post+"&page="+item.plot
plot=int(item.plot)+1
item.plot=str(plot)
# Extrae las entradas (carpetas)
serieList = load_json(scrapertools.cache_page(url, post))
if "error" in serieList:
if serieList["error"]!=0:
error_message(serieList["error"])
return []
else:
return []
if serieList == None : serieList = []
logger.info("hay %d series" % len(serieList))
itemlist = []
for serieItem in serieList['response']['results']:
logger.info(str(serieItem))
tipo=get_constant("mediaType")[serieItem['object']["mediaType"]]
itemlist.append(generate_item(serieItem['object'], tipo, auth_token))
#Añadimos Pagina Siguiente
if texto is None:
itemlist.append( Item(channel=__channel__, title="Pagina Siguiente", action="search_videos", extra=item.extra, url=item.url, plot=item.plot ))
return itemlist
def load_json(data):
# callback to transform json string values to utf8
def to_utf8(dct):
rdct = {}
for k, v in dct.items() :
if isinstance(v, (str, unicode)) :
rdct[k] = v.encode('utf8', 'ignore')
else :
rdct[k] = v
return rdct
try:
import json
except:
try:
import simplejson as json
except:
from lib import simplejson as json
try :
json_data = json.loads(data, object_hook=to_utf8)
return json_data
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
''' URLEncode a string '''
def qstr(string):
return string # urllib2.quote(string)
def perform_login():
auth_token,user_token=check_token()
"""url="http://api.series.ly/v2/app/show_quota"
post="auth_token=%s&user_token=%s"%(auth_token,user_token)
logger.info(url)
data = scrapertools.cache_page(url,post=post)
logger.info("****")
logger.info(data)
logger.info("****")"""
if not auth_token or not user_token :
auth_token=generate_authtoken()
user_token=generate_usertoken(auth_token)
if not user_token:
return [auth_token,user_token]
logger.info("****")
logger.info(auth_token)
logger.info("****")
logger.info(user_token)
logger.info("****")
url="http://api.series.ly/v2/app/show_quota"
post="auth_token=%s&user_token=%s"%(user_token,auth_token)
return [auth_token,user_token]
def generate_authtoken():
url = "http://api.series.ly/v2/auth_token/"
#post='id_api=1363&secret=zHX3TPEW2tvZRFneAAU7'
post='id_api=8&secret=N5X54c4OeDUwU8dWBbMW'
data = scrapertools.cache_page(url,post=post)
logger.info("****")
logger.info(data)
logger.info("****")
auth_data= load_json(data)
if "error" in auth_data:
if auth_data["error"]!=0:
error_message(auth_data["error"])
return False
else:
return False
auth_token = auth_data["auth_token"]
path=config.get_data_path()
f =open(path+"seriesly_auth", "w+")
f.write(str(data+";"))
f.close()
return auth_token
def generate_usertoken(auth_token):
LOGIN = config.get_setting("serieslyuser")
PASSWORD = config.get_setting("serieslypassword")
url = "http://api.series.ly/v2/user/user_token"
post = "auth_token=%s&username=%s&password=%s&remember=1&user_agent=''" % ( auth_token, qstr(LOGIN), qstr(PASSWORD) )
data = scrapertools.cache_page(url,post=post)
logger.info("****")
logger.info(data)
logger.info("****")
user_data=load_json(data)
if "error" in user_data:
if user_data["error"]!=0:
error_message(user_data["error"])
return False
else:
return False
path=config.get_data_path()
logger.info(path)
f =open(path+"seriesly_auth", "a")
f.write(str(data))
logger.info(str(data))
f.close()
user_token=user_data["user_token"]
return user_token
def check_token():
path=config.get_data_path()
try:
f =open(path+"seriesly_auth", "r")
data= f.read()
f.close()
auth, user=data.split(";")
logger.info(data)
auth = load_json(auth)
user = load_json(user)
import time
t=time.time()
logger.info(str(auth["auth_expires_date"]-t))
if auth["auth_expires_date"]>t and user["user_expires_date"]>t:
return auth["auth_token"], user["user_token"]
except:
pass
return False, False
def get_constant(texto):
constants={}
constants["categorias"]={ "action": "Acción",
"comedy": "Comedia",
"family": "Familiar",
"history": "Histórico",
"mystery": "Misterio",
"sci-fi": "Ciencia Ficción",
"war": "Guerra",
"adventure": "Aventura",
"crime": "Crimen",
"fantasy": "Fantasía",
"horror": "Horror",
"news": "Actualidad",
"sport": "Deportes",
"western": "Western",
"animation": "Animación",
"documentary": "Documental",
"film-noir": "Cine Negro",
"music": "Música",
"drama": "Drama",
"musical": "Musical",
"romance": "Romance",
"thriller": "Thriller",
"reallity": "Reallity Show"}
constants["mediaType"]= {1:"series",
2:"movies",
3:"documentaries",
4:"tvshows",
5:"episode",
"series":1,
"movies":2,
"documentaries":3,
"tvshows":4,
"episode":5,
"": "None",
"None": "" }
constants["error"]= { "0":"Success response code",
"1":"INVALID_AUTH_TOKEN",
"2":"EMPTY_API_OR_SECRET",
"3":"EMPTY_USR_OR_PWD",
"4":"BAD_QUERY_SYNTAX",
"5":"OPERATION_DENIED",
"7":"INVALID_USER_TOKEN",
"8":"INVALID_MEDIATYPE",
"9":"MISSING_MEDIATYPE",
"10":"MISSING_IDM",
"11":"INVALID_HOST",
"12":"MISSING_IDV",
"13":"INVALID_LANGUAGE",
"15":"APP_NOT_FOUND",
"16":"APP_INACTIVE",
"17":"APP_CANT_LOGIN",
"18":"APP_MISSCONFIGURED",
"19":"METHOD_NOT_EXIST",
"20":"QUOTA_EXCEEDED",
"21":"APP_NOT_ALLOWED",
"22":"VIDEO_NOT_FOUND",
"30":"USER_OPERATION_DENIED",
"31":"USER_NOT_FOUND",
"32":"USER_INACTIVE",
"42":"NO_RESULTS_FOUND",
"50":"MEDIA_NOT_FOUND",
"51":"EMPTY_LINKS",
"52":"INVALID_EPISODES_IDS"}
return constants[texto]
def error_message(error):
    """Show a SERIES.LY dialog describing *error*; log instead if the
    Kodi UI (xbmcgui) is unavailable or the lookup fails."""
    try:
        import xbmcgui
        text = get_constant("error")[str(error)]
        xbmcgui.Dialog().ok("SERIES.LY", text)
    except:
        logger.info("se ha producido en un error " + str(error))
``` |
[
{
"content": "```python\n# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0... | [
{
"content": "<|memory_start|>```python\n# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/lice... | ```python
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark import context
from rally.common import log as logging
from rally import consts
from rally import osclients
LOG = logging.getLogger(__name__)
@context.context(name="create_flavor", order=1000)
class CreateFlavorContext(context.Context):
    """Create sample flavor

    This sample create flavor with specified options before task starts and
    delete it after task completion.

    To create your own context plugin, inherit it from
    rally.benchmark.context.Context
    """

    # JSON schema validating the context's config block in the task file.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "additionalProperties": False,
        "properties": {
            "flavor_name": {
                "type": "string",
            },
            "ram": {
                "type": "integer",
                "minimum": 1
            },
            "vcpus": {
                "type": "integer",
                "minimum": 1
            },
            "disk": {
                "type": "integer",
                "minimum": 1
            }
        }
    }

    def setup(self):
        """This method is called before the task start."""
        try:
            # use rally.osclients to get necessary client instance
            nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
            # and than do what you need with this client
            self.context["flavor"] = nova.flavors.create(
                # context settings are stored in self.config
                name=self.config.get("flavor_name", "rally_test_flavor"),
                ram=self.config.get("ram", 1),
                vcpus=self.config.get("vcpus", 1),
                disk=self.config.get("disk", 1)).to_dict()
            LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"])
        except Exception as e:
            # Format the exception itself: BaseException.message is
            # deprecated since Python 2.6 and removed in Python 3.
            msg = "Can't create flavor: %s" % e
            if logging.is_debug():
                LOG.exception(msg)
            else:
                LOG.warning(msg)

    def cleanup(self):
        """This method is called after the task finish."""
        try:
            nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
            nova.flavors.delete(self.context["flavor"]["id"])
            LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
        except Exception as e:
            # Same fix as setup(): don't rely on the removed .message attr.
            msg = "Can't delete flavor: %s" % e
            if logging.is_debug():
                LOG.exception(msg)
            else:
                LOG.warning(msg)
``` |
[
{
"content": "Repeat the following code:\n```python\n'''\nEstablish custom log levels for rexlexer's verbose output.\n'''\nimport logging\nfrom rexlex.config import LOG_MSG_MAXWIDTH\n\n\n# ---------------------------------------------------------------------------\n# Establish custom log levels.\n# ------------... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n'''\nEstablish custom log levels for rexlexer's verbose output.\n'''\nimport logging\nfrom rexlex.config import LOG_MSG_MAXWIDTH\n\n\n# ---------------------------------------------------------------------------\n# Establish custom log levels.... | ```python
'''
Establish custom log levels for rexlexer's verbose output.
'''
import logging
from rexlex.config import LOG_MSG_MAXWIDTH
# ---------------------------------------------------------------------------
# Establish custom log levels.
# ---------------------------------------------------------------------------
# Custom numeric log levels, all below logging.DEBUG (10) so they are
# only emitted when a logger is explicitly configured for rexlex tracing.
# Used to report tokens getting yielded.
REXLEX_TRACE_RESULT = 9

# Used to report starting, stopping, etc.
REXLEX_TRACE_META = 8

# Used to report changes to lexer state.
REXLEX_TRACE_STATE = 7

# Used to report on specific rules.
REXLEX_TRACE_RULE = 6

# Used to dump as much info as possible.
REXLEX_TRACE = 5

# (level number, display name for log records, Logger method name)
REXLEX_LOG_LEVELS = (
    (REXLEX_TRACE_RESULT, 'REXLEX_TRACE_RESULT', 'rexlex_trace_result'),
    (REXLEX_TRACE_META, 'REXLEX_TRACE_META', 'rexlex_trace_meta'),
    (REXLEX_TRACE_STATE, 'REXLEX_TRACE_STATE', 'rexlex_trace_state'),
    (REXLEX_TRACE_RULE, 'REXLEX_TRACE_RULE', 'rexlex_trace_rule'),
    (REXLEX_TRACE, 'REXLEX_TRACE', 'rexlex_trace'),
    )

# Register the display names so %(levelname)s renders them.
for loglevel, loglevel_name, method_name in REXLEX_LOG_LEVELS:
    logging.addLevelName(loglevel, loglevel_name)
def rexlex_trace_result(self, message, *args, **kws):
    """Log *message* at REXLEX_TRACE_RESULT level (yielded tokens)."""
    if not self.isEnabledFor(REXLEX_TRACE_RESULT):
        return
    self._log(REXLEX_TRACE_RESULT, message, args, **kws)
logging.Logger.rexlex_trace_result = rexlex_trace_result


def rexlex_trace_meta(self, message, *args, **kws):
    """Log *message* at REXLEX_TRACE_META level (start/stop events)."""
    if not self.isEnabledFor(REXLEX_TRACE_META):
        return
    self._log(REXLEX_TRACE_META, message, args, **kws)
logging.Logger.rexlex_trace_meta = rexlex_trace_meta


def rexlex_trace_state(self, message, *args, **kws):
    """Log *message* at REXLEX_TRACE_STATE level (lexer state changes)."""
    if not self.isEnabledFor(REXLEX_TRACE_STATE):
        return
    self._log(REXLEX_TRACE_STATE, message, args, **kws)
logging.Logger.rexlex_trace_state = rexlex_trace_state


def rexlex_trace_rule(self, message, *args, **kws):
    """Log *message* at REXLEX_TRACE_RULE level (per-rule detail)."""
    if not self.isEnabledFor(REXLEX_TRACE_RULE):
        return
    self._log(REXLEX_TRACE_RULE, message, args, **kws)
logging.Logger.rexlex_trace_rule = rexlex_trace_rule


def rexlex_trace(self, message, *args, **kws):
    """Log *message* at REXLEX_TRACE level (maximum verbosity)."""
    if not self.isEnabledFor(REXLEX_TRACE):
        return
    self._log(REXLEX_TRACE, message, args, **kws)
logging.Logger.rexlex_trace = rexlex_trace
# ---------------------------------------------------------------------------
# Colorize them.
# ---------------------------------------------------------------------------
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved.
# Licensed under the new BSD license.
#
import ctypes
import logging
import os
class ColorizingStreamHandler(logging.StreamHandler):
    """StreamHandler that colorizes records per level using ANSI escapes,
    with a Windows code path that translates the escapes into console API
    calls.  Level-to-color mapping is chosen at class-definition time
    based on os.name."""

    # color names to indices
    color_map = {
        'black': 0,
        'red': 1,
        'green': 2,
        'yellow': 3,
        'blue': 4,
        'magenta': 5,
        'cyan': 6,
        'white': 7,
    }

    #levels to (background, foreground, bold/intense)
    if os.name == 'nt':
        level_map = {
            REXLEX_TRACE: (None, 'blue', True),
            REXLEX_TRACE_RULE: (None, 'white', False),
            REXLEX_TRACE_STATE: (None, 'yellow', True),
            REXLEX_TRACE_META: (None, 'red', True),
            REXLEX_TRACE_RESULT: ('red', 'white', True),
        }
    else:
        level_map = {
            REXLEX_TRACE: (None, 'blue', False),
            REXLEX_TRACE_RULE: (None, 'white', False),
            REXLEX_TRACE_STATE: (None, 'yellow', False),
            REXLEX_TRACE_META: (None, 'red', False),
            REXLEX_TRACE_RESULT: ('red', 'white', True),
        }
    # ANSI Control Sequence Introducer and the "reset all attributes" code.
    csi = '\x1b['
    reset = '\x1b[0m'

    @property
    def is_tty(self):
        """True when the underlying stream is a terminal (or we're on
        Jenkins, which is treated as one so colors show up in its logs)."""
        # bluff for Jenkins
        if os.environ.get('JENKINS_URL'):
            return True
        isatty = getattr(self.stream, 'isatty', None)
        return isatty and isatty()

    def emit(self, record):
        """Write the formatted record, colorized only when on a tty."""
        try:
            message = self.format(record)
            stream = self.stream
            if not self.is_tty:
                stream.write(message)
            else:
                self.output_colorized(message)
            stream.write(getattr(self, 'terminator', '\n'))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    if os.name != 'nt':
        # POSIX terminals understand ANSI escapes natively.
        def output_colorized(self, message): # NOQA
            self.stream.write(message)
    else:
        import re
        ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')

        # ANSI color index -> Windows console attribute bits.
        nt_color_map = {
            0: 0x00,    # black
            1: 0x04,    # red
            2: 0x02,    # green
            3: 0x06,    # yellow
            4: 0x01,    # blue
            5: 0x05,    # magenta
            6: 0x03,    # cyan
            7: 0x07,    # white
        }

        def output_colorized(self, message): # NOQA
            # Split the message on ANSI sequences and replay each one as a
            # SetConsoleTextAttribute call on the stdout/stderr handle.
            parts = self.ansi_esc.split(message)
            write = self.stream.write
            h = None
            fd = getattr(self.stream, 'fileno', None)
            if fd is not None:
                fd = fd()
                if fd in (1, 2): # stdout or stderr
                    h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
            while parts:
                text = parts.pop(0)
                if text:
                    write(text)
                if parts:
                    params = parts.pop(0)
                    if h is not None:
                        params = [int(p) for p in params.split(';')]
                        color = 0
                        for p in params:
                            if 40 <= p <= 47:
                                color |= self.nt_color_map[p - 40] << 4
                            elif 30 <= p <= 37:
                                color |= self.nt_color_map[p - 30]
                            elif p == 1:
                                color |= 0x08 # foreground intensity on
                            elif p == 0: # reset to default color
                                color = 0x07
                            else:
                                pass # error condition ignored
                        ctypes.windll.kernel32.SetConsoleTextAttribute(h,
                                                                      color)

    def colorize(self, message, record):
        """Wrap *message* in the ANSI color codes for the record's level;
        unmapped levels pass through unchanged."""
        if record.levelno in self.level_map:
            bg, fg, bold = self.level_map[record.levelno]
            params = []
            if bg in self.color_map:
                params.append(str(self.color_map[bg] + 40))
            if fg in self.color_map:
                params.append(str(self.color_map[fg] + 30))
            if bold:
                params.append('1')
            if params:
                message = ''.join((self.csi, ';'.join(params),
                                   'm', message, self.reset))
        return message

    def format(self, record):
        """Format normally, then colorize only the first line so any
        traceback text stays uncolored."""
        message = logging.StreamHandler.format(self, record)
        if self.is_tty:
            # Don't colorize any traceback
            parts = message.split('\n', 1)
            parts[0] = self.colorize(parts[0], record)
            message = '\n'.join(parts)
        return message
``` |
[
{
"content": "Repeat the code precisely:\n```python\n\"\"\"\n Copyright (C) 2020 Quinn D Granfor <spootdev@gmail.com>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2, as published by the Free Software Foundation.\n\n... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"\n Copyright (C) 2020 Quinn D Granfor <spootdev@gmail.com>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2, as published by the Free Software... | ```python
"""
Copyright (C) 2020 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import math
from common import common_internationalization
def com_pagination_page_calc(request):
    """Return (page, offset) for the request's ?page=N query argument.

    The offset is the number of items preceding the requested page, based
    on the session's 'per_page' setting; page defaults to 1.
    """
    current_page = int(request.args.get('page', 1))
    per_page = int(request.ctx.session['per_page'])
    return current_page, per_page * (current_page - 1)
def com_pagination_boot_html(page, url, item_count=0,
                             client_items_per_page=30, format_number=True):
    """
    Build a Bootstrap pagination <ul> for *page* of *item_count* items.

    Links point at url?page=N; Previous/Next arrows are added when they
    apply.  When format_number is True the visible page numbers go
    through the project's internationalized number formatter.
    Returns None when everything fits on a single page.
    """
    # if everything fits on one page, don't paginate.
    if item_count < client_items_per_page:
        return None
    # start pagination calculations
    pages = math.ceil(item_count / client_items_per_page)
    pagination_links = '<ul class="pagination">'
    # only do previous if not on first page
    if page > 1:
        link_number = str(page - 1)
        pagination_links += '<li class="page-item">' \
                            '<a class="page-link" href="' + url + '?page=' + link_number \
                            + '" aria-label="Previous">' \
                              '<span aria-hidden="true">«</span>' \
                              '<span class="sr-only">Previous</span>' \
                              '</a>' \
                              '</li>'
    # if less than ten pages, just display all the pages
    if pages < 10:
        build_start = 1
        build_stop = pages
    else:
        # show a ten-page window starting at the current page
        build_start = page
        build_stop = page + 9
        if build_stop > pages:
            build_stop = pages
    # build_stop is inclusive; the old range(build_start, build_stop)
    # silently dropped the link to the final page.
    for ndx in range(build_start, build_stop + 1):
        link_number = str(ndx)
        if format_number:
            page_number = common_internationalization.com_inter_number_format(ndx)
        else:
            page_number = str(ndx)
        pagination_links += '<li class="page-item"><a class="page-link"' \
                            ' href="' + url + '?page=' + link_number + '">' \
                            + page_number + '</a></li>'
    # only do next if not on last page
    if page < pages:
        link_number = str(page + 1)
        pagination_links += '<li class="page-item">' \
                            '<a class="page-link" href="' + url + '?page=' + link_number \
                            + '" aria-label="Next">' \
                              '<span aria-hidden="true">»</span>' \
                              '<span class="sr-only">Next</span>' \
                              '</a>' \
                              '</li>'
    pagination_links += '</ul>'
    return pagination_links
``` |
[
{
"content": "Here is the code block:\n```python\nimport view\r\nimport wx\r\nimport wx.gizmos as gizmos\r\nfrom cuttlebug.ui.controls import DictListCtrl\r\nfrom cuttlebug.util import ArtListMixin, has_icon, bidict, KeyTree, str2int\r\nfrom functools import partial\r\nimport cuttlebug.gdb as gdb\r\nimport os, ... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nimport view\r\nimport wx\r\nimport wx.gizmos as gizmos\r\nfrom cuttlebug.ui.controls import DictListCtrl\r\nfrom cuttlebug.util import ArtListMixin, has_icon, bidict, KeyTree, str2int\r\nfrom functools import partial\r\nimport cuttlebug.gdb as gd... | ```python
import view
import wx
import wx.gizmos as gizmos
from cuttlebug.ui.controls import DictListCtrl
from cuttlebug.util import ArtListMixin, has_icon, bidict, KeyTree, str2int
from functools import partial
import cuttlebug.gdb as gdb
import os, threading
import cuttlebug.ui.menu as menu
import cuttlebug.settings as settings
import cuttlebug.project as project
import cuttlebug.ui.controls as controls
MNU_ENABLE_BKPT = 0
MNU_DISABLE_BKPT = 1
class RuntimeTree(gizmos.TreeListCtrl, ArtListMixin, KeyTree):
    """Two-column debugger runtime view (Python 2 / wxPython).

    Presents the gdb session model as a tree with top-level branches for
    the call stack, breakpoints, CPU registers, watches and hardware
    (special-function) registers.  Column 0 shows the context name,
    column 1 the value (editable for variables and registers).  The view
    reacts to model events (gdb.EVT_GDB_*) and marshals all UI mutation
    back to the main thread via wx.CallAfter.
    """
    def __init__(self, parent):
        self.parent = parent
        gizmos.TreeListCtrl.__init__(self, id=-1, parent=parent, style=wx.TR_DEFAULT_STYLE | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_HIDE_ROOT | wx.TR_HAS_BUTTONS | wx.TR_LINES_AT_ROOT | wx.TR_EDIT_LABELS)
        ArtListMixin.__init__(self)
        KeyTree.__init__(self)
        self.SetFont(wx.Font(8, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        self.parent = parent
        self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.on_expanding)
        self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.on_get_tooltip)
        self.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.on_begin_label_edit)
        self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.on_end_label_edit)
        self.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_select_item)
        #self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
        self.Bind(wx.EVT_LEFT_DCLICK, self.on_dclick)
        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.on_dclick)
        self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.on_item_right_click)
        self.Bind(wx.EVT_LIST_COL_END_DRAG, self.on_col_resize)
        self.model = None
        self.AddColumn('Context')
        self.AddColumn('Value')
        self.SetColumnEditable(1, True)
        self.SetColumnAlignment(1, wx.ALIGN_RIGHT)
        self.lock = threading.RLock()
        self.__var_idx = 0
        self.create_popup_menus()
        self.clear()
        self.load_positions()

    def on_col_resize(self, evt):
        # Persist column widths whenever the user drags a divider.
        self.save_positions()

    def save_positions(self):
        """Store current column widths in the session settings."""
        cols = self.GetColumnCount()
        widths = [self.GetColumnWidth(i) for i in range(cols)]
        settings.session_set('runtime_view_col_widths', widths)

    def load_positions(self):
        """Restore column widths from session settings (best effort)."""
        try:
            widths = settings.session_get('runtime_view_col_widths')
            cols = self.GetColumnCount()
            if len(widths) != cols:
                raise Exception("Mismatch of stored column widths")
            for i, width in enumerate(widths):
                self.SetColumnWidth(i, width)
        except:
            pass

    def create_popup_menus(self):
        """Build the context menus for breakpoints, frames and watches."""
        self.menu_manager = menu.MenuManager()

        m = self.menu_manager.menu()
        m.item("Enable", func=self.on_enable_breakpoint, icon='stop.png', show=MNU_ENABLE_BKPT, hide=MNU_DISABLE_BKPT)
        m.item("Disable", func=self.on_disable_breakpoint, icon='stop_disabled.png', show=MNU_DISABLE_BKPT, hide=MNU_ENABLE_BKPT)
        m.item("Remove", func=self.on_remove_breakpoint, icon='ex.png')
        self.menu_breakpoint_item = m

        m = self.menu_manager.menu()
        m.item("Enable All Breakpoints", func=self.on_enable_all_breakpoints, icon='stop.png')
        m.item("Disable All Breakpoints", func=self.on_disable_all_breakpoints, icon='stop_disabled.png')
        m.item("Remove All Breakpoints", func=self.on_remove_all_breakpoints, icon='ex.png')
        self.menu_breakpoints = m

        m = self.menu_manager.menu()
        m.item("Show this Frame in Source", func=self.on_show_frame, icon='find.png')
        m.step_out = m.item("Step Out\tShift+F6", func=self.on_step_out, icon='control_play_blue.png')
        self.menu_frame_item = m

        m = self.menu_manager.menu()
        m.item("Add Watch...", func=self.on_add_watch, icon='magnifier_zoom_in.png')
        self.menu_watches = m

        m = self.menu_manager.menu()
        m.item("Remove Watch", func=self.on_remove_watch, icon='ex.png')
        self.menu_watch_item = m

    def set_model(self, model):
        """Attach the gdb session *model* and subscribe to its events."""
        self.model = model
        self.model.Bind(gdb.EVT_GDB_UPDATE_VARS, self.on_var_update)
        self.model.Bind(gdb.EVT_GDB_UPDATE_STACK, self.on_stack_update)
        self.model.Bind(gdb.EVT_GDB_UPDATE_BREAKPOINTS, self.on_breakpoint_update)
        self.model.Bind(gdb.EVT_GDB_UPDATE_REGISTERS, self.on_register_update)
        self.model.Bind(gdb.EVT_GDB_FINISHED, self.on_gdb_finished)
        self.model.Bind(gdb.EVT_GDB_STOPPED, self.on_gdb_stopped)
        wx.CallAfter(self.build_sfr_tree)

    def get_var_name(self):
        """Return a fresh unique gdb variable-object name ("rtv_N")."""
        name = "rtv_%d" % self.__var_idx
        self.__var_idx += 1
        return name

    def on_breakpoint_update(self, evt):
        wx.CallAfter(self.update_breakpoints)

    def on_register_update(self, evt):
        wx.CallAfter(self.update_registers, evt.data)
        self.save_positions()

    def on_var_update(self, evt):
        """Route model var updates: new vars are added under their pending
        parent, re-created vars replace their old item, known vars refresh
        in place.  evt.data is a list of gdb var-object names."""
        names = evt.data
        for name in names:
            if name in self.pending_var_additions:
                self.lock.acquire()
                parent = self.pending_var_additions.pop(name)
                self.lock.release()
                wx.CallAfter(self.add_var_item, parent, name, self.model.vars[name])
                if parent == self.watch_item:
                    self.expand(self.watch_item)
            elif name in self.pending_var_updates:
                self.lock.acquire()
                var_item = self.pending_var_updates.pop(name)
                old_name= self.get_item_data(var_item)
                if old_name in self.var_registry:
                    self.var_registry.pop(old_name)
                self.lock.release()
                wx.CallAfter(self.update_var_item, var_item, name, self.model.vars[name])
            elif name in self.var_registry and name in self.model.vars:
                var_item = self.var_registry[name]
                wx.CallAfter(self.update_var_item, var_item, name, self.model.vars[name])
            else:
                pass

    def on_stack_update(self, evt):
        #print self.model.stack.pretty()
        # Incremental update when the visible frames still match the
        # model's stack; otherwise rebuild from scratch.
        if self.model:
            if self.__check_stack():
                wx.CallAfter(self.update_stack)
            else:
                wx.CallAfter(self.rebuild_stack)
        evt.Skip()

    def on_gdb_finished(self, evt):
        self.clear()
        self.model = None

    def on_item_right_click(self, evt):
        """Pop up the context menu matching the clicked tree item."""
        item = self.__get_evt_item(evt)
        if item.is_ok():
            self.select_item(item)
            if self.model:
                if item == self.breakpoints_item and self.get_children_count(self.breakpoints_item) > 0:
                    self.PopupMenu(self.menu_breakpoints.build(self), evt.GetPoint())
                elif self.is_descendent(item, self.breakpoints_item):
                    bkpt = self.get_item_data(item)
                    self.breakpoint = bkpt
                    self.menu_manager.publish(MNU_DISABLE_BKPT) if bkpt.enabled else self.menu_manager.publish(MNU_ENABLE_BKPT)
                    self.PopupMenu(self.menu_breakpoint_item.build(self), evt.GetPoint())
                elif self.is_frame_item(item):
                    frame = self.get_item_data(item)
                    self.frame = frame
                    # Step Out only makes sense from the innermost frame
                    # when there is a caller to return to.
                    if frame.level == 0 and len(self.frames) > 1:
                        self.menu_frame_item.step_out.show()
                    else:
                        self.menu_frame_item.step_out.hide()
                    self.PopupMenu(self.menu_frame_item.build(self), evt.GetPoint())
                elif item == self.watch_item:
                    self.PopupMenu(self.menu_watches.build(self), evt.GetPoint())
                elif self.is_descendent(item, self.watch_item):
                    self.selected_item = item
                    self.PopupMenu(self.menu_watch_item.build(self), evt.GetPoint())
        evt.Skip()

    def on_dclick(self, evt):
        """Double-click: toggle a breakpoint, or open an edit dialog for a
        hardware/CPU register."""
        id = self.__get_evt_item(evt)
        if self.model and self.is_descendent(id, self.breakpoints_item):
            bkpt = self.get_item_data(id)
            if bkpt.enabled:
                self.model.break_disable(bkpt)
            else:
                self.model.break_enable(bkpt)
        elif self.model and self.is_descendent(id, self.sfr_item):
            reg = self.get_item_data(id)
            if reg:
                old_value = reg.value
                try:
                    response = controls.RegisterEditDialog.show(self, reg)
                except Exception, e:
                    print e
                if response == wx.ID_OK:
                    self.model.data_evaluate_expression("%s=%s" % (reg.expression, reg.value), callback=partial(self.on_sfr_data, id,True))
                else:
                    reg.value = old_value
        elif self.model and self.is_descendent(id, self.registers_item):
            name = self.get_item_data(id)
            target_model = self.parent.controller.project.target
            reg = target_model.find_by_name(name)
            if not reg:
                # Not described by the target model: synthesize a plain
                # 32-bit register so the edit dialog still works.
                reg = project.CPURegister(name, name, 4)
                reg.add_field(project.Field(0, 32, name))
            reg.value = str2int(self.register_registry[name])
            response = controls.RegisterEditDialog.show(self, reg)
            if response == wx.ID_OK:
                self.model.data_evaluate_expression("%s=%s" % (reg.expression, reg.value),callback=self.on_register_data)
        evt.Skip()

    def on_register_data(self, evt):
        self.model.update()

    def on_begin_label_edit(self, evt):
        """Allow value edits only for vars in the innermost frame, HW
        registers and watches; veto everything else."""
        item = self.get_event_item(evt)
        name = self.get_item_data(item)
        if name in self.var_registry:
            if self.is_descendent(item, self.get_frame_items()[-1]):
                evt.Skip()
                return
        if self.is_descendent(item, self.sfr_item) or self.is_descendent(item, self.watch_item):
            evt.Skip()
            return
        evt.Veto()

    def on_select_item(self, evt):
        #item = self.get_event_item(evt)
        #print self.get_item_data(item)
        evt.Veto()
        #evt.Skip()

    def on_end_label_edit(self, evt):
        """Commit an edited value back to gdb, then veto so the tree text
        is refreshed from the model rather than the raw label."""
        item = self.get_event_item(evt)
        name = self.get_item_data(item)
        if name in self.var_registry and name in self.model.vars:
            new_var_value = evt.GetLabel()
            self.model.var_assign(name, new_var_value)
        if self.is_descendent(item, self.sfr_item) or self.is_descendent(item, self.watch_item):
            reg = self.get_item_data(item)
            if hasattr(reg, 'expression'):
                self.model.data_evaluate_expression('%s=%s' % (reg.expression, evt.GetLabel()), callback=partial(self.on_sfr_data, item,True))
        evt.Veto()

    def on_get_tooltip(self, evt):
        item = self.get_event_item(evt)
        if self.model and item:
            if item == self.stack_item:
                evt.SetToolTip(wx.ToolTip("Stack Depth: %d frames" % self.model.stack.depth))
            data = self.get_item_data(item)
            if hasattr(data, 'file'): # This is a stack frame
                evt.SetToolTip(wx.ToolTip("Stack frame %s() at 0x%x %s" % (data.func, data.addr, "in file %s" % data.file if data.file else "")))
            elif data in self.var_registry:
                evt.SetToolTip(wx.ToolTip(self.model.vars[data].expression))

    def on_expanding(self, evt):
        """Lazily populate children on expand: frame locals/arguments or a
        var's children are fetched from gdb only on first expansion, and
        only while the target is stopped."""
        item=self.get_event_item(evt)
        item_data=self.get_item_data(item)
        if self.is_descendent(item, self.sfr_item):
            self.update_sfr_tree(item, force_root=True, colorize=False)
            return
        if hasattr(item_data, 'level') and self.get_children_count(item, False) == 0: #item_data is a stack frame, and we wish to list its locals
            if not self.model.running:
                self.model.stack_list_arguments(frame=item_data.level, callback=partial(self.__on_listed_arguments, item))
            else:
                evt.Veto()
        elif item_data in self.var_registry and self.get_children_count(item, False) == 0:
            if not self.model.running:
                self.model.var_list_children(item_data, callback=partial(self.__on_listed_children, item))
            else:
                evt.Veto()
        evt.Skip()

    def __on_listed_children(self, parent, result):
        # gdb callback (worker thread): queue each child var for addition
        # under *parent*, then funnel back through on_var_update.
        names = []
        if hasattr(result, 'children'):
            for child in result.children:
                varname= child['child']['name']
                self.lock.acquire()
                self.pending_var_additions[varname] = parent
                self.lock.release()
                names.append(varname)
        class Dummy(object): pass
        evt = Dummy()
        evt.data = names
        wx.CallAfter(self.on_var_update, evt)

    def __on_listed_locals(self, frame_item, args, result):
        # gdb callback: create a var object for each argument and local of
        # the frame (only if the frame item is still empty).
        if result.cls != 'error':
            if hasattr(result, 'locals') and frame_item.is_ok():
                frame = self.get_item_data(frame_item)
                if self.get_children_count(frame_item, recursive=False) == 0:
                    for item in args + result.locals:
                        varname = self.get_var_name()
                        self.lock.acquire()
                        self.pending_var_additions[varname] = frame_item
                        self.lock.release()
                        self.model.var_create(item['name'], frame=frame.level, callback=self.__on_created_var, name=varname)

    def __on_listed_arguments(self, frame_item, result):
        # gdb callback: extract this frame's args, then chain into
        # stack_list_locals so args and locals are created together.
        if result.cls != 'error':
            if 'stack-args' in result and frame_item.is_ok():
                frame = self.get_item_data(frame_item)
                f = result['stack-args'][frame.level]['frame']
                if int(f['level']) != frame.level:
                    raise ValueError("Failed Sanity Check!")
                args = f['args']
                self.model.stack_list_locals(frame=frame.level, callback=partial(self.__on_listed_locals, frame_item, args))

    def __on_created_var(self, result):
        if hasattr(result, 'name'):
            self.model.var_update(result.name)

    def add_var_item(self, parent, name, var):
        """Append a tree item for gdb var *name* under *parent*."""
        if parent.is_ok():
            var_item = self.append_item(parent, var.expression.strip('"'))
            self.update_var_item(var_item, name, var)

    def update_var_item(self, var_item, name, var):
        """Refresh a var item's value text, icon and registry entry."""
        if var_item.is_ok():
            self.set_item_data(var_item, name)
            if var.children:
                self.set_item_has_children(var_item, bool(var.children))
            else:
                self.set_item_has_children(var_item, False)
                self.set_item_text(var_item, var.data, 1)
            icon_name = var.type.icon_name
            if has_icon(icon_name):
                self.set_item_art(var_item, icon_name)
            self.lock.acquire()
            self.var_registry[name] = var_item
            self.lock.release()

    def add_watch(self, s):
        """Create a floating gdb var for expression *s* under Watch."""
        vn = self.get_var_name()
        self.lock.acquire()
        self.pending_var_additions[vn] = self.watch_item
        self.lock.release()
        self.model.var_create(s, floating=True, callback=self.__on_created_var, name=vn)

    def on_add_watch(self, evt):
        dlg = wx.TextEntryDialog(self, "Watch Variable", self.last_watch)
        if dlg.ShowModal() == wx.ID_OK:
            var = dlg.GetValue().strip()
            self.add_watch('"%s"' % var) # Quoting the watch allows spaces

    def on_remove_watch(self, evt):
        item = self.get_item_data(self.selected_item)
        self.model.var_delete(item, callback=partial(self.on_watch_deleted, self.selected_item))

    def on_watch_deleted(self, watch_item, evt):
        self.delete(watch_item)

    def scrub_vars(self, all_vars=False):
        """Re-create var objects that no longer belong to the innermost
        frame (or all of them when all_vars is True) so their values track
        the correct frame after a stack change."""
        #TODO use a list
        to_update = {}
        if self.get_frame_count() > 0:
            frame_items = self.get_frame_items()
            for name, var_item in self.var_registry.iteritems():
                if (not self.is_descendent(var_item, frame_items[-1]) or all_vars) and name in self.model.vars:
                    var = self.model.vars[name]
                    frame = self.get_var_frame(name)
                    if frame:
                        varname = self.get_var_name()
                        to_update[(name, varname)] = (frame, var)
                        self.pending_var_updates[varname] = var_item
        for (old_name, new_name), (frame, var)in to_update.iteritems():
            self.model.var_delete(old_name)
            self.model.var_create(var.expression, frame=frame.level, callback=self.__on_created_var, name=new_name)

    def get_frame_items(self):
        return list(self.children(self.stack_item)) if self.stack_item.is_ok() else []

    def get_frames(self):
        return [self.get_item_data(frame_item) for frame_item in self.get_frame_items()]

    def get_frame_count(self):
        if self.stack_item.is_ok():
            return self.get_children_count(self.stack_item, recursive=False)
        else:
            return 0

    def is_frame_item(self, item):
        return item.is_ok() and isinstance(self.get_item_data(item), gdb.GDBStackFrame)

    def add_frame_item(self, frame):
        item = self.append_item(self.stack_item, frame.func + "( )")
        self.update_frame_item(item, frame)

    def update_frame_item(self, frame_item, frame):
        self.set_item_data(frame_item, frame)
        self.set_item_art(frame_item, 'frame.png' if frame.level != 0 else 'frame_active.png')
        self.set_item_has_children(frame_item, True)
        self.set_item_bold(frame_item, True)
        self.set_item_data(frame_item, frame)

    def on_show_frame(self, evt):
        if self.model and self.frame:
            self.GetParent().controller.goto(self.frame.file, self.frame.line)
        self.frame = None

    def __check_stack(self):
        """Return True when the view's frames still mirror the model's
        stack (compared by frame key), so an in-place update is safe."""
        if self.model:
            # Our list of frames is reversed from the models, because that's how we view it.
            for model_frame, view_frame in zip(reversed(self.model.stack), self.get_frames()):
                if model_frame.key != view_frame.key: return False
        return True

    def get_var_frame(self, name):
        """Walk up from var *name*'s tree item to its enclosing stack
        frame; returns None if the var is not under a frame (e.g. watch)."""
        frame = None
        item = self.var_registry[name]
        frames = self.get_frames()
        while frame not in frames:
            item = self.get_parent(item)
            if item.is_ok():
                frame = self.get_item_data(item)
            else:
                return None
        return frame

    def on_step_out(self, evt):
        self.parent.controller.step_out()

    def clear_stack(self):
        n = self.get_frame_count()
        for i in range(n):
            self.pop_stack_frame()

    def rebuild_stack(self):
        self.clear_stack()
        self.update_stack()

    def update_stack(self):
        """Reconcile the frame items with the model's stack: pop stale
        frames, refresh survivors, append new ones, then re-home vars."""
        stack = self.model.stack
        stack_changed=False
        # If the frame count in the view is > the frame count in the model, pop off until they match (tossing frames that no longer exist)
        n = self.get_frame_count()-len(stack)
        if n > 0:
            for i in range(n):
                self.pop_stack_frame()
                stack_changed = True
        for frame_item, frame in zip(self.get_frame_items(), reversed(self.model.stack)):
            self.update_frame_item(frame_item, frame)
        # Otherwise add frames until we're all in sync
        idx = self.get_frame_count()+1
        while self.get_frame_count() < len(self.model.stack):
            frame = stack[len(stack)-idx]
            self.add_frame_item(frame)
            idx += 1
        self.scrub_vars(all_vars=stack_changed)

    def pop_stack_frame(self):
        """Remove the innermost frame item and delete its gdb vars."""
        frame_item = self.get_frame_items()[-1]
        if frame_item.is_ok():
            for child in self.walk(frame_item):
                name = self.get_item_data(child)
                if name in self.var_registry:
                    self.var_registry.pop(name)
                self.model.var_delete(name)
            self.delete(frame_item)
        else:
            print "Can't remove frame. Frame item is NOT ok."

    def update_breakpoints(self):
        """Rebuild the Breakpoints branch from the model's current list."""
        if self.model and self.breakpoints_item.is_ok():
            breakpoints = self.model.breakpoints
            self.delete_children(self.breakpoints_item)
            for bp in breakpoints:
                if bp.fullname:
                    name = os.path.split(os.path.abspath(bp.fullname))[1]
                else:
                    name = '0x%x' % bp.address
                item = self.append_item(self.breakpoints_item, name)
                self.set_item_data(item, bp)
                self.set_item_text(item, str(bp.line), 1)
                self.set_item_art(item, 'stop.png' if bp.enabled else 'stop_disabled.png')

    def on_enable_breakpoint(self, evt):
        if self.breakpoint and self.model:
            self.model.break_enable(self.breakpoint)
        self.breakpoint = None

    def on_disable_breakpoint(self, evt):
        if self.breakpoint and self.model:
            self.model.break_disable(self.breakpoint)
        self.breakpoint = None

    def on_remove_breakpoint(self, evt):
        if self.breakpoint and self.model:
            self.model.break_delete(self.breakpoint)
        self.breakpoint = None

    def on_enable_all_breakpoints(self, evt):
        if self.model:
            for bkpt in self.model.breakpoints:
                self.model.break_enable(bkpt)

    def on_disable_all_breakpoints(self, evt):
        if self.model:
            for bkpt in self.model.breakpoints:
                self.model.break_disable(bkpt)

    def on_remove_all_breakpoints(self, evt):
        if self.model:
            for bkpt in self.model.breakpoints:
                self.model.break_delete(bkpt)

    def update_registers(self, names):
        # NOTE(review): entire body is disabled (kept as a string below);
        # CPU register display is currently a no-op.
        '''
        if self.model and self.registers_item.is_ok():
            registers = self.model.registers
            if len(registers) != self.get_children_count(self.registers_item, recursive=False):
                self.delete_children(self.registers_item)
                for key, value in registers.iteritems():
                    item = self.append_item(self.registers_item, key)
                    self.set_item_text(item, value, 1)
                    self.set_item_data(item, key)
                    self.register_registry[key] = value
            else:
                for child in self.children(self.registers_item):
                    self.set_item_text_colour(child, wx.BLACK)
                for name in names:
                    item = self.register_registry[name]
                    print item
                    self.set_item_text(item, registers[name], 1)
                    self.set_item_text_colour(item, wx.RED)
        '''

    def build_sfr_tree(self):
        """Populate the HW Registers branch from the project's target
        model (groups -> peripherals -> special-function registers)."""
        if not self.parent.controller.project:
            return
        self.delete_children(self.sfr_item)
        target_model = self.parent.controller.project.target
        def walk(self, tree_item, item):
            if isinstance(item, project.Group):
                group_item = self.append_item(tree_item, item.name)
                for child in item.items:
                    walk(self, group_item, child)
            elif isinstance(item, project.Peripheral):
                peripheral_item = self.append_item(tree_item, item.name)
                for child in item.registers:
                    walk(self, peripheral_item, child)
            elif isinstance(item, project.SpecialFunctionRegister):
                sfr_item = self.append_item(tree_item, item.fullname)
                self.set_item_data(sfr_item, item)
        tree_item = self.sfr_item
        for item in target_model.items:
            walk(self, tree_item, item)

    def on_gdb_stopped(self, evt):
        self.update_sfr_tree(self.sfr_item)
        evt.Skip()

    def update_sfr_tree(self, sfr_item, force_root=False, colorize=True):
        """Re-read every visible (expanded) SFR below *sfr_item* from gdb."""
        if force_root:
            items = self.children(sfr_item)
        else:
            items = [sfr_item]
        for i in items:
            for tree_item in self.walk_expanded(i, False):
                item = self.get_item_data(tree_item)
                if hasattr(item, 'expression'):
                    self.model.data_evaluate_expression(item.expression, callback=partial(self.on_sfr_data, tree_item, colorize))

    def on_sfr_data(self, item, colorize, data):
        if data.cls == "done" and hasattr(data, 'value'):
            wx.CallAfter(self.update_sfr_value, item, data.value, colorize)

    def update_sfr_value(self, item, value, colorize=True):
        """Show a register's new value; changed values render red when
        *colorize* is on."""
        current_value = self.get_item_text(item, 1)
        try:
            reg = self.get_item_data(item)
            reg.value = int(value)
            text = "0x%08x" % int(value)
        except:
            text = value
        self.set_item_text(item, text, 1)
        if current_value != text and colorize:
            self.set_item_text_colour(item, wx.RED)
        else:
            self.set_item_text_colour(item, wx.BLACK)

    def update(self):
        pass

    def clear(self):
        """Reset the tree to its empty skeleton and wipe all registries."""
        self.last_watch = ""
        self.DeleteAllItems()
        self.root_item = self.add_root('root')
        self.stack_item = self.append_item(self.root_item,'Call Stack')
        self.breakpoints_item = self.append_item(self.root_item, 'Breakpoints')
        self.registers_item = self.append_item(self.root_item, 'CPU Registers')
        self.watch_item = self.append_item(self.root_item, 'Watch')
        self.sfr_item = self.append_item(self.root_item, 'HW Registers')
        self.set_item_art(self.registers_item, 'chip.png')
        self.set_item_art(self.stack_item, 'stack.png')
        self.set_item_art(self.breakpoints_item, 'breakpoint.png')
        self.set_item_art(self.watch_item, 'magnifier.png')
        self.set_item_art(self.sfr_item, 'application_view_list.png')
        self.lock.acquire()
        self.frames = [] # Frame keys to tree items
        self.var_registry = bidict() # Var names to tree items
        self.pending_var_additions = {}
        self.pending_var_updates = {}
        self.register_registry = bidict()
        self.lock.release()
        self.breakpoint = None

    def __get_evt_item(self, evt):
        # Resolve the tree item for an event, falling back to a hit test
        # when the event doesn't carry a valid item.
        item = evt.GetItem()
        if item and item.IsOk():
            try:
                return self.get_key(item)
            except:
                return None
        pt = evt.GetPosition()
        items = self.HitTest(pt)
        try:
            return self.get_key(items[0])
        except:
            return None

    def set_item_art(self, item, name, style=wx.TreeItemIcon_Normal):
        """Assign icon *name* to *item*, loading it on first use."""
        if name not in self.art:
            self.add_art(name)
        if item.is_ok():
            self.set_item_image(item, self.art[name], style)
        else:
            print "Tried to set art for item that's NOT ok?"
class RuntimeView(view.View):
def __init__(self, *args, **kwargs):
super(RuntimeView, self).__init__(*args, **kwargs)
self.tree = RuntimeTree(self)
# self.tree.Bind(wx.EVT_KEY_DOWN, self.tree.on_key_down)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.tree, 1, wx.EXPAND)
self.SetSizer(sizer)
def set_model(self, model):
self.tree.set_model(model)
def update(self, stack):
self.tree.update()
def add_watch(self, s):
self.tree.add_watch(s)
class GDBDebugView(view.View):
def __init__(self, *args, **kwargs):
super(GDBDebugView, self).__init__(*args, **kwargs)
self.list = DictListCtrl(self, color_changes=False)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.list, 1, wx.EXPAND)
self.SetSizer(sizer)
def set_model(self, model):
self.model = model
print "Binding the var update"
self.model.Bind(gdb.EVT_GDB_UPDATE_VARS, self.on_var_update)
def on_var_update(self, evt):
for name in evt.data:
if name in self.model.vars:
self.list[name] = self.model.vars[name].data
else:
del self.list[name]
evt.Skip()
``` |
[
{
"content": "```python\nimport time\nimport sys\nimport RPi.GPIO as GPIO\n\noff = '1242424352424342424242424242425342524342'\nb0 = '12424243524243424242424242424242424242424242'\nb1 = '124242435242434242424242424242534242424242'\nb2 = '1242424352424342424242424242425353424242'\nb3 = '12424243524243424242424242... | [
{
"content": "<|memory_start|>```python\nimport time\nimport sys\nimport RPi.GPIO as GPIO\n\noff = '1242424352424342424242424242425342524342'\nb0 = '12424243524243424242424242424242424242424242'\nb1 = '124242435242434242424242424242534242424242'\nb2 = '1242424352424342424242424242425353424242'\nb3 = '1242424352... | ```python
import time
import sys
import RPi.GPIO as GPIO
off = '1242424352424342424242424242425342524342'
b0 = '12424243524243424242424242424242424242424242'
b1 = '124242435242434242424242424242534242424242'
b2 = '1242424352424342424242424242425353424242'
b3 = '124242435242434242424242424242424253424242'
b4 = '124242435242434242424242424242524342424242'
b5 = '124242435242434242424242424242425342424242'
b6 = '1242424352424342424242424242425342534242'
b7 = '124242435242434242424242424242424242534242'
b8 = '124242435242434242424242424242524243424242'
b9 = '124242435242434242424242424242425243424242'
if sys.argv[1:] == 'off':
NUM_ATTEMPTS = 1300
else:
NUM_ATTEMPTS = 170
TRANSMIT_PIN = 17
def transmit_code(code):
'''Transmit a chosen code string using the GPIO transmitter'''
GPIO.setmode(GPIO.BCM)
GPIO.setup(TRANSMIT_PIN, GPIO.OUT)
for t in range(NUM_ATTEMPTS):
for i in code:
if i == '1':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00055);
GPIO.output(TRANSMIT_PIN, 0)
elif i == '2':
GPIO.output(TRANSMIT_PIN, 0)
time.sleep(.00011);
GPIO.output(TRANSMIT_PIN, 1)
elif i == '3':
GPIO.output(TRANSMIT_PIN, 0)
time.sleep(.000303);
GPIO.output(TRANSMIT_PIN, 1)
elif i == '4':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00011);
GPIO.output(TRANSMIT_PIN, 0)
elif i == '5':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00029);
GPIO.output(TRANSMIT_PIN, 0)
else:
continue
GPIO.output(TRANSMIT_PIN, 0)
GPIO.cleanup()
if __name__ == '__main__':
for argument in sys.argv[1:]:
exec('transmit_code(' + str(argument) + ')')
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Locating Restriction Sites\n\nUsage:\n REVP.py <input> [--compare] [--max=MAX] [--min=MIN]\n REVP.py (--help | --version)\n\nOptions:\n --compare run a speed comparison of various methods\n ... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Locating Restriction Sites\n\nUsage:\n REVP.py <input> [--compare] [--max=MAX] [--min=MIN]\n REVP.py (--help | --version)\n\nOptions:\n --compare run a speed comparison of var... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Locating Restriction Sites
Usage:
REVP.py <input> [--compare] [--max=MAX] [--min=MIN]
REVP.py (--help | --version)
Options:
--compare run a speed comparison of various methods
--max=MAX Set the maximum length of palindrome to search for, even
numbers should be used
--min=MIN Set the minimum length of palindrome to search for, even
numbers should be used. Less than 4 is not recommended.
-h --help show this help message and exit
-v --version show version and exit
"""
problem_description = """Locating Restriction Sites
Problem
A DNA string is a reverse palindrome if it is equal to its reverse complement.
For instance, GCATGC is a reverse palindrome because its reverse complement is
GCATGC. See Figure 2.
Given: A DNA string of length at most 1 kbp in FASTA format.
Return: The position and length of every reverse palindrome in the string having
length between 4 and 12. You may return these pairs in any order.
Sample Dataset
>Rosalind_24
TCAATGCATGCGGGTCTATATGCAT
Sample Output
4 6
5 4
6 6
7 4
17 4
18 4
20 6
21 4
"""
from docopt import docopt
from time import time
def parse_fasta_sequence(inp_file):
with open(inp_file, 'r') as inp:
name = inp.readline().strip()[1:]
sequence = ''
for line in inp.readlines():
sequence += line.strip()
return name, sequence
def palindromes_by_nuclei(sequence, max_pal=12, min_pal=4):
'''
Checks for reverse palindromes in a DNA sequence; acts as a generator that
will yield the starting offset of a palindrome along with its length.
'''
comp = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
for offset in range(len(sequence)):
mod = 0
#Length is twice the mod value
try:
while sequence[offset - mod] == comp[sequence[offset + mod + 1]]:
mod += 1
if mod * 2 >= min_pal:
yield offset - mod + 1, mod * 2
if mod * 2 >= max_pal or offset - mod < 0:
break
except IndexError: # Expanded past sequence length
pass
def main(max_pal, min_pal):
if max_pal is not None:
max_pal = int(max_pal)
if min_pal is not None:
min_pal = int(min_pal)
name, sequence = parse_fasta_sequence(arguments['<input>'])
for offset, length in palindromes_by_nuclei(sequence, max_pal, min_pal):
print('{0} {1}'.format(offset + 1, length))
def compare(max_pal, min_pal):
if max_pal is not None:
max_pal = int(max_pal)
if min_pal is not None:
min_pal = int(min_pal)
name, sequence = parse_fasta_sequence(arguments['<input>'])
start = time()
for i in range(100):
for offset, length in palindromes_by_nuclei(sequence, max_pal, min_pal):
pass
print('''It took {0} seconds to complete 100 iterations of the Palindrome
by Nuclei search.\n'''.format(time() - start))
if __name__ == '__main__':
arguments = docopt(__doc__, version='0.0.1')
if arguments['--compare']:
compare(arguments['--max'], arguments['--min'])
else:
main(arguments['--max'], arguments['--min'])
``` |
[
{
"content": "```python\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/li... | [
{
"content": "<|memory_start|>```python\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://w... | ```python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.boolq.utils import ops
import tensorflow.compat.v1 as tf
class OpsTest(tf.test.TestCase):
def test_lowercase(self):
with self.test_session() as sess:
test_str = [["Abc%@||", "DZ dzD", ""]]
self.assertEqual(
sess.run(ops.lowercase_op(tf.convert_to_tensor(test_str))).tolist(),
[[x.lower() for x in test_str[0]]])
def test_lowercase_unicode(self):
with self.test_session() as sess:
test_str = ["ŠČŽɬЩЮɦ"]
self.assertEqual(
sess.run(ops.lowercase_op(tf.convert_to_tensor(test_str))).tolist(),
[test_str[0].lower()])
def test_bucket_by_quantiles(self):
with self.test_session() as sess:
data = tf.data.Dataset.from_tensor_slices(list(range(10))).repeat()
data = data.apply(ops.bucket_by_quantiles(
len_fn=lambda x: x, batch_size=4, n_buckets=2,
hist_bounds=[2, 4, 6, 8]))
it = data.make_initializable_iterator()
sess.run(it.initializer)
sess.run(tf.local_variables_initializer())
next_op = it.get_next()
# Let the model gather statistics, it sees 4*5=20 = 2 epochs,
# so each bin should have a count of 4
for _ in range(5):
sess.run(next_op)
counts = sess.run(tf.local_variables()[0])
self.assertEqual(counts.tolist(), [4, 8, 12, 16, 20])
# At this point the model should perfectly quantize the input
for _ in range(4):
out = sess.run(next_op)
if out[0] < 5:
self.assertAllInRange(out, 0, 5)
else:
self.assertAllInRange(out, 5, 10)
if __name__ == "__main__":
tf.test.main()
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\n#! /usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom DataTools import writeDataToFile\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--time-step',dest='time_step',required=False)\n... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n#! /usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom DataTools import writeDataToFile\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--time-step',dest='time_step',r... | ```python
#! /usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from DataTools import writeDataToFile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--time-step',dest='time_step',required=False)
parser.add_argument('--output-file',dest='fn_out',required=False)
args = parser.parse_args()
# Parameters of potential
m = 1.0
k = (2.0*np.pi)**2
angular_freq = np.sqrt(k/m)
freq = angular_freq/(2.0*np.pi)
period = 1.0/freq
# MD Parameters
if(args.time_step):
time_step = np.float64(args.time_step)
else:
time_step = 0.01*period
if(args.fn_out):
fn_out = args.fn_out
else:
fn_out = 'results.data'
showPlots = False
#num_periods = 20
#num_steps = np.int(np.rint( (num_periods*period)/time_step ))
num_steps = 10000
# initial postion and velocity at t=0
initial_position = 2.0
initial_velocity = 0.0
def getPotentialEnergy(x):
potential_ener = 0.5*k*x**2
return potential_ener
#-------------------------------
def getForce(x):
force = -k*x
return force
#-------------------------------
def getAccleration(x):
return getForce(x)/m
#-------------------------------
def getPotentialAndForce(x):
return ( getPotentialEnergy(x), getForce(x) )
#-------------------------------
def getKineticEnergy(v):
kinetic_ener = 0.5*m*v**2
return kinetic_ener
#-------------------------------
def getTotalEnergy(x,v):
return getPotentialEnergy(x)+getKineticEnergy(v)
#-------------------------------
# analytical solution:
phi = np.arctan(-initial_velocity/(initial_position*angular_freq))
amplitude = initial_position/np.cos(phi)
conserved_energy = getPotentialEnergy(amplitude)
# ----------------------
times = []
positions = []
velocites = []
pot_energies = []
kin_energies = []
tot_energies = []
time = 0.0
curr_position = initial_position
prev_position = curr_position-initial_velocity*time_step + 0.5*getAccleration(curr_position)*time_step**2
curr_velocity = initial_velocity
for i in range(num_steps):
if (i+1) % (num_steps/10) == 0:
print 'MD step {0:6d} of {1:6d}'.format(i+1,num_steps)
# get force at t
accleration = getAccleration(curr_position)
# get new position at t+dt
new_position = 2.0*curr_position - prev_position + accleration*time_step**2
# get velocity at t
curr_velocity = (new_position - prev_position) / (2.0*time_step)
# get energies at t
curr_pot_ener = getPotentialEnergy(curr_position)
curr_kin_ener = getKineticEnergy(curr_velocity)
curr_tot_ener = curr_pot_ener + curr_kin_ener
#
times.append( time )
positions.append( curr_position )
velocites.append( curr_velocity )
pot_energies.append( curr_pot_ener )
kin_energies.append( curr_kin_ener )
tot_energies.append( curr_tot_ener )
#
prev_position = curr_position
curr_position = new_position
time += time_step
#
#----------------------------------------
times = np.array(times)
positions = np.array(positions)
velocites = np.array(velocites)
pot_energies = np.array(pot_energies)
kin_energies = np.array(kin_energies)
tot_energies = np.array(tot_energies)
positions_analytical = amplitude*np.cos(angular_freq*times+phi)
velocites_analytical = -angular_freq*amplitude*np.sin(angular_freq*times+phi)
writeDataToFile(fn_out,
[times,positions,velocites,pot_energies,kin_energies,tot_energies,positions_analytical,velocites_analytical],
['time','pos','vel','pot_ene','kin_ene','tot_ene','pos_an','vel_an'],
constantsNames=['time_step','period','amplitude','k','m','phi','conserved_energy'],
constantsValues=[time_step,period,amplitude,k,m,phi,conserved_energy],
dataFormat='%15.8f')
if showPlots:
plt.figure(1)
plt.plot(times,tot_energies)
plt.plot(times,pot_energies)
plt.plot(times,kin_energies)
plt.show()
plt.figure(2)
plt.plot(times,pot_energies)
plt.show()
plt.figure(3)
plt.plot(times,kin_energies)
plt.show()
plt.figure(4)
plt.plot(times,velocites)
plt.show()
plt.figure(5)
plt.plot(times,positions)
plt.plot(times,positions_analytical)
plt.show()
plt.figure(6)
plt.plot(times,positions-positions_analytical)
plt.show()
#
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# -*- coding: utf-8 -*-\n#\n# method.py - commander\n#\n# Copyright (C) 2010 - Jesse van den Kieboom\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# ... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# method.py - commander\n#\n# Copyright (C) 2010 - Jesse van den Kieboom\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as p... | ```python
# -*- coding: utf-8 -*-
#
# method.py - commander
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import commands.exceptions as exceptions
import types
import inspect
import sys
import utils
class Method:
def __init__(self, method, name, parent):
self.method = method
self.real_name = name
self.name = name.replace('_', '-')
self.parent = parent
self._func_props = None
def __str__(self):
return self.name
def autocomplete_func(self):
if hasattr(self.method, 'autocomplete'):
return getattr(self.method, 'autocomplete')
return None
def accelerator(self):
if hasattr(self.method, 'accelerator'):
return getattr(self.method, 'accelerator')
return None
def args(self):
fp = self.func_props()
return fp.args, fp.varargs
def func_props(self):
if not self._func_props:
# Introspect the function arguments
self._func_props = utils.getargspec(self.method)
return self._func_props
def commands(self):
return []
def cancel(self, view):
if self.parent:
self.parent.cancel(view, self)
def cancel_continuation(self, view):
if self.parent:
self.parent.continuation(view, self)
def doc(self):
if self.method.__doc__:
return self.method.__doc__
else:
return ''
def oneline_doc(self):
return self.doc().split("\n")[0]
def execute(self, argstr, words, entry, modifier, kk = {}):
fp = self.func_props()
kwargs = {'argstr': argstr, 'args': words, 'entry': entry, 'view': entry.view(), 'modifier': modifier, 'window': entry.view().get_toplevel()}
oargs = list(fp.args)
args = []
idx = 0
if fp.defaults:
numdef = len(fp.defaults)
else:
numdef = 0
for k in fp.args:
if k in kwargs:
args.append(kwargs[k])
oargs.remove(k)
del kwargs[k]
elif idx >= len(words):
if numdef < len(oargs):
raise exceptions.Execute('Invalid number of arguments (need %s)' % (oargs[0],))
else:
args.append(words[idx])
oargs.remove(k)
idx += 1
# Append the rest if it can handle varargs
if fp.varargs and idx < len(words):
args.extend(words[idx:])
if not fp.keywords:
kwargs = {}
for k in kk:
kwargs[k] = kk[k]
return self.method(*args, **kwargs)
def __lt__(self, other):
if isinstance(other, Method):
return self.name < other.name
else:
return self.name < other
# vi:ex:ts=4:et
``` |
[
{
"content": "Recreate the original code text:\n```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# licen... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
"""Describes network resource usage.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource identifier.
:vartype id: str
:ivar unit: Required. An enum describing the unit of measurement. Default
value: "Count" .
:vartype unit: str
:param current_value: Required. The current value of the usage.
:type current_value: long
:param limit: Required. The limit of usage.
:type limit: long
:param name: Required. The name of the type of usage.
:type name: ~azure.mgmt.network.v2018_01_01.models.UsageName
"""
_validation = {
'id': {'readonly': True},
'unit': {'required': True, 'constant': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'UsageName'},
}
unit = "Count"
def __init__(self, *, current_value: int, limit: int, name, **kwargs) -> None:
super(Usage, self).__init__(**kwargs)
self.id = None
self.current_value = current_value
self.limit = limit
self.name = name
``` |
[
{
"content": "```python\n\"\"\"\nClasses for generating diff coverage reports.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom abc import ABCMeta, abstractmethod\nfrom jinja2 import Environment, PackageLoader\nfrom jinja2_pluralize import pluralize_dj\nfrom diff_cover.snippets import Snippet\nimport six\... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nClasses for generating diff coverage reports.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom abc import ABCMeta, abstractmethod\nfrom jinja2 import Environment, PackageLoader\nfrom jinja2_pluralize import pluralize_dj\nfrom diff_cover.snippets import Snip... | ```python
"""
Classes for generating diff coverage reports.
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from jinja2 import Environment, PackageLoader
from jinja2_pluralize import pluralize_dj
from diff_cover.snippets import Snippet
import six
class DiffViolations(object):
"""
Class to capture violations generated by a particular diff
"""
def __init__(self, violations, measured_lines, diff_lines):
self.lines = {
violation.line for violation in violations
}.intersection(diff_lines)
self.violations = {
violation for violation in violations
if violation.line in self.lines
}
# By convention, a violation reporter
# can return `None` to indicate that all lines are "measured"
# by default. This is an optimization to avoid counting
# lines in all the source files.
if measured_lines is None:
self.measured_lines = set(diff_lines)
else:
self.measured_lines = set(measured_lines).intersection(diff_lines)
class BaseReportGenerator(object):
"""
Generate a diff coverage report.
"""
__metaclass__ = ABCMeta
def __init__(self, violations_reporter, diff_reporter):
"""
Configure the report generator to build a report
from `violations_reporter` (of type BaseViolationReporter)
and `diff_reporter` (of type BaseDiffReporter)
"""
self._violations = violations_reporter
self._diff = diff_reporter
self._diff_violations_dict = None
self._cache_violations = None
@abstractmethod
def generate_report(self, output_file):
"""
Write the report to `output_file`, which is a file-like
object implementing the `write()` method.
Concrete subclasses should access diff coverage info
using the base class methods.
"""
pass
def coverage_report_name(self):
"""
Return the name of the coverage report.
"""
return self._violations.name()
def diff_report_name(self):
"""
Return the name of the diff.
"""
return self._diff.name()
def src_paths(self):
"""
Return a list of source files in the diff
for which we have coverage information.
"""
return {src for src, summary in self._diff_violations().items()
if len(summary.measured_lines) > 0}
def percent_covered(self, src_path):
"""
Return a float percent of lines covered for the source
in `src_path`.
If we have no coverage information for `src_path`, returns None
"""
diff_violations = self._diff_violations().get(src_path)
if diff_violations is None:
return None
# Protect against a divide by zero
num_measured = len(diff_violations.measured_lines)
if num_measured > 0:
num_uncovered = len(diff_violations.lines)
return 100 - float(num_uncovered) / num_measured * 100
else:
return None
def violation_lines(self, src_path):
"""
Return a list of lines in violation (integers)
in `src_path` that were changed.
If we have no coverage information for
`src_path`, returns an empty list.
"""
diff_violations = self._diff_violations().get(src_path)
if diff_violations is None:
return []
return sorted(diff_violations.lines)
def total_num_lines(self):
"""
Return the total number of lines in the diff for
which we have coverage info.
"""
return sum([len(summary.measured_lines) for summary
in self._diff_violations().values()])
def total_num_violations(self):
"""
Returns the total number of lines in the diff
that are in violation.
"""
return sum(
len(summary.lines)
for summary
in self._diff_violations().values()
)
def total_percent_covered(self):
"""
Returns the float percent of lines in the diff that are covered.
(only counting lines for which we have coverage info).
"""
total_lines = self.total_num_lines()
if total_lines > 0:
num_covered = total_lines - self.total_num_violations()
return int(float(num_covered) / total_lines * 100)
else:
return 100
def _diff_violations(self):
"""
Returns a dictionary of the form:
{ SRC_PATH: DiffViolations(SRC_PATH) }
where `SRC_PATH` is the path to the source file.
To make this efficient, we cache and reuse the result.
"""
if not self._diff_violations_dict:
self._diff_violations_dict = {
src_path: DiffViolations(
self._violations.violations(src_path),
self._violations.measured_lines(src_path),
self._diff.lines_changed(src_path),
)
for src_path in self._diff.src_paths_changed()
}
return self._diff_violations_dict
# Set up the template environment
TEMPLATE_LOADER = PackageLoader(__package__)
TEMPLATE_ENV = Environment(loader=TEMPLATE_LOADER,
trim_blocks=True,
lstrip_blocks=True)
TEMPLATE_ENV.filters['iteritems'] = six.iteritems
TEMPLATE_ENV.filters['pluralize'] = pluralize_dj
class TemplateReportGenerator(BaseReportGenerator):
"""
Reporter that uses a template to generate the report.
"""
# Subclasses override this to specify the name of the templates
# If not overridden, the template reporter will raise an exception
TEMPLATE_NAME = None
CSS_TEMPLATE_NAME = None
# Subclasses should set this to True to indicate
# that they want to include source file snippets.
INCLUDE_SNIPPETS = False
def __init__(self, violations_reporter, diff_reporter, css_url=None):
super(TemplateReportGenerator, self).__init__(violations_reporter, diff_reporter)
self.css_url = css_url
def generate_report(self, output_file):
"""
See base class.
output_file must be a file handler that takes in bytes!
"""
if self.TEMPLATE_NAME is not None:
template = TEMPLATE_ENV.get_template(self.TEMPLATE_NAME)
report = template.render(self._context())
if isinstance(report, six.string_types):
report = report.encode('utf-8')
output_file.write(report)
def generate_css(self, output_file):
"""
Generate an external style sheet file.
output_file must be a file handler that takes in bytes!
"""
if self.CSS_TEMPLATE_NAME is not None:
template = TEMPLATE_ENV.get_template(self.CSS_TEMPLATE_NAME)
style = template.render(self._context())
if isinstance(style, six.string_types):
style = style.encode('utf-8')
output_file.write(style)
def _context(self):
"""
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
"""
# Calculate the information to pass to the template
src_stats = {
src: self._src_path_stats(src) for src in self.src_paths()
}
# Include snippet style info if we're displaying
# source code snippets
if self.INCLUDE_SNIPPETS:
snippet_style = Snippet.style_defs()
else:
snippet_style = None
return {
'css_url': self.css_url,
'report_name': self.coverage_report_name(),
'diff_name': self.diff_report_name(),
'src_stats': src_stats,
'total_num_lines': self.total_num_lines(),
'total_num_violations': self.total_num_violations(),
'total_percent_covered': self.total_percent_covered(),
'snippet_style': snippet_style
}
@staticmethod
def combine_adjacent_lines(line_numbers):
"""
Given a sorted collection of line numbers this will
turn them to strings and combine adjacent values
[1, 2, 5, 6, 100] -> ["1-2", "5-6", "100"]
"""
combine_template = "{0}-{1}"
combined_list = []
# Add a terminating value of `None` to list
line_numbers.append(None)
start = line_numbers[0]
end = None
for line_number in line_numbers[1:]:
# If the current number is adjacent to the previous number
if (end if end else start) + 1 == line_number:
end = line_number
else:
if end:
combined_list.append(combine_template.format(start, end))
else:
combined_list.append(str(start))
start = line_number
end = None
return combined_list
def _src_path_stats(self, src_path):
"""
Return a dict of statistics for the source file at `src_path`.
"""
# Find violation lines
violation_lines = self.violation_lines(src_path)
violations = sorted(self._diff_violations()[src_path].violations)
# Load source snippets (if the report will display them)
# If we cannot load the file, then fail gracefully
if self.INCLUDE_SNIPPETS:
try:
snippets = Snippet.load_snippets_html(src_path, violation_lines)
except IOError:
snippets = []
else:
snippets = []
return {
'percent_covered': self.percent_covered(src_path),
'violation_lines': TemplateReportGenerator.combine_adjacent_lines(violation_lines),
'violations': violations,
'snippets_html': snippets
}
class StringReportGenerator(TemplateReportGenerator):
"""
Generate a string diff coverage report.
"""
TEMPLATE_NAME = "console_coverage_report.txt"
class HtmlReportGenerator(TemplateReportGenerator):
"""
Generate an HTML formatted diff coverage report.
"""
TEMPLATE_NAME = "html_coverage_report.html"
CSS_TEMPLATE_NAME = "external_style.css"
INCLUDE_SNIPPETS = True
class StringQualityReportGenerator(TemplateReportGenerator):
"""
Generate a string diff quality report.
"""
TEMPLATE_NAME = "console_quality_report.txt"
class HtmlQualityReportGenerator(TemplateReportGenerator):
"""
Generate an HTML formatted diff quality report.
"""
TEMPLATE_NAME = "html_quality_report.html"
CSS_TEMPLATE_NAME = "external_style.css"
INCLUDE_SNIPPETS = True
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n#!/usr/bin/env python\n\n\"\"\"\nmain.py -- Udacity conference server-side Python App Engine\n HTTP controller handlers for memcache & task queue access\n\n$Id$\n\ncreated by wesc on 2014 may 24\n\n\"\"\"\n\n__autho... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"\nmain.py -- Udacity conference server-side Python App Engine\n HTTP controller handlers for memcache & task queue access\n\n$Id$\n\ncreated by wesc on 2014 may 24\n\n\... | ```python
#!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
from google.appengine.api import memcache
from google.appengine.ext import ndb
from models import Session
from conference import MEMCACHE_FEATURED_SPEAKER_KEY
class SetAnnouncementHandler(webapp2.RequestHandler):
    """Cron handler that refreshes the cached announcement."""
    def get(self):
        """Set Announcement in Memcache."""
        # Delegate to the API class, which computes and memcaches the text.
        ConferenceApi._cacheAnnouncement()
        # 204 No Content: the cron caller needs no response body.
        self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    """Task-queue handler that emails the creator of a new conference.
    POST params: ``email`` (recipient address) and ``conferenceInfo``
    (text describing the conference) — presumably set by the enqueuing
    code in conference.py; verify against the caller.
    """
    def post(self):
        """Send email confirming Conference creation."""
        mail.send_mail(
            'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()), # from
            self.request.get('email'), # to
            'You created a new Conference!', # subj
            'Hi, you have created a following ' # body
            'conference:\r\n\r\n%s' % self.request.get(
                'conferenceInfo')
        )
# - - - Task 4: Add a Task - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The task will check if there is more than one session by this speaker at this conference,
# also add a new Memcache entry that features the speaker and session names.
class CheckFeaturedSpeakerHandler(webapp2.RequestHandler):
    """Task-queue handler: feature a speaker who has multiple sessions.
    POST params (set by the enqueuing request):
        sessionKey         -- urlsafe key of the session just created
        speakerKey         -- key string identifying the speaker
        speakerDisplayName -- human-readable speaker name for the banner
    """
    def post(self):
        """Set memcache entry if speaker has more than one session."""
        # BUG FIX: the original referenced an undefined name ``confKey``
        # (NameError at runtime).  Sessions are children of their
        # conference, so the conference key is the parent of the session
        # key supplied by the enqueuing request.
        conf_key = ndb.Key(urlsafe=self.request.get('sessionKey')).parent()
        sessions = Session.query(ancestor=conf_key).filter(
            Session.speakerKey == self.request.get('speakerKey'))
        if sessions.count() > 1:
            memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY,
                         '%s is our latest Featured Speaker' % self.request.get(
                             'speakerDisplayName'))
# Route cron and task-queue URLs to their handlers.  The URLs are
# presumably referenced from cron.yaml / the task-queue enqueue calls —
# verify against the deployment config.
app = webapp2.WSGIApplication([
    ('/crons/set_announcement', SetAnnouncementHandler),
    ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
    ('/tasks/check_featuredSpeaker', CheckFeaturedSpeakerHandler),
], debug=True)
``` |
[
{
"content": "Replicate the source code:\n```python\n# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton\n# www.bluepines.org\n# Copyright (c) 2012 42 Lines Inc., Jim Browne\n#\n# Permission ... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2010, Eucalyptus Systems, Inc.\n# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton\n# www.bluepines.org\n# Copyright (c) 2012 42 Lines Inc., Jim Browne\n... | ```python
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
# www.bluepines.org
# Copyright (c) 2012 42 Lines Inc., Jim Browne
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import exception
import random
import urllib
import uuid
import xml.sax
import boto
from boto.connection import AWSAuthConnection
from boto import handler
import boto.jsonresponse
from boto.route53.record import ResourceRecordSets
from boto.route53.zone import Zone
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
#boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
    """Low-level connection to the Amazon Route 53 DNS web service."""
    DefaultHost = 'route53.amazonaws.com'
    """The default Route53 API endpoint to connect to."""
    Version = '2013-04-01'
    """Route53 API version."""
    XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
    """XML schema for this Route53 API version."""
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 port=None, proxy=None, proxy_port=None,
                 host=DefaultHost, debug=0, security_token=None,
                 validate_certs=True, https_connection_factory=None,
                 profile_name=None):
        super(Route53Connection, self).__init__(host,
                                                aws_access_key_id, aws_secret_access_key,
                                                True, port, proxy, proxy_port, debug=debug,
                                                security_token=security_token,
                                                validate_certs=validate_certs,
                                                https_connection_factory=https_connection_factory,
                                                profile_name=profile_name)
    def _required_auth_capability(self):
        # Route53 uses its own request-signing scheme.
        return ['route53']
    def make_request(self, action, path, headers=None, data='', params=None):
        """Issue an HTTP request, URL-encoding ``params`` into the query
        string and retrying via :meth:`_retry_handler`."""
        if params:
            pairs = []
            for key, val in params.iteritems():
                if val is None:
                    continue
                pairs.append(key + '=' + urllib.quote(str(val)))
            path += '?' + '&'.join(pairs)
        return super(Route53Connection, self).make_request(action, path,
                                                           headers, data,
                                                           retry_handler=self._retry_handler)
    # Hosted Zones
    def get_all_hosted_zones(self, start_marker=None, zone_list=None):
        """
        Returns a Python data structure with information about all
        Hosted Zones defined for the AWS account.
        :param int start_marker: start marker to pass when fetching additional
            results after a truncated list
        :param list zone_list: a HostedZones list to prepend to results
        """
        params = {}
        if start_marker:
            params = {'marker': start_marker}
        response = self.make_request('GET', '/%s/hostedzone' % self.Version,
                                     params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='HostedZones',
                                      item_marker=('HostedZone',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        if zone_list:
            e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
        # Follow pagination: recurse with the marker, accumulating zones
        # in zone_list until no NextMarker remains.
        while 'NextMarker' in e['ListHostedZonesResponse']:
            next_marker = e['ListHostedZonesResponse']['NextMarker']
            zone_list = e['ListHostedZonesResponse']['HostedZones']
            e = self.get_all_hosted_zones(next_marker, zone_list)
        return e
    def get_hosted_zone(self, hosted_zone_id):
        """
        Get detailed information about a particular Hosted Zone.
        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone
        """
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='NameServers',
                                      item_marker=('NameServer',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    def get_hosted_zone_by_name(self, hosted_zone_name):
        """
        Get detailed information about a particular Hosted Zone.
        Returns None if no zone with that name exists.
        :type hosted_zone_name: str
        :param hosted_zone_name: The fully qualified domain name for the Hosted
            Zone
        """
        if hosted_zone_name[-1] != '.':
            hosted_zone_name += '.'
        all_hosted_zones = self.get_all_hosted_zones()
        for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
            #check that they gave us the FQDN for their zone
            if zone['Name'] == hosted_zone_name:
                return self.get_hosted_zone(zone['Id'].split('/')[-1])
    def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
        """
        Create a new Hosted Zone. Returns a Python data structure with
        information about the newly created Hosted Zone.
        :type domain_name: str
        :param domain_name: The name of the domain. This should be a
            fully-specified domain, and should end with a final period
            as the last label indication. If you omit the final period,
            Amazon Route 53 assumes the domain is relative to the root.
            This is the name you have registered with your DNS registrar.
            It is also the name you will delegate from your registrar to
            the Amazon Route 53 delegation servers returned in
            response to this request.A list of strings with the image
            IDs wanted.
        :type caller_ref: str
        :param caller_ref: A unique string that identifies the request
            and that allows failed CreateHostedZone requests to be retried
            without the risk of executing the operation twice. If you don't
            provide a value for this, boto will generate a Type 4 UUID and
            use that.
        :type comment: str
        :param comment: Any comments you want to include about the hosted
            zone.
        """
        if caller_ref is None:
            caller_ref = str(uuid.uuid4())
        params = {'name': domain_name,
                  'caller_ref': caller_ref,
                  'comment': comment,
                  'xmlns': self.XMLNameSpace}
        xml_body = HZXML % params
        uri = '/%s/hostedzone' % self.Version
        response = self.make_request('POST', uri,
                                     {'Content-Type': 'text/xml'}, xml_body)
        body = response.read()
        boto.log.debug(body)
        # 201 Created is the only success status for this call.
        if response.status == 201:
            e = boto.jsonresponse.Element(list_marker='NameServers',
                                          item_marker=('NameServer',))
            h = boto.jsonresponse.XmlHandler(e, None)
            h.parse(body)
            return e
        else:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
    def delete_hosted_zone(self, hosted_zone_id):
        """Delete the Hosted Zone with the given id; returns the parsed
        response (includes the ChangeInfo for the deletion)."""
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('DELETE', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status not in (200, 204):
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    # Resource Record Sets
    def get_all_rrsets(self, hosted_zone_id, type=None,
                       name=None, identifier=None, maxitems=None):
        """
        Retrieve the Resource Record Sets defined for this Hosted Zone.
        Returns the raw XML data returned by the Route53 call.
        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone
        :type type: str
        :param type: The type of resource record set to begin the record
            listing from. Valid choices are:
                * A
                * AAAA
                * CNAME
                * MX
                * NS
                * PTR
                * SOA
                * SPF
                * SRV
                * TXT
            Valid values for weighted resource record sets:
                * A
                * AAAA
                * CNAME
                * TXT
            Valid values for Zone Apex Aliases:
                * A
                * AAAA
        :type name: str
        :param name: The first name in the lexicographic ordering of domain
            names to be retrieved
        :type identifier: str
        :param identifier: In a hosted zone that includes weighted resource
            record sets (multiple resource record sets with the same DNS
            name and type that are differentiated only by SetIdentifier),
            if results were truncated for a given DNS name and type,
            the value of SetIdentifier for the next resource record
            set that has the current DNS name and type
        :type maxitems: int
        :param maxitems: The maximum number of records
        """
        params = {'type': type, 'name': name,
                  'Identifier': identifier, 'maxitems': maxitems}
        uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri, params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs
    def change_rrsets(self, hosted_zone_id, xml_body):
        """
        Create or change the authoritative DNS information for this
        Hosted Zone.
        Returns a Python data structure with information about the set of
        changes, including the Change ID.
        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone
        :type xml_body: str
        :param xml_body: The list of changes to be made, defined in the
            XML schema defined by the Route53 service.
        """
        uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
        response = self.make_request('POST', uri,
                                     {'Content-Type': 'text/xml'},
                                     xml_body)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    def get_change(self, change_id):
        """
        Get information about a proposed set of changes, as submitted
        by the change_rrsets method.
        Returns a Python data structure with status information about the
        changes.
        :type change_id: str
        :param change_id: The unique identifier for the set of changes.
            This ID is returned in the response to the change_rrsets method.
        """
        uri = '/%s/change/%s' % (self.Version, change_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    def create_zone(self, name):
        """
        Create a new Hosted Zone. Returns a Zone object for the newly
        created Hosted Zone.
        :type name: str
        :param name: The name of the domain. This should be a
            fully-specified domain, and should end with a final period
            as the last label indication. If you omit the final period,
            Amazon Route 53 assumes the domain is relative to the root.
            This is the name you have registered with your DNS registrar.
            It is also the name you will delegate from your registrar to
            the Amazon Route 53 delegation servers returned in
            response to this request.
        """
        zone = self.create_hosted_zone(name)
        return Zone(self, zone['CreateHostedZoneResponse']['HostedZone'])
    def get_zone(self, name):
        """
        Returns a Zone object for the specified Hosted Zone.
        Returns None if no matching zone is found.
        :param name: The name of the domain. This should be a
            fully-specified domain, and should end with a final period
            as the last label indication.
        """
        name = self._make_qualified(name)
        for zone in self.get_zones():
            if name == zone.name:
                return zone
    def get_zones(self):
        """
        Returns a list of Zone objects, one for each of the Hosted
        Zones defined for the AWS account.
        """
        zones = self.get_all_hosted_zones()
        return [Zone(self, zone) for zone in
                zones['ListHostedZonesResponse']['HostedZones']]
    def _make_qualified(self, value):
        """
        Ensure passed domain names end in a period (.) character.
        This will usually make a domain fully qualified.
        Accepts either a single name or a list/tuple/set of names.
        """
        if type(value) in [list, tuple, set]:
            new_list = []
            for record in value:
                if record and not record[-1] == '.':
                    new_list.append("%s." % record)
                else:
                    new_list.append(record)
            return new_list
        else:
            value = value.strip()
            if value and not value[-1] == '.':
                value = "%s." % value
            return value
    def _retry_handler(self, response, i, next_sleep):
        """Retry hook passed to make_request: returns (msg, attempt,
        sleep) to trigger a retry, or None to give up."""
        status = None
        boto.log.debug("Saw HTTP status: %s" % response.status)
        if response.status == 400:
            code = response.getheader('Code')
            if code and 'PriorRequestNotComplete' in code:
                # This is a case where we need to ignore a 400 error, as
                # Route53 returns this. See
                # http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
                msg = "%s, retry attempt %s" % (
                    'PriorRequestNotComplete',
                    i
                )
                # Exponential backoff with jitter.
                next_sleep = random.random() * (2 ** i)
                i += 1
                status = (msg, i, next_sleep)
        return status
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n\nfrom jsk_network_tools.msg import FC2OCS, OCS2FC\nfrom jsk_network_tools.silverhammer_util import *\nfrom threading import Lock, Thread\nfrom socket import *\nfrom struct import Struct\nimport os\nimport rospy\nimport signal\nimport sys\nimport roslib\nfrom rosl... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\nfrom jsk_network_tools.msg import FC2OCS, OCS2FC\nfrom jsk_network_tools.silverhammer_util import *\nfrom threading import Lock, Thread\nfrom socket import *\nfrom struct import Struct\nimport os\nimport rospy\nimport signal\nimport sys\nimport r... | ```python
#!/usr/bin/env python
from jsk_network_tools.msg import FC2OCS, OCS2FC
from jsk_network_tools.silverhammer_util import *
from threading import Lock, Thread
from socket import *
from struct import Struct
import os
import rospy
import signal
import sys
import roslib
from roslib.message import get_message_class
from std_msgs.msg import Time
import diagnostic_updater
from diagnostic_msgs.msg import DiagnosticStatus
class SilverHammerUDPListener():
    """Receive one UDP datagram, unpack it into a ROS message and publish it.
    NOTE(review): not used by the __main__ entry point below — presumably
    kept for callers elsewhere; verify before removing.
    """
    def __init__(self, server, buffer_size, format, message, pub):
        # server: bound UDP socket; format: struct format string for
        # unpackMessage; message: ROS message class; pub: rospy publisher.
        self.server = server
        self.format = format
        self.pub = pub
        self.message = message
        self.buffer_size = buffer_size
    def run(self):
        # Blocking receive of a single datagram, then unpack and publish.
        recv_data, addr = self.server.recvfrom(self.buffer_size)
        msg = unpackMessage(recv_data, self.format, self.message)
        self.pub.publish(msg)
        print "received:", msg
class SilverHammerLowspeedReceiver():
    """Receive fixed-format UDP datagrams and republish them as ROS messages.
    Reads the message type, bind address/port and buffer size from ROS
    parameters, publishes every unpacked datagram on ~output, and reports
    reception/publish timing through diagnostic_updater.
    """
    def __init__(self):
        message_class_str = rospy.get_param("~message",
                                            "jsk_network_tools/FC2OCS")
        try:
            self.receive_message = get_message_class(message_class_str)
        except:
            raise Exception("invalid topic type: %s"%message_class_str)
        self.lock = Lock()
        self.launched_time = rospy.Time.now()
        self.diagnostic_updater = diagnostic_updater.Updater()
        self.diagnostic_updater.setHardwareID("none")
        self.diagnostic_updater.add("LowspeedReceiver", self.diagnosticCallback)
        self.received_num = 0
        self.receive_port = rospy.get_param("~receive_port", 1024)
        self.receive_ip = rospy.get_param("~receive_ip", "192.168.8.101")
        self.receive_buffer = rospy.get_param("~receive_buffer_size", 250)
        # Bind a blocking UDP socket (settimeout(None) => block forever).
        self.socket_server = socket(AF_INET, SOCK_DGRAM)
        self.socket_server.settimeout(None)
        self.socket_server.bind((self.receive_ip, self.receive_port))
        # Struct format derived from an instance of the message class.
        self.receive_format = msgToStructFormat(self.receive_message())
        self.pub = rospy.Publisher("~output", self.receive_message)
        self.last_received_time = rospy.Time(0)
        self.last_received_time_pub = rospy.Publisher(
            "~last_received_time", Time)
        self.last_publish_output_time = rospy.Time(0)
        self.last_publish_output_time_pub = rospy.Publisher(
            "~last_publish_output_time", Time)
        self.diagnostic_timer = rospy.Timer(rospy.Duration(1.0 / 10),
                                            self.diagnosticTimerCallback)
    def diagnosticTimerCallback(self, event):
        """Periodic (10 Hz) diagnostics refresh and timestamp publishing."""
        self.diagnostic_updater.update()
        # and publish time
        with self.lock:
            self.last_publish_output_time_pub.publish(self.last_publish_output_time)
            self.last_received_time_pub.publish(self.last_received_time)
    def diagnosticCallback(self, stat):
        """Fill in the diagnostics status with uptime/latency figures."""
        # always OK
        stat.summary(DiagnosticStatus.OK, "OK")
        with self.lock:
            now = rospy.Time.now()
            stat.add("Uptime [sec]",
                     (now - self.launched_time).to_sec())
            stat.add("Time from the last reception [sec]",
                     (now - self.last_received_time).to_sec())
            stat.add("Time from the last publish ~output [sec]",
                     (now - self.last_publish_output_time).to_sec())
            stat.add("UDP address", self.receive_ip)
            stat.add("UDP port", self.receive_port)
        return stat
    def run(self):
        """Main loop: block on recvfrom, unpack and publish until shutdown."""
        while not rospy.is_shutdown():
            recv_data, addr = self.socket_server.recvfrom(self.receive_buffer)
            msg = unpackMessage(recv_data, self.receive_format,
                                self.receive_message)
            with self.lock:
                self.last_received_time = rospy.Time.now()
            self.pub.publish(msg)
            with self.lock:
                self.last_publish_output_time = rospy.Time.now()
            # BUG FIX: rospy.logdebug %-formats its first argument with the
            # rest; the original ("received:", msg) raised TypeError
            # ("not all arguments converted") whenever debug logging fired.
            rospy.logdebug("received: %s", msg)
if __name__ == "__main__":
    # Entry point: initialize the ROS node, then block in the receive loop.
    rospy.init_node("silverhammer_lowspeed_receiver")
    rc = SilverHammerLowspeedReceiver()
    rc.run()
``` |
[
{
"content": "```python\n# ----------------------------------------------------------------------------\n# Copyright 2015 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the ... | [
{
"content": "<|memory_start|>```python\n# ----------------------------------------------------------------------------\n# Copyright 2015 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtai... | ```python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines Tensor and Backend class
"""
import numpy as np
import logging
from math import ceil
logger = logging.getLogger(__name__)
class OpCollection(object):
    """
    A collection of the set of operation strings
    """
    # Ops that take no tensor operand (generators).
    zero_operand_ops = {"rand", "onehot"}
    # Ops applied to a single operand.
    unary_ops = {"finite", "neg", "abs", "sgn", "sqrt", "sqr", "exp", "log",
                 "exp2", "log2", "sig", "sig2", "tanh", "tanh2", "transpose",
                 "safelog"}
    # Ops combining two operands (arithmetic, comparison, dot, assign).
    binary_ops = {"assign", "add", "sub", "mul", "div", "eq", "ne", "lt", "le",
                  "gt", "ge", "pow", "minimum", "maximum", "dot"}
    # Ops that reduce along an axis.
    reduction_ops = {"sum", "max", "min", "argmax", "argmin"}
    # All float-producing ops; elementwise ops exclude dot/transpose.
    float_ops = zero_operand_ops | unary_ops | binary_ops
    ew_ops = float_ops - {'dot', 'transpose'}
class Tensor(object):
    """
    The n-dimensional array data structure. GPUTensor and Tensor inherits
    Tensor. Depending on backend, may have additional keyword arguments.
    All non-keywords arguments shall be in exact same order as Tensor.
    Arguments:
        backend (Backend): backend of the tensor.
        shape (tuple, optional): shape of the tensor.
        dtype (numpy.ndtype, optional): underlying data type of the elements.
        name (str, optional): name indentifying the tensor (used in printing).
        persist_values (bool, optional): If set to True (the default), the
                                         values assigned to this Tensor will
                                         persist across multiple begin and
                                         end calls.  Setting to False may
                                         provide a performance increase if
                                         values do not need to be maintained
                                         across such calls
    See also:
        GPUTensor class, Tensor class
    Notes:
        Unlike numpy, in this implementation we never collapse dimensions, and
        the minimal number of dimensions will be _min_dims (currently set to
        2).  So a wrapped scalar will have dimension 1x1.
    """
    def __init__(self,
                 backend,
                 shape=None,
                 dtype=np.float32,
                 name=None,
                 persist_values=True):
        self.backend = backend
        self.shape = shape
        self.dtype = dtype
        self.name = name
        self.persist_values = persist_values
        # Minimal dimensionality (see class Notes): scalars become 1x1.
        self._min_dims = 2
    def __str__(self):
        """
        Returns a string representation of this Tensor.
        Returns:
            str: the representation.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def __repr__(self):
        """
        Returns a more unambiguous string representation of the Tensor.
        Returns:
            str: the string representation.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def __len__(self):
        """
        Return the size of the leading dimension of self.
        Returns:
            int: the size of the leading dimension.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def __setitem__(self, index, value):
        """
        Assign the specified value to a subset of elements found via slice
        style indexing along each dimension. e.g. A[5:10, :] = 4.5.
        Each slice consists of start_idx:stop_idx:step_size triplets.  If
        step_size isn't specified it defaults to 1.  If start_idx isn't
        specified it defaults to 0.  If stop_idx isn't specified it defaults
        to the total number of elements along that dimension.  As such a slice
        value of ':' allows one to select all elements along that dimension.
        Arguments:
            index (int, slice, tuple): indices of each dimension's slice.
            value (numeric array, Tensor): values to be assigned to the
                                           extracted element subset.  If an
                                           array it should be the same shape
                                           as what key indexes (or be
                                           broadcastable as such).
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def __getitem__(self, index):
        """
        Extract a subset view of the items via slice style indexing
        along each dimension. e.g. A[5:10, :]. Each slice consists of
        start_idx:stop_idx:step_size triplets.  If step_size isn't specified it
        defaults to 1.  If start_idx isn't specified it defaults to 0.  If
        stop_idx isn't specified it defaults to the total number of elements
        along that dimension.  As such a slice value of ':' allows one to
        select all elements along that dimension.
        Arguments:
            index (int, slice, tuple): indices of each dimension's slice.
        Returns:
            Tensor: view of self corresponding to the subset items.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def _assign(self, value):
        """
        Assign an input value to the Tensor. The NervanaCPU does clipping
        for int and uint types, when overflow happens
        Arguments:
            value (Tensor, OpTreNode, numeric): the value to be assigned.
        """
        raise NotImplementedError()
    def set(self, ary):
        """
        Copy host array to the tensor.
        Arguments:
            ary (numpy.ndarray): host array, needs to be contiguous
        Returns:
            Tensor: self
        """
        raise NotImplementedError()
    def get(self):
        """
        Copy tensor to host as numpy array.
        Returns:
            numpy.ndarray: A host numpy array
        """
        raise NotImplementedError()
    def asnumpyarray(self):
        """
        Convert the tensor to an in host memory `numpy.ndarray`.  A copy of the
        data may be made depending on where the Tensor normally resides.
        Returns:
            numpy.ndarray view or copy of the Tensor data.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def take(self, indices, axis, out=None):
        """
        Select a subset of elements from an array across an axis
        Arguments:
            indices (Tensor, numpy ndarray): indicies of elements to select
            axis (int): axis across which to select the values
            out (Tensor, numpy ndarray, optional): place the resultant values
                                                   into this array if
                                                   specified.
        Return:
            Tensor: Tensor with selected values
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def fill(self, value):
        """
        Assign specified value to each element of this Tensor.
        Arguments:
            value (numeric): The value to be assigned to each element.
        Return:
            Tensor: updated view of the data.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def copy(self, a):
        """
        Construct and return a deep copy of the Tensor passed.
        Arguments:
            a (Tensor): the object to copy
        Returns:
            Tensor: new array object with the same values as tsr.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def copy_from(self, a):
        """
        Copy contents from `a`.
        Arguments:
            a (numpy.ndarray): the host-resident object to copy from
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def reshape(self, *shape):
        """
        Adjusts the dimensions of the data to the specified shape.  The number
        of elements represented by the new shape must be the same as before.
        Arguments:
            shape (int, list): new length of each dimension
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    @property
    def T(self):
        """
        Return a transposed view of the data.
        Returns:
            Tensor: transposed view of self.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def transpose(self, out=None):
        """
        Return a transposed view of the data.  Alias of .T property needed for
        MOP compatibility.
        Arguments:
            out (Tensor, numpy ndarray, optional): place the resultant values
                                                   into this array if
                                                   specified.
        Returns:
            Tensor: transposed view of self.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    def hist(self, tag):
        """
        Compute a histogram of the current tensor values.
        Arguments:
            tag (string): Tag to identify the current state of the tensor,
                          useful for disambiguating multiple histograms of the
                          same tensor at different points in time.
        Returns:
            Tensor containing the histogram data.
        Raises:
            NotImplementedError: Can't be instantiated directly.
        """
        raise NotImplementedError()
    # The operator overloads below do not compute eagerly; each builds an
    # OpTreeNode expression tree that a Backend executes later.  Note that
    # __eq__/__ne__ therefore return op-trees rather than booleans —
    # intentional for the lazy-evaluation API.
    def __add__(self, other):
        """
        Perform `add` operations.
        Arguments:
            other: the right-hand side operand
        Returns:
            OpTreeNode: the resulting op-tree
        """
        return OpTreeNode.build("add", self, other)
    def __sub__(self, other):
        return OpTreeNode.build("sub", self, other)
    def __mul__(self, other):
        return OpTreeNode.build("mul", self, other)
    def __div__(self, other):
        return OpTreeNode.build("div", self, other)
    def __truediv__(self, other):
        return OpTreeNode.build("div", self, other)
    def __pow__(self, other):
        return OpTreeNode.build("pow", self, other)
    # Reflected variants: invoked when the left operand is not a Tensor.
    def __radd__(self, other):
        return OpTreeNode.build("add", other, self)
    def __rsub__(self, other):
        return OpTreeNode.build("sub", other, self)
    def __rmul__(self, other):
        return OpTreeNode.build("mul", other, self)
    def __rdiv__(self, other):
        return OpTreeNode.build("div", other, self)
    def __rtruediv__(self, other):
        return OpTreeNode.build("div", other, self)
    def __rpow__(self, other):
        return OpTreeNode.build("pow", other, self)
    def __eq__(self, other):
        return OpTreeNode.build("eq", self, other)
    def __ne__(self, other):
        return OpTreeNode.build("ne", self, other)
    def __lt__(self, other):
        return OpTreeNode.build("lt", self, other)
    def __le__(self, other):
        return OpTreeNode.build("le", self, other)
    def __gt__(self, other):
        return OpTreeNode.build("gt", self, other)
    def __ge__(self, other):
        return OpTreeNode.build("ge", self, other)
    def __abs__(self):
        return OpTreeNode.build("abs", self, None)
    def __neg__(self):
        return OpTreeNode.build("neg", self, None)
class Backend(object):
"""
Backend interface used to manipulate Tensor data. This abstract base class
defines what operations each concrete backend must support.
NervanaGPU and NervanaCPU inherit Backend.
Arguments:
rng_seed (int, optional): random number generator seed value
default_dtype (numpy.ndtype, optional): Elemental data type to use when
creating new tensors if not
otherwise specified. Defaults
to np.float32
compat_mode (str, optional): Flag to match implementation of other
libraries. Currently only 'caffe' is
supported, defaults to None.
"""
def __init__(self, rng_seed=None, default_dtype=np.float32,
compat_mode=None):
# dtype
self.default_dtype = default_dtype
# use RandomState instead of seed
self.rng = np.random.RandomState(rng_seed)
self.init_rng_state = self.rng.get_state() # for resetting state
# batch size
self.bsz = None
self._min_dims = 2
if compat_mode is not None:
if compat_mode == 'caffe':
self.set_caffe_compat()
else:
raise ValueError('%s mode not supported currently' % compat_mode)
else:
self.compat_mode = None
def output_dim(self, X, S, padding, strides, pooling=False):
"""
compute along 1 dimension, with these sizes, what will be the output dimension
Arguments:
X (int): input data dimension
S (int): filter dimension
padding (int): padding on each side
strides (int): striding
pooling (bool): flag for setting pooling layer size
"""
if self.check_caffe_compat() and pooling:
size = int(ceil(float(X - S + 2 * padding)/strides)) + 1
if padding > 0 and (size - 1)*strides >= X + padding:
# decrement size if last pooling op is completely in padding
size -= 1
else:
# normal neon output size determination
size = (X - S + 2 * padding)/strides + 1
return size
def set_caffe_compat(self):
"""
Set flag to make layers compatible with caffe in terms of conv and pool
layer output size determination and dropout layer implementation
"""
self.compat_mode = 'caffe'
def check_caffe_compat(self):
return self.compat_mode == 'caffe'
def iobuf(self, dim0, x=None, dtype=None, name=None, persist_values=True,
shared=None, parallelism=None):
"""
Allocate input and output buffer for layer based on batch size. This
is used because the layer does not know about the batch size.
Arguments:
dim0 (tuple or int): I/O buffer dimension for layer (without the
axis specifying the batch size).
x (data-type, optional): If present and not None, `x` will be
returned directly. `x` will be not None if
the buffer has already been allocated.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name indentifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor will
persist across multiple begin and
end calls. Setting to False may
provide a performance increase if
values do not need to be maintained
across such calls
shared (buffer, optional): If present will attempt to reuse the memory
in shared to allocate the I/O buffer
parallelism (str, optional): Indicates type of parallelism (Data,
Model) employed by this buffer.
Ignored on CPU and GPU backends,
defaults to no parallelism.
Returns:
Tensor: array object
"""
if x is not None:
return x
if isinstance(dim0, tuple):
if (len(dim0) == 2):
bufshape = (dim0[0], dim0[1] * self.bsz)
else:
bufshape = (np.prod(dim0), self.bsz)
else:
bufshape = (dim0, self.bsz)
if shared is not None:
if shared.shape == bufshape:
return shared
else:
return shared.share(bufshape)
else:
return self.zeros(bufshape, dtype=dtype, name=name,
persist_values=persist_values)
def rng_reset(self):
"""
Reset the random state to the state where the Backend is first
initialized.
usually need to do: self.rng.set_state(self.init_rng_state)
"""
raise NotImplementedError()
def execute(self, node):
"""
Execute the optree. There must be one and only one 'assign' op at the
top of the optree when execute is called.
Arguments:
node (OpTreeNode): The op-tree to execute.
"""
pass
def begin(self, block, identifier):
"""
Signal the start of a block of repeated computation (ex. at the start
of a loop). This operation can be used to help the compiler optimize
instruction performance, but has no direct effect on calculations.
It must be book-ended by a corresponding Backend.end() call.
Note that multiple begin calls can appear adjacent in nested loops.
Arguments:
block (Block.attr): identifies the type of computation being worked
on based on Block attribute specified
identifier (int): unique identifier for this particular iteration
of the block. Will typically be something like
epoch number, mini-batch number, and so forth.
See Also:
:py:func:`~neon.backends.backend.Backend.end`,
"""
pass
def end(self, block, identifier):
"""
Signal the corresponding end of a block of repeated computation
(ex. at the end of a loop). This operation can be used to help the
compiler optimize performance, but has no direct effect on
calculations. It must be preceded by a corresponding Backend.begin()
call.
Arguments:
block (Block.attr): identifies the type of computation being worked
on based on Block attribute specified
identifier (int): unique identifier for this particular iteration
of the block. Will typically be something like
epoch number, mini-batch number, and so forth.
See Also:
:py:func:`~neon.backends.backend.Backend.begin`,
"""
pass
def empty(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, without
initializing element values. This is slightly faster than
:py:func:`~neon.backends.Backend.array`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.zeros`, but the values will be
random.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name indentifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.array`,
:py:func:`~neon.backends.Backend.zeros`,
:py:func:`~neon.backends.Backend.ones`
"""
raise NotImplementedError()
def array(self, ary, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
elements based on ary values.
Arguments:
ary (array_like): input array object to construct from. Can be
built-in python scalar or list (of lists), or a
numpy.ndarray
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name indentifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.zeros`,
:py:func:`~neon.backends.Backend.ones`
"""
raise NotImplementedError()
def zeros(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
Each element with a value of 0.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name indentifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.array`
"""
raise NotImplementedError()
def ones(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
Each element with a value of 1.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
name (str, optional): name indentifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.zeros`,
:py:func:`~neon.backends.backend.Backend.array`
"""
raise NotImplementedError()
def empty_like(self, other_ary, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from other_ary.
Arguments:
other_ary (tensor object): Tensor to inherit the dimensions of.
name (str, optional): name indentifying the tensor (used in printing).
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.array`
"""
raise NotImplementedError()
def zeros_like(self, other_ary, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from other_ary and populating each element with a value of 0.
Arguments:
other_ary (tensor object): Tensor to inherit the dimensions of.
name (str, optional): name indentifying the tensor (used in printing).
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.Backend.empty`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.array`
"""
raise NotImplementedError()
def dot(self, a, b, out=None):
"""
Dot product of two Tensors.
Arguments:
a (Tensor): left-hand side operand.
b (Tensor): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Note that this object should differ from
left and right.
Returns:
OpTreeNode: the resulting op-tree from this operation.
"""
return OpTreeNode.build("dot", a, b, out=out)
def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
"""
Perform one of the following operations (* is dot product)
C = alpha * A * B + beta * C
C = alpha * A.T * B + beta * C
C = alpha * A * B.T + beta * C
relu: if true, applied before output (and prior to beta addition)
The operation will be short-circuited to: out <- alpha * left * right
if beta has value 0 (the default).
Arguments:
A (Tensor): left-hand side operand.
B (Tensor): right-hand side operand.
C (Tensor): output operand
alpha (float. optional): scale A*B term
beta (float, optional): scale C term before sum
relu (bool, optional): If True apply ReLu non-linearity before
output. Defaults to False.
"""
raise NotImplementedError()
def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
"""
Perform one of the following operations:
1. For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C)
2. For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C)
3. For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C)
Arguments:
A (Tensor): left-hand input operand
B (Tensor): right-hand input operand
C (Tensor): output operand
alpha (float. optional): scale A*B term
beta (float, optional): scale C term before sum
relu (bool, optional): If True apply ReLu non-linearity before
output. Defaults to False.
"""
raise NotImplementedError()
def make_binary_mask(self, out, keepthresh=0.5):
"""
Create a binary mask for dropout layers.
Arguments:
out (Tensor): Output tensor
keepthresh (float, optional): fraction of ones. Defaults to 0.5
"""
raise NotImplementedError()
def add(self, a, b, out=None):
"""
Perform element-wise addition on the operands, storing the resultant
values in the out Tensor. Each operand and out must have identical
shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("add", a, b, out=out)
def subtract(self, a, b, out=None):
"""
Perform element-wise subtraction on the operands, storing the resultant
values in the out Tensor. Each operand and out must have identical
shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sub", a, b, out=out)
def multiply(self, a, b, out=None):
"""
Perform element-wise multiplication on the operands, storing the
resultant values in the out Tensor. Each operand and out must have
identical shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("mul", a, b, out=out)
def divide(self, a, b, out=None):
"""
Perform element-wise division on the operands, storing the
resultant values in the out Tensor. Each operand and out must have
identical shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("div", a, b, out=out)
def true_divide(self, a, b, out=None):
"""
Here it is an alias of divide.
Instead of the Python traditional 'floor division', this returns a
true division.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("div", a, b, out=out)
def power(self, a, b, out=None):
"""
Perform element-wise raise of tsr values to specified power,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
b (Tensor, numeric): exponentiated value to be applied to
element. Examples include 2 (square),
0.5 (sqaure root).
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("pow", a, b, out=out)
def reciprocal(self, a, out=None):
"""
Perform element-wise reciprocal of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
power (Tensor, numeric): exponentiated value to be applied to
element. Examples include 2 (square),
0.5 (sqaure root).
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("div", 1., a, out=out)
def negative(self, a, out=None):
"""
Perform element-wise negation of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("neg", a, None, out=out)
def sgn(self, a, out=None):
"""
Perform element-wise indication of the sign of Tensor `a`, storing the
result in Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sgn", a, None, out=out)
def absolute(self, a, out=None):
"""
Perform element-wise absolute value of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("abs", a, None, out=out)
def fabs(self, a, out=None):
"""
Perform element-wise absolute value of Tensor `a`, storing the result
in Tensor out. Both Tensor's should have identical shape. Implemented as
an alias of absolute.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("abs", a, None, out=out)
def sqrt(self, a, out=None):
"""
Perform element-wise square-root of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sqrt", a, None, out=out)
def square(self, a, out=None):
"""
Perform element-wise square of Tensor `a`, storing the result in Tensor
out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sqr", a, None, out=out)
def exp(self, a, out=None):
"""
Perform element-wise exponential transformation on Tensor `a`, storing
the result in Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("exp", a, None, out=out)
def exp2(self, a, out=None):
"""
Perform element-wise 2-based exponential transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("exp2", a, None, out=out)
def safelog(self, a, out=None):
"""
Perform element-wise natural logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape. This log function has built in safety for underflow.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("safelog", a, None, out=out)
def log(self, a, out=None):
"""
Perform element-wise natural logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("log", a, None, out=out)
def log2(self, a, out=None):
"""
Perform element-wise 2-based logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("log2", a, None, out=out)
def sig(self, a, out=None):
"""
Perform element-wise sigmoid transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sig", a, None, out=out)
def sig2(self, a, out=None):
"""
Perform element-wise 2-based sigmoid logarithm transformation on
Tensor `a`, storing the result in Tensor out. Both Tensor's should
have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("sig2", a, None, out=out)
def tanh(self, a, out=None):
"""
Perform element-wise hyperbolic tangent transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("tanh", a, None, out=out)
def tanh2(self, a, out=None):
"""
Perform element-wise 2-based hyperbolic tangent transformation on Tensor
`a`, storing the result in Tensor out. Both Tensor's should have
identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("tanh2", a, None, out=out)
def finite(self, a, out=None):
"""
Perform element-wise test of finiteness (not infinity or not Not a
Number) on Tensor `a`, storing the result in Tensor out. Both Tensor's
should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("finite", a, None, out=out)
def equal(self, a, b, out=None):
"""
Performs element-wise equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("eq", a, b, out=out)
def not_equal(self, a, b, out=None):
"""
Performs element-wise non-equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("ne", a, b, out=out)
def less(self, a, b, out=None):
"""
Performs element-wise less than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("lt", a, b, out=out)
def less_equal(self, a, b, out=None):
"""
Performs element-wise less than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("le", a, b, out=out)
def greater(self, a, b, out=None):
"""
Performs element-wise greater than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only theshape op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("gt", a, b, out=out)
def greater_equal(self, a, b, out=None):
"""
Performs element-wise greater than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("ge", a, b, out=out)
def maximum(self, a, b, out=None):
"""
Performs element-wise maximum value assignment based on corresponding
elements of left and right, storing the result in out. Each operand is
assumed to be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("maximum", a, b, out=out)
def minimum(self, a, b, out=None):
"""
Performs element-wise minimum value assignment based on corresponding
elements of left and right, storing the result in out. Each operand is
assumed to be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("minimum", a, b, out=out)
def clip(self, a, a_min, a_max, out=None):
"""
Performs element-wise clipping of Tensor `a`, storing the result in out.
The clipped value will be between [a_min, a_max].
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return self.minimum(self.maximum(a, a_min), a_max, out=out)
def sum(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the summation of the elements along the specified axis.
Arguments:
a (Tensor): the Tensor on which to perform the sum
axis (int, optional): the dimension along which to compute.
If set to None, we will sum over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return OpTreeNode.build("sum", OpTreeNode.build("sum", a, None, axis=0),
None, axis=1, out=out)
return OpTreeNode.build("sum", a, None, axis=axis, out=out)
def max(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the maximal element value along the specified axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take max over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return OpTreeNode.build("max", OpTreeNode.build("max", a, None, axis=0),
None, axis=1, out=out)
return OpTreeNode.build("max", a, None, axis=axis, out=out)
def min(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the minimal element value along the specified axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take min over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
if axis is None:
return OpTreeNode.build("min", OpTreeNode.build("min", a, None, axis=0),
None, axis=1, out=out)
return OpTreeNode.build("min", a, None, axis=axis, out=out)
def argmax(self, a, axis=1, out=None, keepdims=True):
"""
Calculates the indices of the maximal element value along the specified
axis. If multiple elements contain the maximum, only the indices of
the first are returned.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take argmax over all
dimensions. Defaults to 1
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("argmax", a, None, axis=axis, out=out)
def argmin(self, a, axis=1, out=None, keepdims=True):
"""
Calculates the indices of the minimal element value along the specified
axis. If multiple elements contain the minimum, only the indices of
the first are returned.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take argmin over all
dimensions. Defaults to 1
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("argmin", a, None, axis=axis, out=out)
def mean(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the arithmetic mean of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take mean over all
dimensions. Defaults to None
partial (bool, optional): Not currently used.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
shape = a.shape
if axis is None:
return self.multiply(self.sum(a), 1.0 / (shape[0] * shape[1]), out=out)
return self.multiply(self.sum(a, axis=axis), 1.0 / shape[axis], out=out)
def var(self, a, axis=None, partial=None, out=None, keepdims=True):
    """
    Variance of the elements along the specified axes.

    Computed as the mean of the squared deviations from the mean.

    Arguments:
        a (Tensor): the Tensor on which to perform the operation.
        axis (int, optional): dimension to reduce over; ``None`` computes
            the variance over all dimensions. Defaults to None.
        partial (bool, optional): Not currently used.
        out (Tensor, optional): destination tensor. If ``None``, only the
            op-tree is returned.
        keepdims (bool, optional): keep the reduced axes in the output
            with size 1 instead of collapsing them. Defaults to True.

    Returns:
        OpTreeNode: the resulting op-tree
    """
    if axis is None:
        deviations = self.square(a - self.mean(a))
        return self.mean(deviations, out=out)
    deviations = self.square(a - self.mean(a, axis=axis))
    return self.mean(deviations, axis=axis, out=out)
def std(self, a, axis=None, partial=None, out=None, keepdims=True):
    """
    Calculates the standard deviation of the elements along the specified
    axes.
    Arguments:
        a (Tensor): the Tensor on which to perform the operation
        axis (int, optional): the dimension along which to compute.
                              If set to None, we will take std over all
                              dimensions.
        out (Tensor, optional): where the result will be stored. If out is
                                None, only the op-tree will be returned.
        partial (bool, optional): Not currently used.
        keepdims (bool, optional): Keep the axes being computed over in the
                                   output (with size 1), instead of
                                   collapsing. Defaults to True.
    Returns:
        OpTreeNode: the resulting op-tree
    """
    # NOTE(review): ``out`` is forwarded to var(), not to the final sqrt,
    # so when ``out`` is supplied it ends up holding the *variance* while
    # the returned op-tree (the sqrt) is left unassigned. Confirm this is
    # the intended contract before relying on ``out`` here.
    return self.sqrt(self.var(a, axis=axis, partial=partial, out=out))
def take(self, a, indices, axis, out=None):
    """
    Select elements from ``a`` at the given ``indices`` along ``axis``.

    Arguments:
        a (Tensor): the Tensor on which to perform the operation.
        indices (Tensor, numpy ndarray): indices of elements to select.
        axis (int, optional): dimension to select along; ``None`` selects
            over all dimensions (flattened first).
        out (Tensor, optional): destination tensor. If ``None``, only the
            op-tree is returned.
    """
    # Delegate straight to the tensor's own take() implementation.
    return a.take(indices, axis, out)
def onehot(self, indices, axis, out=None):
    """
    Build an op-tree converting ``indices`` to a one-hot representation.

    Arguments:
        indices (Tensor): elements must be of numpy integer type for gpu
            onehot to work.
        axis (int): axis along the feature-length dimension; must be 0 or 1.
        out (Tensor, optional): destination tensor. If ``None``, only the
            op-tree is returned.

    Returns:
        OpTreeNode: the resulting op-tree

    Raises:
        ValueError: if ``axis`` is neither 0 nor 1.
    """
    if axis != 0 and axis != 1:
        raise ValueError("bad axis for onehot")
    return OpTreeNode.build("onehot", None, None, idx=indices, axis=axis, out=out)
def update_fc_bias(self, err, out):
    """
    Accumulate the bias gradient for a fully connected network layer.

    Sums the backpropagated error across the mini-batch axis (axis 1)
    directly into ``out``.

    Arguments:
        err (Tensor): backpropagated error.
        out (Tensor): destination for the updated gradient value.
    """
    # Reduce over the batch dimension via the underlying compute backend.
    self.ng.sum(err, axis=1, out=out)
def add_fc_bias(self, inputs, bias):
    """
    Add the bias for a fully connected network layer, in place.

    Arguments:
        inputs (Tensor): the input to update (also serves as the output).
        bias (Tensor): the amount to increment.
    """
    # out=inputs makes this an in-place accumulation.
    self.ng.add(inputs, bias, out=inputs)
def conv_layer(self, dtype,
               N, C, K,
               D=1, H=1, W=1,
               T=1, R=1, S=1,
               pad_d=0, pad_h=0, pad_w=0,
               str_d=1, str_h=1, str_w=1,
               relu=False, bsum=False, deterministic_update=False):
    """
    Create a new ConvLayer parameter object.
    This is then passed as an argument to all the convolution operations.

    Arguments:
        dtype (data-type, optional): If present, specifies the underlying
                                     type to employ for each element.
        N (int): Number of images in mini-batch
        C (int): Number of input feature maps
        K (int): Number of output feature maps
        D (int, optional): Depth of input image. Defaults to 1
        H (int, optional): Height of input image. Defaults to 1
        W (int, optional): Width of input image. Defaults to 1
        T (int, optional): Depth of filter kernel. Defaults to 1
        R (int, optional): Height of filter kernel. Defaults to 1
        S (int, optional): Width of filter kernel. Defaults to 1
        pad_d (int, optional): amount of zero-padding around the depth edge
                               Defaults to 0.
        pad_h (int, optional): amount of zero-padding around the height edge
                               Defaults to 0.
        pad_w (int, optional): amount of zero-padding around the width edge
                               Defaults to 0.
        str_d (int, optional): factor to step the filters by in the depth
                               direction. Defaults to 1
        str_h (int, optional): factor to step the filters by in the height
                               direction. Defaults to 1
        str_w (int, optional): factor to step the filters by in the width
                               direction. Defaults to 1
        relu (bool, optional): apply a relu transform to the output for
                               fprop or bprop. Defaults to False
        bsum (bool, optional): calculate the sum along the batchnorm axis
                               for fprop or bprop. Outputs an fp32 tensor
                               of size Kx1. Defaults to False.
        deterministic_update (bool, optional): eliminate atomic adds in the
                                               update operation. Increases
                                               reproducibility but runs
                                               slower. Defaults to False.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False, repeat=1):
    """
    Forward-propagate a convolutional layer's inputs to produce its output.

    Arguments:
        layer: the conv layer as a parameter object.
        I (Tensor): inputs.
        F (Tensor): the weights (filters).
        O (Tensor): outputs.
        alpha (float, optional): linear scaling. Defaults to 1.0.
        relu (bool, optional): apply ReLu before output. Default not to.
        repeat (int, optional): number of times to repeat the operation.
            Defaults to 1.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def bprop_conv(self, layer, F, E, grad_I, alpha=1.0, repeat=1):
    """
    Backward-propagate error through a convolutional network layer.

    Arguments:
        layer: the conv layer as a parameter object.
        F (Tensor): the weights (filters).
        E (Tensor): errors.
        grad_I (Tensor): gradient to inputs (output delta).
        alpha (float, optional): linear scaling. Defaults to 1.0.
        repeat (int, optional): number of times to repeat the operation.
            Defaults to 1.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def update_conv(self, layer, I, E, grad_F, alpha=1.0, repeat=1):
    """
    Compute the updated filter gradient for a convolutional network layer.

    Arguments:
        layer: the conv layer as a parameter object.
        I (Tensor): the inputs.
        E (Tensor): the errors.
        grad_F (Tensor): filter gradients (weights) to update.
        alpha (float, optional): linear scaling. Defaults to 1.0.
        repeat (int, optional): number of times to repeat the operation.
            Defaults to 1.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def deconv_layer(self, dtype,
                 N, C, K,
                 P, Q,
                 R=1, S=1,
                 pad_d=0, pad_h=0, pad_w=0,
                 str_d=1, str_h=1, str_w=1):
    """
    Create a new Deconvolution parameter object.
    This then is passed as an argument to all deconvolution kernels.

    Arguments:
        dtype (data-type, optional): If present, specifies the underlying
                                     type to employ for each element.
        N (int): Number of images in mini-batch
        C (int): Number of input feature maps
        K (int): Number of output feature maps
        P (int): Height of output
        Q (int): Width of output
        R (int, optional): Height of filter kernel. Defaults to 1
        S (int, optional): Width of filter kernel. Defaults to 1
        pad_d (int, optional): amount of zero-padding around the depth edge
                               Defaults to 0.
        pad_h (int, optional): amount of zero-padding around the height edge
                               Defaults to 0.
        pad_w (int, optional): amount of zero-padding around the width edge
                               Defaults to 0.
        str_d (int, optional): factor to step the filters by in the depth
                               direction. Defaults to 1
        str_h (int, optional): factor to step the filters by in the height
                               direction. Defaults to 1
        str_w (int, optional): factor to step the filters by in the width
                               direction. Defaults to 1

    Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    # NOTE(review): pad_d/str_d are accepted although no depth kernel
    # parameter (T/D) exists in this signature — presumably kept for
    # interface symmetry with conv_layer; confirm before removing.
    raise NotImplementedError()
def pool_layer(self, dtype,
               op, N, C,
               D=1, H=1, W=1,
               J=1, T=1, R=1, S=1,
               pad_j=0, pad_d=0, pad_h=0, pad_w=0,
               str_j=None, str_d=None, str_h=None, str_w=None):
    """
    Create a new PoolLayer parameter object.
    This then is passed as an argument to all pooling kernels.

    Arguments:
        op (str): "max", "avg", "l2" pooling (currently bprop only supports
                  max, but not avg and l2)
        N (int): Number of images in mini-batch
        C (int): Number of input feature maps
        D (int, optional): Depth of input image. Defaults to 1
        H (int, optional): Height of input image. Defaults to 1
        W (int, optional): Width of input image. Defaults to 1
        J (int, optional): Size of feature map pooling window
                           (maxout n_pieces). Defaults to 1
        T (int, optional): Depth of pooling window. Defaults to 1
        R (int, optional): Height of pooling window. Defaults to 1
        S (int, optional): Width of pooling window. Defaults to 1
        pad_j (int, optional): amount of zero-padding around the fm pooling
                               window edge. Defaults to 0.
        pad_d (int, optional): amount of zero-padding around the depth edge
                               Defaults to 0.
        pad_h (int, optional): amount of zero-padding around the height edge
                               Defaults to 0.
        pad_w (int, optional): amount of zero-padding around the width edge
                               Defaults to 0.
        str_j (int, optional): factor to step the filters by in the fm
                               pooling window direction. Defaults to None.
        str_d (int, optional): factor to step the filters by in the depth
                               direction. Defaults to None.
        str_h (int, optional): factor to step the filters by in the height
                               direction. Defaults to None.
        str_w (int, optional): factor to step the filters by in the width
                               direction. Defaults to None.
        NOTE(review): the stride defaults are None, not 1 — presumably a
        None stride means "stride equals the window size" in the concrete
        backends; confirm against an implementing subclass.

    Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def fprop_pool(self, layer, I, O):
    """
    Forward-propagate a pooling layer.

    Arguments:
        layer (PoolLayer): the pool layer object; different backends have
            different pool layers.
        I (Tensor): input tensor.
        O (Tensor): output tensor.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def bprop_pool(self, layer, I, E, grad_I):
    """
    Backward-propagate a pooling layer.

    Arguments:
        layer (PoolLayer): the pool layer object; different backends have
            different pool layers.
        I (Tensor): input tensor.
        E (Tensor): error tensor.
        grad_I (Tensor): gradient tensor (delta).

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
def compound_bprop_lut(self, nin, inputs, error, error_t, dW, pad_idx, alpha=1.0, beta=0):
    """
    Backward propagate lookup table layer.

    Arguments:
        nin (integer): Number of input word_ids.
        inputs (Tensor): Input tensor.
        error (Tensor): Error tensor.
        error_t (Tensor): Transposed error tensor.
        dW (Tensor): Gradient tensor (delta).
        pad_idx (integer): presumably the index whose embedding row should
            not receive gradient (padding token) — confirm in a concrete
            backend.
        alpha (float): presumably a linear scale applied to the computed
            gradient, as in the conv update kernels — confirm. Defaults
            to 1.0.
        beta (float): presumably the blend factor for accumulating into an
            existing dW — confirm. Defaults to 0.

    Raises:
        NotImplementedError: abstract; concrete backends must override.
    """
    raise NotImplementedError()
# For constructing an op tree used in lazy evaluation
class OpTreeNode(tuple):
    """
    An OpTreeNode is a tuple of length 3. The first element is a dict
    specifying the operation, and the second and third elements specify the
    operands. From an op-tree's tree perspective, think about the 3
    elements as 3 nodes. The second and third element are the left and right
    child of the first element.
    """
    def __new__(cls, *args):
        # Immutable tuple subclass: all state lives in the three tuple
        # slots (op-dict, left operand, right operand).
        return tuple.__new__(cls, args)
def __str__(self):
    """Render the node as ``(op-dict, left, right)`` with readable operands."""
    def _operand(x):
        # Tensors print as their name when they have a truthy one,
        # otherwise as a stable id-based placeholder; anything else
        # falls back to plain str().
        if isinstance(x, Tensor):
            return x.name if x.name else 'tensor-' + hex(id(x))
        return str(x)
    return ('(' + str(self[0]) + ', ' + _operand(self[1]) + ', '
            + _operand(self[2]) + ')')

def __repr__(self):
    """repr mirrors str for easier interactive debugging."""
    return self.__str__()
def key(self):
"""
Returns a key for identifying the optree. The key is depended on the ops
and the id of the tensors. Since __eq__ is overloaded, need to manage
the hashing of the OpTreeNode manually.
Returns:
tuple: optree key
"""
stack = self.traverse(list())
for i in range(len(stack)):
if type(stack[i]) is dict:
if 'axis' in stack[i]:
stack[i] = (stack[i]['op'], stack[i]['axis'])
else:
stack[i] = (stack[i]['op'])
return tuple(stack)
def intrinsic_key_maps(self):
    """
    Returns the intrinsic key, tensor_index_map and index_tensor_map
    for the purpose of identifying an optree. The key depends on the ops,
    the tensors' dimensions and the relation among the tensors.
    x0 * x1 + x0 * x2 will have the same intrinsic key as y0 * y1 + y0 * y2,
    if xi and yi have the same shape.
    In tensor_index_map and index_tensor_map, tensors have a one-to-one
    mapping with indices. The index of a tensor depends on the first
    occurrence of the tensor in the post-order traversal of the optree.
    Returns:
        (intrinsic_key, tensor_index_map, index_tensor_map)
    """
    stack = self.traverse(list())
    tensor_index = 0
    tensor_index_map = {}
    index_tensor_map = {}
    for i in range(len(stack)):
        if type(stack[i]) is dict:
            if 'axis' in stack[i]:
                stack[i] = (stack[i]['op'], stack[i]['axis'])
            else:
                # NOTE(review): '(x)' is not a 1-tuple, so this stores the
                # bare op string; key() does the same — confirm intended.
                stack[i] = (stack[i]['op'])
        elif isinstance(stack[i], Tensor):
            # use integer to replace tensor
            if stack[i] in tensor_index_map:
                # Repeated tensor: reuse its previously assigned index so
                # shared operands are captured in the key.
                stack[i] = (tensor_index_map[stack[i]], stack[i].shape)
            else:
                # put tensor in dict
                tensor_index_map[stack[i]] = tensor_index
                index_tensor_map[tensor_index] = stack[i]
                stack[i] = (tensor_index, stack[i].shape)
                tensor_index += 1
    return (tuple(stack), tensor_index_map, index_tensor_map)
@staticmethod
def build(op, a, b, out=None, **kwargs):
    """
    Build OpTreeNode.

    Arguments:
        op (str): operation name; an element-wise op, a reduction op,
            "assign", "dot" or "transpose".
        a (OpTreeNode, Tensor, numeric): left-hand side operand.
        b (OpTreeNode, Tensor, numeric): right-hand side operand.
        out (Tensor, optional): where the result will be stored. If out is
            not None, the op-tree will be executed.
        kwargs: optional argument such as axis of the reduction.
    """
    # check type
    for arg in (a, b):
        if not isinstance(arg, (int, float, Tensor, OpTreeNode, type(None))):
            # Returning NotImplemented lets Python try the other
            # operand's reflected operator instead of raising here.
            return NotImplemented
    # get shape
    out_shape = [1, 1]
    if isinstance(a, (OpTreeNode, Tensor)):
        a_shape = a.shape
    elif isinstance(a, (float, int)):
        a_shape = [1, 1]
    else:
        a_shape = [0, 0]  # None operand (unary ops)
    if isinstance(b, (OpTreeNode, Tensor)):
        b_shape = b.shape
    elif isinstance(b, (float, int)):
        b_shape = [1, 1]
    else:
        b_shape = [0, 0]
    # TODO: fix shape in smarter way
    if len(a_shape) == 1:
        a_shape = a_shape + (1,)
    if len(b_shape) == 1:
        b_shape = b_shape + (1,)
    if op in OpCollection.ew_ops:
        # Element-wise: broadcast to the larger extent on each axis.
        for i in range(2):
            out_shape[i] = max(a_shape[i], b_shape[i])
    elif op in OpCollection.reduction_ops:
        if "axis" in kwargs:
            # Axis reduction collapses only the given axis to 1.
            out_shape = list(a_shape)
            out_shape[kwargs["axis"]] = 1
        else:
            pass  # [1, 1] — full reduction to a scalar-shaped result
    elif op == "assign":
        out_shape = a_shape
    elif op == "dot":
        # Strict 2-D matrix multiply with matching inner dimensions.
        assert (len(a_shape) == len(b_shape) and len(b_shape) == 2 and
                a_shape[1] == b_shape[0])
        out_shape = (a_shape[0], b_shape[1])
    elif op == "transpose":
        assert b is None
        out_shape = tuple(reversed(a_shape))
    else:
        raise TypeError("%s is not a valid operation" % op)
    out_shape = tuple(out_shape)
    # build op dict
    op_dict = {"op": op, "shape": out_shape}
    op_dict.update(kwargs)
    node = OpTreeNode(op_dict, a, b)
    # execute explicit assignment
    if op == "assign":
        return node.execute()
    # passing in an out value counts as assignment
    if out is not None:
        return OpTreeNode({"op": "assign"}, out, node).execute()
    # delay execution until assignment
    return node
def execute(self):
    """
    Execute the optree. When calling `execute()`, there must be one and only
    one `assign` operation at the very top of the op-tree. The corresponding
    backend's execute function will be called.
    """
    assert(self[0]["op"] == "assign")
    # self[1] is the assignment target tensor; its backend runs the tree.
    backend = self[1].backend
    if isinstance(backend, Backend):
        return backend.execute(self)
    else:
        raise NotImplementedError()
def traverse(self, stack):
"""
Post order walk op tree and produce postfix stack
Arguments:
stack (list): user shall give empty list like `list()`, then it's
used recursively to construct the post-order stack.
"""
# Left
if isinstance(self[1], OpTreeNode):
self[1].traverse(stack)
elif self[1] is not None:
stack.append(self[1])
# Right
if isinstance(self[2], OpTreeNode):
self[2].traverse(stack)
elif self[2] is not None:
stack.append(self[2])
stack.append(self[0])
return stack
@property
def T(self):
    """Lazy transpose of this op-tree (no copy, no execution)."""
    return OpTreeNode.build("transpose", self, None)

def transpose(self, out=None):
    """
    Return a transposed view of the data.
    """
    transposed = self.T
    if out:
        # An out target forces immediate assignment/execution.
        return OpTreeNode.build("assign", out, transposed)
    return transposed
@staticmethod
def optree_to_list(optree):
"""
convert optree to list of lists recursively
"""
if isinstance(optree, OpTreeNode):
return list(map(OpTreeNode.optree_to_list, optree))
else:
return optree
@staticmethod
def list_to_optree(l):
"""
convert list to optree recursively
"""
if isinstance(l, list):
return OpTreeNode(*map(OpTreeNode.list_to_optree, l))
else:
return l
@property
def shape(self):
"""
return the shape of the OpTreeNode
"""
if isinstance(self, OpTreeNode):
return self[0]['shape']
if isinstance(self, Tensor):
return self.shape
# scalar
return (1, 1)
@staticmethod
def _pretty_print(node):
    # Recursively format *node* as a parenthesised infix expression,
    # mapping the common arithmetic op names to their symbols.
    operators = {'add': '+',
                 'sub': '-',
                 'mul': '*',
                 'div': '/',
                 'pow': '**'}
    s = ''
    if isinstance(node, Tensor):
        if node.name:
            s = node.name
        else:
            s = 'tensor-' + hex(id(node))
    elif isinstance(node, OpTreeNode):
        if node[2]:
            # Binary op: "(left op right)".
            s += OpTreeNode._pretty_print(node[1]) + ' '
            if node[0]['op'] in operators:
                s += operators[node[0]['op']]
            else:
                s += node[0]['op']
            s += ' ' + OpTreeNode._pretty_print(node[2])
        else:
            # Unary op: "(op operand)".
            s = node[0]['op'] + ' ' + OpTreeNode._pretty_print(node[1])
        s = '(' + s + ')'
    else:
        # Scalars and anything else fall back to str().
        s = str(node)  # TODO
        s = '(' + s + ')'
    return s
def pp(self):
    """
    Pretty print of the optree.

    Returns:
        str: string representation of the op-tree
    """
    return OpTreeNode._pretty_print(self)

def asnumpyarray(self):
    """
    Evaluate the op-tree and return the result as a host numpy.ndarray.

    Allocates new memory; usually used for debug.

    Returns:
        numpy.ndarray: evaluated value
    """
    return self.astensor().get()
def astensor(self):
    """
    Evaluate the op-tree into a freshly allocated Tensor (debug helper).

    Returns:
        Tensor: evaluated value

    Raises:
        ValueError: if the tree contains no Tensor from which a backend
            can be inferred.
    """
    # The backend is borrowed from the first tensor found in post-order.
    backend = None
    for item in self.traverse(list()):
        if isinstance(item, Tensor):
            backend = item.backend
            break
    if backend is None:
        raise ValueError("No tensor object in op_tree")
    result = backend.empty(self.shape)
    result[:] = self
    return result
# Arithmetic, comparison and unary operators all defer to build() so that
# expressions mixing op-trees, tensors and scalars keep accumulating
# lazily instead of executing eagerly. The r-variants place the foreign
# operand on the left. Note __eq__ builds an "eq" op-tree rather than
# testing identity — which is why key() exists for hashing/lookup.
def __add__(self, other):
    return self.build("add", self, other)

def __sub__(self, other):
    return self.build("sub", self, other)

def __mul__(self, other):
    return self.build("mul", self, other)

def __div__(self, other):
    # Python 2 division; __truediv__ below covers Python 3.
    return self.build("div", self, other)

def __truediv__(self, other):
    return self.build("div", self, other)

def __pow__(self, other):
    return self.build("pow", self, other)

def __radd__(self, other):
    return self.build("add", other, self)

def __rsub__(self, other):
    return self.build("sub", other, self)

def __rmul__(self, other):
    return self.build("mul", other, self)

def __rdiv__(self, other):
    return self.build("div", other, self)

def __rtruediv__(self, other):
    return self.build("div", other, self)

def __rpow__(self, other):
    return self.build("pow", other, self)

def __eq__(self, other):
    return self.build("eq", self, other)

def __ne__(self, other):
    return self.build("ne", self, other)

def __lt__(self, other):
    return self.build("lt", self, other)

def __le__(self, other):
    return self.build("le", self, other)

def __gt__(self, other):
    return self.build("gt", self, other)

def __ge__(self, other):
    return self.build("ge", self, other)

def __abs__(self):
    return self.build("abs", self, None)

def __neg__(self):
    return self.build("neg", self, None)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# Copyright (C) 2008, OLPC\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version ... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# Copyright (C) 2008, OLPC\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation;... | ```python
# Copyright (C) 2008, OLPC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
from gettext import gettext as _
from sugar.graphics import style
from jarabe.controlpanel.sectionview import SectionView
from jarabe.controlpanel.inlinealert import InlineAlert
class Power(SectionView):
    # Control-panel section exposing the "automatic power management"
    # toggle. Python 2 / GTK2 code (sugar/jarabe control panel).

    def __init__(self, model, alerts):
        SectionView.__init__(self)

        self._model = model
        # Keys (e.g. 'automatic_pm') for which a restart notice must show.
        self.restart_alerts = alerts
        self._automatic_pm_valid = True
        self._automatic_pm_change_handler = None

        self.set_border_width(style.DEFAULT_SPACING * 2)
        self.set_spacing(style.DEFAULT_SPACING)
        # Size group keeps the checkbox label and the alert spacer aligned.
        group = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)

        self._automatic_pm_alert_box = gtk.HBox(spacing=style.DEFAULT_SPACING)

        separator_pm = gtk.HSeparator()
        self.pack_start(separator_pm, expand=False)
        separator_pm.show()

        label_pm = gtk.Label(_('Power management'))
        label_pm.set_alignment(0, 0)
        self.pack_start(label_pm, expand=False)
        label_pm.show()

        box_pm = gtk.VBox()
        box_pm.set_border_width(style.DEFAULT_SPACING * 2)
        box_pm.set_spacing(style.DEFAULT_SPACING)

        # Checkbox row: toggle + descriptive label.
        box_automatic_pm = gtk.HBox(spacing=style.DEFAULT_SPACING)
        label_automatic_pm = gtk.Label(
            _('Automatic power management (increases battery life)'))
        label_automatic_pm.set_alignment(0, 0.5)
        self._automatic_button = gtk.CheckButton()
        self._automatic_button.set_alignment(0, 0)
        box_automatic_pm.pack_start(self._automatic_button, expand=False)
        box_automatic_pm.pack_start(label_automatic_pm, expand=False)
        self._automatic_button.show()
        label_automatic_pm.show()
        group.add_widget(label_automatic_pm)
        box_pm.pack_start(box_automatic_pm, expand=False)
        box_automatic_pm.show()

        # Inline alert row, indented by an empty label in the size group.
        self._automatic_pm_alert = InlineAlert()
        label_automatic_pm_error = gtk.Label()
        group.add_widget(label_automatic_pm_error)
        self._automatic_pm_alert_box.pack_start(label_automatic_pm_error,
                                                expand=False)
        label_automatic_pm_error.show()
        self._automatic_pm_alert_box.pack_start(self._automatic_pm_alert,
                                                expand=False)
        box_pm.pack_end(self._automatic_pm_alert_box, expand=False)
        self._automatic_pm_alert_box.show()
        if 'automatic_pm' in self.restart_alerts:
            # restart_msg is presumably inherited from SectionView — confirm.
            self._automatic_pm_alert.props.msg = self.restart_msg
            self._automatic_pm_alert.show()

        self.pack_start(box_pm, expand=False)
        box_pm.show()

        self.setup()

    def setup(self):
        # Read the current state from the model; surface any failure in
        # the inline alert instead of crashing the panel.
        try:
            automatic_state = self._model.get_automatic_pm()
        except Exception, detail:
            self._automatic_pm_alert.props.msg = detail
            self._automatic_pm_alert.show()
        else:
            self._automatic_button.set_active(automatic_state)
            self._automatic_pm_valid = True

        self.needs_restart = False
        # Connect after set_active() so the initial sync does not fire
        # the toggled callback.
        self._automatic_pm_change_handler = self._automatic_button.connect( \
            'toggled', self.__automatic_pm_toggled_cb)

    def undo(self):
        # Disconnect first so reverting the model does not re-trigger
        # the toggled callback.
        self._automatic_button.disconnect(self._automatic_pm_change_handler)
        self._model.undo()
        self._automatic_pm_alert.hide()

    def _validate(self):
        if self._automatic_pm_valid:
            self.props.is_valid = True
        else:
            self.props.is_valid = False

    def __automatic_pm_toggled_cb(self, widget, data=None):
        state = widget.get_active()
        try:
            self._model.set_automatic_pm(state)
        except Exception, detail:
            print detail
            self._automatic_pm_alert.props.msg = detail
        else:
            # NOTE(review): _automatic_pm_valid is never set False on
            # failure, so _validate() always reports valid — confirm
            # whether the error path should clear it.
            self._automatic_pm_valid = True

        self._validate()
        return False
``` |
[
{
"content": "```python\nfrom collections import defaultdict, Iterable\nimport itertools\n\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import zip_longest\n\n\nNAME_COUNTERS = defaultdict(lambda: 0)\n\n\ndef generate_name(obj):\n \"\"\"Generate a unique name for the object in question\n\n ... | [
{
"content": "<|memory_start|>```python\nfrom collections import defaultdict, Iterable\nimport itertools\n\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import zip_longest\n\n\nNAME_COUNTERS = defaultdict(lambda: 0)\n\n\ndef generate_name(obj):\n \"\"\"Generate a unique name for the object in ... | ```python
from collections import defaultdict, Iterable
import itertools
import numpy as np
import tensorflow as tf
from six.moves import zip_longest
NAME_COUNTERS = defaultdict(lambda: 0)
def generate_name(obj):
    """Generate a unique name for the object in question

    Returns a name of the form "{calling_class_name}_{count}"
    """
    global NAME_COUNTERS
    # One monotonically increasing counter per distinct __name__.
    base = obj.__name__
    NAME_COUNTERS[base] += 1
    return '{0}_{1}'.format(base, NAME_COUNTERS[base])
class classproperty(object):
    """Descriptor exposing a read-only property at class level.

    The wrapped getter receives the owning class (never the instance).
    """

    def __init__(self, getter):
        self.getter = getter

    def __get__(self, instance, owner):
        # Ignore the instance entirely; always dispatch on the class.
        return self.getter(owner)
def grouper(iterable, n=2, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
    # n references to ONE shared iterator advance in lockstep, so
    # zip_longest slices the stream into n-sized chunks, padding the
    # final short chunk with fillvalue.
    chunks = [iter(iterable)] * n
    return zip_longest(*chunks, fillvalue=fillvalue)
def flatten(l):
    """Recursively flatten an iterable argument, ignoring strings and bytes.

    Yields the non-iterable leaf elements of arbitrarily nested iterables
    in order; strings and bytes are treated as leaves, not containers.
    Taken from: http://stackoverflow.com/a/2158532
    """
    # ``collections.Iterable`` was removed in Python 3.10; importing the
    # ABC from collections.abc locally keeps this working on all versions
    # without touching the module-level imports.
    from collections.abc import Iterable as _Iterable
    for el in l:
        if isinstance(el, _Iterable) and not isinstance(el, (str, bytes)):
            for sub in flatten(el):
                yield sub
        else:
            yield el
def is_finite(obj):
    # TensorFlow tensors count as finite (their values are unknown until
    # the graph runs); everything else is checked numerically via numpy.
    return isinstance(obj, tf.Tensor) or np.isfinite(obj)
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # Duplicate the stream, advance the second copy by one element, and
    # zip them back together to yield overlapping pairs.
    first, second = itertools.tee(iterable)
    next(second, None)
    return zip(first, second)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# Copyright 2014, 2018 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the Lice... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# Copyright 2014, 2018 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a ... | ```python
# Copyright 2014, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connection to PowerVM hypervisor through NovaLink."""
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from pypowervm import adapter as pvm_apt
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
from pypowervm.helpers import log_helper as log_hlp
from pypowervm.helpers import vios_busy as vio_hlp
from pypowervm.tasks import partition as pvm_par
from pypowervm.tasks import storage as pvm_stor
from pypowervm.tasks import vterm as pvm_vterm
from pypowervm.wrappers import managed_system as pvm_ms
import six
from taskflow.patterns import linear_flow as tf_lf
from nova.compute import task_states
from nova import conf as cfg
from nova.console import type as console_type
from nova import exception as exc
from nova.i18n import _
from nova import image
from nova import rc_fields
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.powervm import host as pvm_host
from nova.virt.powervm.tasks import base as tf_base
from nova.virt.powervm.tasks import image as tf_img
from nova.virt.powervm.tasks import network as tf_net
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
from nova.virt.powervm import vm
from nova.virt.powervm import volume
from nova.virt.powervm.volume import fcvscsi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Namespace and registry used by init_host() to dynamically load the
# disk adapter class selected via CONF.powervm.disk_driver.
DISK_ADPT_NS = 'nova.virt.powervm.disk'
DISK_ADPT_MAPPINGS = {
    'localdisk': 'localdisk.LocalStorage',
    'ssp': 'ssp.SSPDiskAdapter'
}
class PowerVMDriver(driver.ComputeDriver):
"""PowerVM NovaLink Implementation of Compute Driver.
https://wiki.openstack.org/wiki/PowerVM
"""
def __init__(self, virtapi):
    """Initialize the driver and declare its static capabilities."""
    # NOTE(edmondsw) some of these will be dynamic in future, so putting
    # capabilities on the instance rather than on the class.
    self.capabilities = {
        'has_imagecache': False,
        'supports_evacuate': False,
        'supports_migrate_to_same_host': False,
        'supports_attach_interface': True,
        'supports_device_tagging': False,
        'supports_tagged_attach_interface': False,
        'supports_tagged_attach_volume': False,
        'supports_extend_volume': True,
        'supports_multiattach': False,
        'supports_trusted_certs': False,
    }
    super(PowerVMDriver, self).__init__(virtapi)
def init_host(self, host):
    """Initialize anything that is necessary for the driver to function.

    Includes catching up with currently running VMs on the given host.
    """
    # Build the adapter. May need to attempt the connection multiple times
    # in case the PowerVM management API service is starting.
    # TODO(efried): Implement async compute service enable/disable like
    # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
    self.adapter = pvm_apt.Adapter(
        pvm_apt.Session(conn_tries=60),
        helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
    # Make sure the Virtual I/O Server(s) are available.
    pvm_par.validate_vios_ready(self.adapter)
    # Cache the (single) managed system wrapper for later resource queries.
    self.host_wrapper = pvm_ms.System.get(self.adapter)[0]

    # Do a scrub of the I/O plane to make sure the system is in good shape
    LOG.info("Clearing stale I/O connections on driver init.")
    pvm_stor.ComprehensiveScrub(self.adapter).execute()

    # Initialize the disk adapter selected by CONF.powervm.disk_driver.
    self.disk_dvr = importutils.import_object_ns(
        DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
        self.adapter, self.host_wrapper.uuid)
    self.image_api = image.API()

    LOG.info("The PowerVM compute driver has been initialized.")
@staticmethod
def _log_operation(op, instance):
    """Log entry point of driver operations."""
    details = {'op': op, 'display_name': instance.display_name,
               'name': instance.name}
    LOG.info('Operation: %(op)s. Virtual machine display name: '
             '%(display_name)s, name: %(name)s',
             details, instance=instance)
def get_info(self, instance):
    """Fetch the current status of an instance from the hypervisor.

    :param instance: nova.objects.instance.Instance object
    :returns: An InstanceInfo object.
    """
    # Delegate to the vm module, which talks to the PowerVM REST API.
    return vm.get_vm_info(self.adapter, instance)
def list_instances(self):
    """List the names of every instance known to this virt host.

    :return: VM names as a list.
    """
    return vm.get_lpar_names(self.adapter)
def get_available_nodes(self, refresh=False):
    """Return nodenames of all nodes managed by the compute service.

    This driver manages a single node, so the list always contains just
    the configured host name; ``refresh`` is accepted for interface
    compatibility and ignored.
    """
    nodes = [CONF.host]
    return nodes
def get_available_resource(self, nodename):
    """Retrieve resource information.

    This method is called when nova-compute launches, and as part of a
    periodic task.

    :param nodename: Node from which the caller wants to get resources.
                     A driver that manages only one node can safely ignore
                     this.
    :return: Dictionary describing resources.
    """
    # TODO(efried): Switch to get_inventory, per blueprint
    #               custom-resource-classes-pike
    # Do this here so it refreshes each time this method is called.
    self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
    return self._get_available_resource()
def _get_available_resource(self):
    # Host-level inventory (CPU/memory) comes from the managed-system
    # wrapper; disk capacity comes from the active disk adapter.
    data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
    data.update(local_gb=self.disk_dvr.capacity,
                local_gb_used=self.disk_dvr.capacity_used)
    return data
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree with current provider and inventory data.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        :param allocations: Currently ignored by this driver.
        """
        # Get (legacy) resource information. Same as get_available_resource,
        # but we don't need to refresh self.host_wrapper as it was *just*
        # refreshed by get_available_resource in the resource tracker's
        # update_available_resource flow.
        data = self._get_available_resource()

        # NOTE(yikun): If the inv record does not exists, the allocation_ratio
        # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
        # is set, and fallback to use the initial_xxx_allocation_ratio
        # otherwise.
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        cpu_reserved = CONF.reserved_host_cpus
        mem_reserved = CONF.reserved_host_memory_mb
        disk_reserved = self._get_reserved_host_disk_gb_from_config()

        inventory = {
            rc_fields.ResourceClass.VCPU: {
                'total': data['vcpus'],
                'max_unit': data['vcpus'],
                'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
                'reserved': cpu_reserved,
            },
            rc_fields.ResourceClass.MEMORY_MB: {
                'total': data['memory_mb'],
                'max_unit': data['memory_mb'],
                'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
                'reserved': mem_reserved,
            },
            rc_fields.ResourceClass.DISK_GB: {
                # TODO(efried): Proper DISK_GB sharing when SSP driver in play
                # int() cast -- local_gb may be non-integral here; presumably
                # placement expects whole-number totals (TODO confirm).
                'total': int(data['local_gb']),
                'max_unit': int(data['local_gb']),
                'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
                'reserved': disk_reserved,
            },
        }
        provider_tree.update_inventory(nodename, inventory)
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param allocations: Information about resources allocated to the
                            instance via placement, of the form returned by
                            SchedulerReportClient.get_allocations_for_consumer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)
        # Nothing below executes until tf_base.run() at the bottom; the flow
        # is only being assembled task by task here.
        # Define the flow
        flow_spawn = tf_lf.Flow("spawn")

        # This FeedTask accumulates VIOS storage connection operations to be
        # run in parallel. Include both SCSI and fibre channel mappings for
        # the scrubber.
        stg_ftsk = pvm_par.build_active_vio_feed_task(
            self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

        flow_spawn.add(tf_vm.Create(
            self.adapter, self.host_wrapper, instance, stg_ftsk))

        # Create a flow for the IO
        flow_spawn.add(tf_net.PlugVifs(
            self.virtapi, self.adapter, instance, network_info))
        flow_spawn.add(tf_net.PlugMgmtVif(
            self.adapter, instance))

        # Create the boot image.
        flow_spawn.add(tf_stg.CreateDiskForImg(
            self.disk_dvr, context, instance, image_meta))
        # Connects up the disk to the LPAR
        flow_spawn.add(tf_stg.AttachDisk(
            self.disk_dvr, instance, stg_ftsk=stg_ftsk))

        # Extract the block devices.
        bdms = driver.block_device_info_get_mapping(block_device_info)

        # Determine if there are volumes to connect. If so, add a connection
        # for each type.
        for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
                                               stg_ftsk=stg_ftsk):
            # Connect the volume. This will update the connection_info.
            flow_spawn.add(tf_stg.AttachVolume(vol_drv))

        # If the config drive is needed, add those steps. Should be done
        # after all the other I/O.
        if configdrive.required_by(instance):
            flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
                self.adapter, instance, injected_files, network_info,
                stg_ftsk, admin_pass=admin_password))

        # Add the transaction manager flow at the end of the 'I/O
        # connection' tasks. This will run all the connections in parallel.
        flow_spawn.add(stg_ftsk)

        # Last step is to power on the system.
        flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

        # Run the flow.
        tf_base.run(flow_spawn, instance=instance)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Destroy the specified instance from the Hypervisor.

        If the instance is not found (for example if networking failed), this
        function should still succeed. It's probably a good idea to log a
        warning in that case.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        :param destroy_disks: Indicates if disks should be destroyed
        """
        # TODO(thorst, efried) Add resize checks for destroy
        self._log_operation('destroy', instance)

        def _setup_flow_and_run():
            # Builds the teardown flow and executes it; nothing runs until
            # tf_base.run() at the end.
            # Define the flow
            flow = tf_lf.Flow("destroy")

            # Power Off the LPAR. If its disks are about to be deleted, issue a
            # hard shutdown.
            flow.add(tf_vm.PowerOff(self.adapter, instance,
                                    force_immediate=destroy_disks))

            # The FeedTask accumulates storage disconnection tasks to be run in
            # parallel.
            stg_ftsk = pvm_par.build_active_vio_feed_task(
                self.adapter, xag=[pvm_const.XAG.VIO_SMAP])

            # Call the unplug VIFs task. While CNAs get removed from the LPAR
            # directly on the destroy, this clears up the I/O Host side.
            flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

            # Add the disconnect/deletion of the vOpt to the transaction
            # manager.
            if configdrive.required_by(instance):
                flow.add(tf_stg.DeleteVOpt(
                    self.adapter, instance, stg_ftsk=stg_ftsk))

            # Extract the block devices.
            bdms = driver.block_device_info_get_mapping(block_device_info)

            # Determine if there are volumes to detach. If so, remove each
            # volume (within the transaction manager)
            for bdm, vol_drv in self._vol_drv_iter(
                    context, instance, bdms, stg_ftsk=stg_ftsk):
                flow.add(tf_stg.DetachVolume(vol_drv))

            # Detach the disk storage adapters
            flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))

            # Accumulated storage disconnection tasks next
            flow.add(stg_ftsk)

            # Delete the storage disks
            if destroy_disks:
                flow.add(tf_stg.DeleteDisk(self.disk_dvr))

            # TODO(thorst, efried) Add LPAR id based scsi map clean up task
            flow.add(tf_vm.Delete(self.adapter, instance))

            # Build the engine & run!
            tf_base.run(flow, instance=instance)

        try:
            _setup_flow_and_run()
        except exc.InstanceNotFound:
            # Tolerated so destroy stays idempotent per the driver contract
            # described in the docstring above.
            LOG.debug('VM was not found during destroy operation.',
                      instance=instance)
            return
        except pvm_exc.Error as e:
            LOG.exception("PowerVM error during destroy.", instance=instance)
            # Convert to a Nova exception
            raise exc.InstanceTerminationFailure(reason=six.text_type(e))
    def snapshot(self, context, instance, image_id, update_task_state):
        """Snapshots the specified instance.

        :param context: security context
        :param instance: nova.objects.instance.Instance
        :param image_id: Reference to a pre-created image that will hold the
                         snapshot.
        :param update_task_state: Callback function to update the task_state
            on the instance while the snapshot operation progresses. The
            function takes a task_state argument and an optional
            expected_task_state kwarg which defaults to
            nova.compute.task_states.IMAGE_SNAPSHOT. See
            nova.objects.instance.Instance.save for expected_task_state usage.
        :raises NotSupportedWithOption: if the configured disk driver does not
            advertise the 'snapshot' capability.
        """
        # Fail fast, before any task-state changes, if the disk driver
        # cannot snapshot.
        if not self.disk_dvr.capabilities.get('snapshot'):
            raise exc.NotSupportedWithOption(
                message=_("The snapshot operation is not supported in "
                          "conjunction with a [powervm]/disk_driver setting "
                          "of %s.") % CONF.powervm.disk_driver)
        self._log_operation('snapshot', instance)

        # Define the flow.
        flow = tf_lf.Flow("snapshot")

        # Notify that we're starting the process.
        flow.add(tf_img.UpdateTaskState(update_task_state,
                                        task_states.IMAGE_PENDING_UPLOAD))

        # Connect the instance's boot disk to the management partition, and
        # scan the scsi bus and bring the device into the management partition.
        flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))

        # Notify that the upload is in progress.
        flow.add(tf_img.UpdateTaskState(
            update_task_state, task_states.IMAGE_UPLOADING,
            expected_state=task_states.IMAGE_PENDING_UPLOAD))

        # Stream the disk to glance.
        flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
                                       instance))

        # Disconnect the boot disk from the management partition and delete the
        # device.
        flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))

        # Run the flow.
        tf_base.run(flow, instance=instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
self._log_operation('power_off', instance)
force_immediate = (timeout == 0)
timeout = timeout or None
vm.power_off(self.adapter, instance, force_immediate=force_immediate,
timeout=timeout)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance.

        :param context: security context (unused here)
        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information (unused here)
        :param block_device_info: Information about attached block devices
                                  (unused here)
        """
        self._log_operation('power_on', instance)
        vm.power_on(self.adapter, instance)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: nova.objects.instance.Instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
encountered
"""
self._log_operation(reboot_type + ' reboot', instance)
vm.reboot(self.adapter, instance, reboot_type == 'HARD')
# pypowervm exceptions are sufficient to indicate real failure.
# Otherwise, pypowervm thinks the instance is up.
    def attach_interface(self, context, instance, image_meta, vif):
        """Attach an interface to the instance.

        Thin wrapper: delegates to :meth:`plug_vifs` with a single-element
        VIF list; context and image_meta are unused here.
        """
        self.plug_vifs(instance, [vif])
    def detach_interface(self, context, instance, vif):
        """Detach an interface from the instance.

        Thin wrapper: delegates to :meth:`unplug_vifs` with a single-element
        VIF list; context is unused here.
        """
        self.unplug_vifs(instance, [vif])
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks.

        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        :raises VirtualInterfacePlugException: on any failure; a missing
            instance gets a more specific message than other errors.
        """
        self._log_operation('plug_vifs', instance)

        # Define the flow
        flow = tf_lf.Flow("plug_vifs")

        # Get the LPAR Wrapper
        flow.add(tf_vm.Get(self.adapter, instance))

        # Run the attach
        flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                                 network_info))

        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            # Must be caught before the generic handler below so the caller
            # sees the not-found message.
            raise exc.VirtualInterfacePlugException(
                _("Plug vif failed because instance %s was not found.")
                % instance.name)
        except Exception:
            LOG.exception("PowerVM error plugging vifs.", instance=instance)
            raise exc.VirtualInterfacePlugException(
                _("Plug vif failed because of an unexpected error."))
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks.

        :param instance: nova.objects.instance.Instance
        :param network_info: instance network information
        :raises InterfaceDetachFailed: on unexpected errors; a missing
            instance is only logged (the unplug is then moot).
        """
        self._log_operation('unplug_vifs', instance)

        # Define the flow
        flow = tf_lf.Flow("unplug_vifs")

        # Run the detach
        flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))

        # Run the flow
        try:
            tf_base.run(flow, instance=instance)
        except exc.InstanceNotFound:
            # Unlike plug_vifs, a missing VM is not an error here -- there is
            # nothing left to unplug.
            LOG.warning('VM was not found during unplug operation as it is '
                        'already possibly deleted.', instance=instance)
        except Exception:
            LOG.exception("PowerVM error trying to unplug vifs.",
                          instance=instance)
            raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
    def get_vnc_console(self, context, instance):
        """Get connection info for a vnc console.

        :param context: security context
        :param instance: nova.objects.instance.Instance

        :return: An instance of console.type.ConsoleVNC
        :raises InstanceNotFound: if the backing LPAR no longer exists
            (surfaced as an HTTP 404 from the PowerVM REST API).
        """
        self._log_operation('get_vnc_console', instance)
        lpar_uuid = vm.get_pvm_uuid(instance)

        # Build the connection to the VNC.
        host = CONF.vnc.server_proxyclient_address
        # TODO(thorst, efried) Add the x509 certificate support when it lands

        try:
            # Open up a remote vterm
            port = pvm_vterm.open_remotable_vnc_vterm(
                self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
            # Note that the VNC viewer will wrap the internal_access_path with
            # the HTTP content.
            return console_type.ConsoleVNC(host=host, port=port,
                                           internal_access_path=lpar_uuid)
        except pvm_exc.HttpError as e:
            with excutils.save_and_reraise_exception(logger=LOG) as sare:
                # If the LPAR was not found, raise a more descriptive error
                if e.response.status == 404:
                    # Suppress the re-raise of the original HttpError; raise
                    # InstanceNotFound in its place. Other HTTP errors are
                    # re-raised by the context manager.
                    sare.reraise = False
                    raise exc.InstanceNotFound(instance_id=instance.uuid)
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach the volume to the instance using the connection_info.

        :param context: security context
        :param connection_info: Volume connection information from the block
                                device mapping
        :param instance: nova.objects.instance.Instance
        :param mountpoint: Unused
        :param disk_bus: Unused
        :param device_type: Unused
        :param encryption: Unused
        """
        # NOTE(review): mountpoint/disk_bus/device_type/encryption are
        # accepted for virt-driver API compatibility but ignored here.
        self._log_operation('attach_volume', instance)

        # Define the flow
        flow = tf_lf.Flow("attach_volume")

        # Build the driver
        vol_drv = volume.build_volume_driver(self.adapter, instance,
                                             connection_info)

        # Add the volume attach to the flow.
        flow.add(tf_stg.AttachVolume(vol_drv))

        # Run the flow
        tf_base.run(flow, instance=instance)

        # The volume connector may have updated the system metadata. Save
        # the instance to persist the data. Spawn/destroy auto saves instance,
        # but the attach does not. Detach does not need this save - as the
        # detach flows do not (currently) modify system metadata. May need
        # to revise in the future as volume connectors evolve.
        instance.save()
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Detach the volume attached to the instance.
:param context: security context
:param connection_info: Volume connection information from the block
device mapping
:param instance: nova.objects.instance.Instance
:param mountpoint: Unused
:param encryption: Unused
"""
self._log_operation('detach_volume', instance)
# Define the flow
flow = tf_lf.Flow("detach_volume")
# Get a volume adapter for this volume
vol_drv = volume.build_volume_driver(self.adapter, instance,
connection_info)
# Add a task to detach the volume
flow.add(tf_stg.DetachVolume(vol_drv))
# Run the flow
tf_base.run(flow, instance=instance)
def extend_volume(self, connection_info, instance):
"""Extend the disk attached to the instance.
:param dict connection_info: The connection for the extended volume.
:param nova.objects.instance.Instance instance:
The instance whose volume gets extended.
:return: None
"""
vol_drv = volume.build_volume_driver(
self.adapter, instance, connection_info)
vol_drv.extend_volume()
def _vol_drv_iter(self, context, instance, bdms, stg_ftsk=None):
"""Yields a bdm and volume driver.
:param context: security context
:param instance: nova.objects.instance.Instance
:param bdms: block device mappings
:param stg_ftsk: storage FeedTask
"""
# Get a volume driver for each volume
for bdm in bdms or []:
conn_info = bdm.get('connection_info')
vol_drv = volume.build_volume_driver(self.adapter, instance,
conn_info, stg_ftsk=stg_ftsk)
yield bdm, vol_drv
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing information about
the system that will be making the connection.
:param instance: nova.objects.instance.Instance
"""
# Put the values in the connector
connector = {}
wwpn_list = fcvscsi.wwpns(self.adapter)
if wwpn_list is not None:
connector["wwpns"] = wwpn_list
connector["multipath"] = False
connector['host'] = CONF.host
connector['initiator'] = None
return connector
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# Copyright Iris contributors\n#\n# This file is part of Iris and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\"\"\"\nA package providing :c... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# Copyright Iris contributors\n#\n# This file is part of Iris and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\"\"\"\nA pack... | ```python
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A package providing :class:`iris.cube.Cube` analysis support.
This module defines a suite of :class:`~iris.analysis.Aggregator` instances,
which are used to specify the statistical measure to calculate over a
:class:`~iris.cube.Cube`, using methods such as
:meth:`~iris.cube.Cube.aggregated_by` and :meth:`~iris.cube.Cube.collapsed`.
The :class:`~iris.analysis.Aggregator` is a convenience class that allows
specific statistical aggregation operators to be defined and instantiated.
These operators can then be used to collapse, or partially collapse, one or
more dimensions of a :class:`~iris.cube.Cube`, as discussed in
:ref:`cube-statistics`.
In particular, :ref:`cube-statistics-collapsing` discusses how to use
:const:`MEAN` to average over one dimension of a :class:`~iris.cube.Cube`,
and also how to perform weighted :ref:`cube-statistics-collapsing-average`.
While :ref:`cube-statistics-aggregated-by` shows how to aggregate similar
groups of data points along a single dimension, to result in fewer points
in that dimension.
The gallery contains several interesting worked examples of how an
:class:`~iris.analysis.Aggregator` may be used, including:
* :ref:`sphx_glr_generated_gallery_meteorology_plot_COP_1d.py`
* :ref:`sphx_glr_generated_gallery_general_plot_SOI_filtering.py`
* :ref:`sphx_glr_generated_gallery_meteorology_plot_hovmoller.py`
* :ref:`sphx_glr_generated_gallery_meteorology_plot_lagged_ensemble.py`
* :ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py`
"""
from collections import OrderedDict
from collections.abc import Iterable
from functools import wraps
import dask.array as da
import numpy as np
import numpy.ma as ma
import scipy.interpolate
import scipy.stats.mstats
import iris._lazy_data
from iris.analysis._area_weighted import AreaWeightedRegridder
from iris.analysis._interpolation import (
EXTRAPOLATION_MODES,
RectilinearInterpolator,
)
from iris.analysis._regrid import CurvilinearRegridder, RectilinearRegridder
import iris.coords
from iris.exceptions import LazyAggregatorError
# Public API of ``iris.analysis``: the ready-made aggregator instances
# (COUNT .. WPERCENTILE), the aggregator classes, and the regridding /
# interpolation scheme classes defined in this module.
__all__ = (
    "COUNT",
    "GMEAN",
    "HMEAN",
    "MAX",
    "MEAN",
    "MEDIAN",
    "MIN",
    "PEAK",
    "PERCENTILE",
    "PROPORTION",
    "RMS",
    "STD_DEV",
    "SUM",
    "VARIANCE",
    "WPERCENTILE",
    "Aggregator",
    "WeightedAggregator",
    "clear_phenomenon_identity",
    "Linear",
    "AreaWeighted",
    "Nearest",
    "UnstructuredNearest",
    "PointInCell",
)
class _CoordGroup:
"""
Represents a list of coordinates, one for each given cube. Which can be
operated on conveniently.
"""
def __init__(self, coords, cubes):
self.coords = coords
self.cubes = cubes
def __iter__(self):
return iter(self.coords)
def __getitem__(self, key):
return list(self).__getitem__(key)
def _first_coord_w_cube(self):
"""
Return the first none None coordinate, and its associated cube
as (cube, coord).
"""
return next(
filter(
lambda cube_coord: cube_coord[1] is not None,
zip(self.cubes, self.coords),
)
)
def __repr__(self):
# No exact repr, so a helpful string is given instead
return (
"["
+ ", ".join(
[
coord.name() if coord is not None else "None"
for coord in self
]
)
+ "]"
)
def name(self):
_, first_coord = self._first_coord_w_cube()
return first_coord.name()
def _oid_tuple(self):
"""Return a tuple of object ids for this _CoordGroup's coordinates"""
return tuple((id(coord) for coord in self))
def __hash__(self):
return hash(self._oid_tuple())
def __eq__(self, other):
# equals is overridden to guarantee that two _CoordGroups are only
# equal if their coordinates are the same objects (by object id)
# this is useful in the context of comparing _CoordGroups if they are
# part of a set operation such as that in coord_compare, but
# not useful in many other circumstances (i.e. deepcopying a
# _CoordGroups instance would mean that copy != original)
result = NotImplemented
if isinstance(other, _CoordGroup):
result = self._oid_tuple() == other._oid_tuple()
return result
def matches(self, predicate, default_val=True):
"""
Apply a function to a coord group returning a list of bools
for each coordinate.
The predicate function should take exactly 2 arguments (cube, coord)
and return a boolean.
If None is in the coord group then return True.
"""
for cube, coord in zip(self.cubes, self.coords):
if coord is None:
yield default_val
else:
yield predicate(cube, coord)
def matches_all(self, predicate):
"""
Return whether all coordinates match the given function after running
it through :meth:`matches`.
If None is in the coord group then return True.
"""
return all(self.matches(predicate))
def matches_any(self, predicate):
"""
Return whether any coordinates match the given function after running
it through :meth:`matches`.
If None is in the coord group then return True.
"""
return any(self.matches(predicate))
def _dimensional_metadata_comparison(*cubes, object_get=None):
    """
    Convenience function to help compare coordinates, cell-measures or
    ancillary-variables, on one or more cubes, by their metadata.

    .. Note::

        Up to Iris 2.x, this _used_ to be the public API method
        "iris.analysis.coord_comparison".
        It has since been generalised, and made private.
        However, the cube elements handled are still mostly referred to as 'coords' /
        'coordinates' throughout, for simplicity : In fact, they will all be either
        `iris.coords.Coord`, `iris.coords.CellMeasure` or
        `iris.coords.AncillaryVariable`, the cube element type being controlled by the
        'object_get' keyword.

    Args:

    * cubes (iterable of `iris.cube.Cube`):
        a set of cubes whose coordinates, cell-measures or ancillary-variables are to
        be compared.

    Kwargs:

    * object_get (callable(cube) or None):
        If not None, this must be a cube method returning a list of all cube elements
        of the required type, i.e. one of `iris.cube.Cube.coords`,
        `iris.cube.Cube.cell_measures`, or `iris.cube.Cube.ancillary_variables`.
        If not specified, defaults to `iris.cube.Cube.coords`

    Returns:
        result (dict mapping string: list of _CoordGroup):
            A dictionary whose keys are match categories and values are groups of
            coordinates, cell-measures or ancillary-variables.

    The values of the returned dictionary are lists of _CoordGroup representing
    grouped coordinates. Each _CoordGroup contains all the input 'cubes', and a
    matching list of the coord within each cube that matches some specific CoordDefn
    (or maybe None).

    The keys of the returned dictionary are strings naming 'categories' : Each
    represents a statement,
    "Given these cubes list the coordinates which,
    when grouped by metadata, are/have..."

    Returned Keys:

    * grouped_coords
        A list of coordinate groups of all the coordinates grouped together
        by their coordinate definition
    * ungroupable
        A list of coordinate groups which contain at least one None,
        meaning not all Cubes provide an equivalent coordinate
    * not_equal
        A list of coordinate groups of which not all are equal
        (superset of ungroupable)
    * no_data_dimension
        A list of coordinate groups of which all have no data dimensions on
        their respective cubes
    * scalar
        A list of coordinate groups of which all have shape (1, )
    * non_equal_data_dimension
        A list of coordinate groups of which not all have the same
        data dimension on their respective cubes
    * non_equal_shape
        A list of coordinate groups of which not all have the same shape
    * equal_data_dimension
        A list of coordinate groups of which all have the same data dimension
        on their respective cubes
    * equal
        A list of coordinate groups of which all are equal
    * ungroupable_and_dimensioned
        A list of coordinate groups of which not all cubes had an equivalent
        (in metadata) coordinate which also describe a data dimension
    * dimensioned
        A list of coordinate groups of which all describe a data dimension on
        their respective cubes
    * ignorable
        A list of scalar, ungroupable non_equal coordinate groups
    * resamplable
        A list of equal, different data dimensioned coordinate groups
    * transposable
        A list of non equal, same data dimensioned, non scalar coordinate groups

    Example usage::

        result = _dimensional_metadata_comparison(cube1, cube2)
        print('All equal coordinates: ', result['equal'])

    """
    if object_get is None:
        from iris.cube import Cube

        object_get = Cube.coords

    all_coords = [object_get(cube) for cube in cubes]
    grouped_coords = []

    # set of coordinates id()s of coordinates which have been processed
    processed_coords = set()

    # iterate through all cubes, then by each coordinate in the cube looking
    # for coordinate groups
    for cube, coords in zip(cubes, all_coords):
        for coord in coords:
            # if this coordinate has already been processed, then continue on
            # to the next one
            if id(coord) in processed_coords:
                continue

            # setup a list to hold the coordinates which will be turned into a
            # coordinate group and added to the grouped_coords list
            this_coords_coord_group = []

            for other_cube_i, other_cube in enumerate(cubes):
                # setup a variable to hold the coordinate which will be added
                # to the coordinate group for this cube
                coord_to_add_to_group = None

                # don't bother checking if the current cube is the one we are
                # trying to match coordinates too
                if other_cube is cube:
                    coord_to_add_to_group = coord
                else:
                    # iterate through all coordinates in this cube
                    for other_coord in all_coords[other_cube_i]:
                        # for optimisation, check that the name is equivalent
                        # *before* checking all of the metadata is equivalent
                        eq = (
                            other_coord is coord
                            or other_coord.name() == coord.name()
                            and other_coord.metadata == coord.metadata
                        )
                        if eq:
                            coord_to_add_to_group = other_coord
                            break

                # add the coordinate to the group
                if coord_to_add_to_group is None:
                    this_coords_coord_group.append(None)
                else:
                    this_coords_coord_group.append(coord_to_add_to_group)
                    # add the object id of the coordinate which is being added
                    # to the group to the processed coordinate list
                    processed_coords.add(id(coord_to_add_to_group))

            # add the group to the list of groups
            grouped_coords.append(_CoordGroup(this_coords_coord_group, cubes))

    # define some sets which will be populated in the subsequent loop
    ungroupable = set()
    different_shaped_coords = set()
    different_data_dimension = set()
    no_data_dimension = set()
    scalar_coords = set()
    not_equal = set()

    for coord_group in grouped_coords:
        first_cube, first_coord = coord_group._first_coord_w_cube()

        # Get all coordinate groups which aren't complete (i.e. there is a
        # None in the group)
        def coord_is_None_fn(cube, coord):
            return coord is None

        if coord_group.matches_any(coord_is_None_fn):
            ungroupable.add(coord_group)

        # Get all coordinate groups which don't all equal one another
        # (None -> group not all equal)
        def not_equal_fn(cube, coord):
            return coord != first_coord

        if coord_group.matches_any(not_equal_fn):
            not_equal.add(coord_group)

        # Get all coordinate groups which don't all share the same shape
        # (None -> group has different shapes)
        def diff_shape_fn(cube, coord):
            return coord.shape != first_coord.shape

        if coord_group.matches_any(diff_shape_fn):
            different_shaped_coords.add(coord_group)

        # Get all coordinate groups which don't all share the same data
        # dimension on their respective cubes
        # (None -> group describes a different dimension)
        def diff_data_dim_fn(cube, coord):
            return coord.cube_dims(cube) != first_coord.cube_dims(first_cube)

        if coord_group.matches_any(diff_data_dim_fn):
            different_data_dimension.add(coord_group)

        # get all coordinate groups which don't describe a dimension
        # (None -> doesn't describe a dimension)
        def no_data_dim_fn(cube, coord):
            return coord.cube_dims(cube) == ()

        if coord_group.matches_all(no_data_dim_fn):
            no_data_dimension.add(coord_group)

        # get all coordinate groups which are scalar, i.e. of shape (1,)
        # (None -> not a scalar coordinate)
        # NOTE: previously this helper was *also* named "no_data_dim_fn",
        # shadowing the one above; renamed for clarity (behaviour
        # unchanged).
        def scalar_coord_fn(cube, coord):
            return coord.shape == (1,)

        if coord_group.matches_all(scalar_coord_fn):
            scalar_coords.add(coord_group)

    result = {}
    result["grouped_coords"] = set(grouped_coords)
    result["not_equal"] = not_equal
    result["ungroupable"] = ungroupable
    result["no_data_dimension"] = no_data_dimension
    result["scalar"] = scalar_coords
    result["non_equal_data_dimension"] = different_data_dimension
    result["non_equal_shape"] = different_shaped_coords

    result["equal_data_dimension"] = (
        result["grouped_coords"] - result["non_equal_data_dimension"]
    )
    result["equal"] = result["grouped_coords"] - result["not_equal"]
    result["dimensioned"] = (
        result["grouped_coords"] - result["no_data_dimension"]
    )
    result["ungroupable_and_dimensioned"] = (
        result["ungroupable"] & result["dimensioned"]
    )
    result["ignorable"] = (
        result["not_equal"] | result["ungroupable"]
    ) & result["no_data_dimension"]
    result["resamplable"] = (
        result["not_equal"] & result["equal_data_dimension"] - result["scalar"]
    )
    result["transposable"] = (
        result["equal"] & result["non_equal_data_dimension"]
    )

    # for convenience, turn all of the sets in the dictionary into lists,
    # sorted by the name of the group
    for key, groups in result.items():
        result[key] = sorted(groups, key=lambda group: group.name())

    return result
class _Aggregator:
    """
    The :class:`_Aggregator` base class provides common aggregation
    functionality.
    """

    def __init__(
        self, cell_method, call_func, units_func=None, lazy_func=None, **kwargs
    ):
        r"""
        Create an aggregator for the given :data:`call_func`.

        Args:

        * cell_method (string):
            Cell method definition formatter. Used in the fashion
            "cell_method.format(\**kwargs)", to produce a cell-method string
            which can include keyword values.

        * call_func (callable):
            | *Call signature*: (data, axis=None, \**kwargs)

            Data aggregation function.
            Returns an aggregation result, collapsing the 'axis' dimension of
            the 'data' argument.

        Kwargs:

        * units_func (callable):
            | *Call signature*: (units)

            If provided, called to convert a cube's units.
            Returns an :class:`cf_units.Unit`, or a
            value that can be made into one.

        * lazy_func (callable or None):
            An alternative to :data:`call_func` implementing a lazy
            aggregation. Note that, it need not support all features of the
            main operation, but should raise an error in unhandled cases.

        Additional kwargs::
            Passed through to :data:`call_func` and :data:`lazy_func`.

        Aggregators are used by cube aggregation methods such as
        :meth:`~iris.cube.Cube.collapsed` and
        :meth:`~iris.cube.Cube.aggregated_by`. For example::

            result = cube.collapsed('longitude', iris.analysis.MEAN)

        A variety of ready-made aggregators are provided in this module, such
        as :data:`~iris.analysis.MEAN` and :data:`~iris.analysis.MAX`. Custom
        aggregators can also be created for special purposes, see
        :ref:`sphx_glr_generated_gallery_general_plot_custom_aggregation.py`
        for a worked example.
        """
        #: Cube cell method string.
        self.cell_method = cell_method
        #: Data aggregation function.
        self.call_func = call_func
        #: Unit conversion function.
        self.units_func = units_func
        #: Lazy aggregation function, may be None to indicate that a lazy
        #: operation is not available.
        self.lazy_func = lazy_func
        # Default keywords for the aggregation; merged into (and overridden
        # by) the keywords of each aggregate()/lazy_aggregate() call.
        self._kwargs = kwargs

    def lazy_aggregate(self, data, axis, **kwargs):
        """
        Perform aggregation over the data with a lazy operation, analogous to
        the 'aggregate' result.

        Keyword arguments are passed through to the data aggregation function
        (for example, the "percent" keyword for a percentile aggregator).
        This function is usually used in conjunction with update_metadata(),
        which should be passed the same keyword arguments.

        Args:

        * data (array):
            A lazy array (:class:`dask.array.Array`).

        * axis (int or list of int):
            The dimensions to aggregate over -- note that this is defined
            differently to the 'aggregate' method 'axis' argument, which only
            accepts a single dimension index.

        Kwargs:

        * kwargs:
            All keyword arguments are passed through to the data aggregation
            function.

        Returns:
            A lazy array representing the aggregation operation
            (:class:`dask.array.Array`).
        """
        if self.lazy_func is None:
            # This aggregator was built without a lazy implementation.
            msg = "{} aggregator does not support lazy operation."
            raise LazyAggregatorError(msg.format(self.name()))

        # Combine keyword args with `kwargs` taking priority over those
        # provided to __init__.
        kwargs = dict(list(self._kwargs.items()) + list(kwargs.items()))

        return self.lazy_func(data, axis=axis, **kwargs)

    def aggregate(self, data, axis, **kwargs):
        """
        Perform the aggregation function given the data.

        Keyword arguments are passed through to the data aggregation function
        (for example, the "percent" keyword for a percentile aggregator).
        This function is usually used in conjunction with update_metadata(),
        which should be passed the same keyword arguments.

        Args:

        * data (array):
            Data array.

        * axis (int):
            Axis to aggregate over.

        Kwargs:

        * mdtol (float):
            Tolerance of missing data. The value returned will be masked if
            the fraction of data to missing data is less than or equal to
            mdtol. mdtol=0 means no missing data is tolerated while mdtol=1
            will return the resulting value from the aggregation function.
            Defaults to 1.

        * kwargs:
            All keyword arguments apart from those specified above, are
            passed through to the data aggregation function.

        Returns:
            The aggregated data.
        """
        # Merge default keywords with call keywords; call-site values win.
        kwargs = dict(list(self._kwargs.items()) + list(kwargs.items()))
        # mdtol is handled here, not by the aggregation function.
        mdtol = kwargs.pop("mdtol", None)

        result = self.call_func(data, axis=axis, **kwargs)
        if mdtol is not None and ma.isMaskedArray(data):
            # Mask each aggregated point whose fraction of contributing
            # (unmasked) input points falls below the (1 - mdtol) threshold.
            fraction_not_missing = data.count(axis=axis) / data.shape[axis]
            mask_update = 1 - mdtol > fraction_not_missing
            if ma.isMaskedArray(result):
                result.mask = result.mask | mask_update
            else:
                result = ma.array(result, mask=mask_update)

        return result

    def update_metadata(self, cube, coords, **kwargs):
        """
        Update common cube metadata w.r.t the aggregation function.

        Args:

        * cube (:class:`iris.cube.Cube`):
            Source cube that requires metadata update.

        * coords (:class:`iris.coords.Coord`):
            The one or more coordinates that were aggregated.

        Kwargs:

        * This function is intended to be used in conjunction with aggregate()
          and should be passed the same keywords (for example, the "ddof"
          keyword for a standard deviation aggregator).
        """
        # Update the units if required.
        if self.units_func is not None:
            cube.units = self.units_func(cube.units)

    def post_process(self, collapsed_cube, data_result, coords, **kwargs):
        """
        Process the result from :func:`iris.analysis.Aggregator.aggregate`.

        Args:

        * collapsed_cube:
            A :class:`iris.cube.Cube`.

        * data_result:
            Result from :func:`iris.analysis.Aggregator.aggregate`

        * coords:
            The one or more coordinates that were aggregated over.

        Kwargs:

        * This function is intended to be used in conjunction with aggregate()
          and should be passed the same keywords (for example, the "ddof"
          keyword from a standard deviation aggregator).

        Returns:
            The collapsed cube with its aggregated data payload.
        """
        # Base behaviour: simply attach the aggregated data to the cube.
        collapsed_cube.data = data_result
        return collapsed_cube

    def aggregate_shape(self, **kwargs):
        """
        The shape of the new dimension/s created by the aggregator.

        Kwargs:

        * This function is intended to be used in conjunction with aggregate()
          and should be passed the same keywords.

        Returns:
            A tuple of the aggregate shape.
        """
        # By default an aggregation introduces no new dimensions.
        return ()

    def name(self):
        """
        Returns the name of the aggregator.
        """
        try:
            name = "_".join(self.cell_method.split())
        except AttributeError:
            # cell_method may be None (e.g. for percentile aggregators,
            # which override name() with their own identifier).
            name = "unknown"
        return name
class PercentileAggregator(_Aggregator):
    """
    The :class:`PercentileAggregator` class provides percentile aggregation
    functionality.

    This aggregator *may* introduce a new dimension to the data for the
    statistic being calculated, but only if more than one quantile is required.
    For example, calculating the 50th and 90th percentile will result in a new
    data dimension with an extent of 2, for each of the quantiles calculated.
    """

    def __init__(self, units_func=None, lazy_func=None, **kwargs):
        """
        Create a percentile aggregator.

        Kwargs:

        * units_func (callable):
            | *Call signature*: (units)

            If provided, called to convert a cube's units.
            Returns an :class:`cf_units.Unit`, or a
            value that can be made into one.

        * lazy_func (callable or None):
            An alternative to :data:`call_func` implementing a lazy
            aggregation. Note that, it need not support all features of the
            main operation, but should raise an error in unhandled cases.

        Additional kwargs::
            Passed through to :data:`call_func` and :data:`lazy_func`.

        This aggregator can be used by cube aggregation methods such as
        :meth:`~iris.cube.Cube.collapsed` and
        :meth:`~iris.cube.Cube.aggregated_by`. For example::

            cube.collapsed('longitude', iris.analysis.PERCENTILE, percent=50)
        """
        # Aggregator identifier, used by name() and the derived coordinate
        # name (there is no cell method for a percentile aggregation).
        self._name = "percentile"
        # Keyword(s) that every aggregate() call must supply.
        self._args = ["percent"]
        _Aggregator.__init__(
            self,
            None,
            _percentile,
            units_func=units_func,
            lazy_func=lazy_func,
            **kwargs,
        )

    def aggregate(self, data, axis, **kwargs):
        """
        Perform the percentile aggregation over the given data.

        Keyword arguments are passed through to the data aggregation function
        (for example, the "percent" keyword for a percentile aggregator).
        This function is usually used in conjunction with update_metadata(),
        which should be passed the same keyword arguments.

        Args:

        * data (array):
            Data array.

        * axis (int):
            Axis to aggregate over.

        Kwargs:

        * mdtol (float):
            Tolerance of missing data. The value returned will be masked if
            the fraction of data to missing data is less than or equal to
            mdtol. mdtol=0 means no missing data is tolerated while mdtol=1
            will return the resulting value from the aggregation function.
            Defaults to 1.

        * kwargs:
            All keyword arguments apart from those specified above, are
            passed through to the data aggregation function.

        Returns:
            The aggregated data.
        """
        # Validate the mandatory keywords ("percent") before delegating.
        msg = "{} aggregator requires the mandatory keyword argument {!r}."
        for arg in self._args:
            if arg not in kwargs:
                raise ValueError(msg.format(self.name(), arg))

        return _Aggregator.aggregate(self, data, axis, **kwargs)

    def post_process(self, collapsed_cube, data_result, coords, **kwargs):
        """
        Process the result from :func:`iris.analysis.Aggregator.aggregate`.

        Args:

        * collapsed_cube:
            A :class:`iris.cube.Cube`.

        * data_result:
            Result from :func:`iris.analysis.Aggregator.aggregate`

        * coords:
            The one or more coordinates that were aggregated over.

        Kwargs:

        * This function is intended to be used in conjunction with aggregate()
          and should be passed the same keywords (for example, the "percent"
          keywords from a percentile aggregator).

        Returns:
            The collapsed cube with its aggregated data payload.
        """
        cubes = iris.cube.CubeList()
        # The additive aggregator requires a mandatory keyword.
        msg = "{} aggregator requires the mandatory keyword argument {!r}."
        for arg in self._args:
            if arg not in kwargs:
                raise ValueError(msg.format(self.name(), arg))

        points = kwargs[self._args[0]]
        # Derive the name of the additive coordinate,
        # e.g. "percentile_over_longitude".
        names = [coord.name() for coord in coords]
        coord_name = "{}_over_{}".format(self.name(), "_".join(names))

        if not isinstance(points, Iterable):
            points = [points]

        # Decorate a collapsed cube with a scalar additive coordinate
        # for each of the additive points, to result in a possibly higher
        # order cube.
        for point in points:
            cube = collapsed_cube.copy()
            coord = iris.coords.AuxCoord(
                point, long_name=coord_name, units="percent"
            )
            cube.add_aux_coord(coord)
            cubes.append(cube)

        # Merge the per-point cubes back into one; with multiple points the
        # new additive coordinate becomes a leading dimension.
        collapsed_cube = cubes.merge_cube()

        # Ensure to roll the data payload additive dimension, which should
        # be the last dimension for an additive operation with more than
        # one point, to be the first dimension, thus matching the collapsed
        # cube.
        if self.aggregate_shape(**kwargs):
            # Roll the last additive dimension to be the first.
            data_result = np.rollaxis(data_result, -1)

        # Marry the collapsed cube and the data payload together.
        result = _Aggregator.post_process(
            self, collapsed_cube, data_result, coords, **kwargs
        )

        return result

    def aggregate_shape(self, **kwargs):
        """
        The shape of the additive dimension created by the aggregator.

        Kwargs:

        * This function is intended to be used in conjunction with aggregate()
          and should be passed the same keywords.

        Returns:
            A tuple of the additive dimension shape.
        """
        # Validate the mandatory keywords ("percent").
        msg = "{} aggregator requires the mandatory keyword argument {!r}."
        for arg in self._args:
            if arg not in kwargs:
                raise ValueError(msg.format(self.name(), arg))

        points = kwargs[self._args[0]]
        shape = ()

        if not isinstance(points, Iterable):
            points = [points]

        points = np.array(points)

        # Only more than one requested point adds a new dimension.
        if points.shape > (1,):
            shape = points.shape

        return shape

    def name(self):
        """
        Returns the name of the aggregator.
        """
        return self._name
class WeightedPercentileAggregator(PercentileAggregator):
    """
    Percentile aggregation with per-point weighting.

    As with :class:`PercentileAggregator`, a new data dimension *may* be
    introduced for the statistic, but only when more than one percentile
    point is requested (e.g. the 50th and 90th percentiles together yield
    an extra dimension of extent 2).
    """

    def __init__(self, units_func=None, lazy_func=None, **kwargs):
        """
        Create a weighted percentile aggregator.

        Kwargs:

        * units_func (callable):
            | *Call signature*: (units)

            Optional conversion applied to a cube's units; must return an
            :class:`cf_units.Unit` or something convertible to one.

        * lazy_func (callable or None):
            Optional lazy counterpart of the aggregation. It need not cover
            every feature of the eager operation, but must raise on cases it
            does not handle.

        Additional kwargs::
            Passed through to :data:`call_func` and :data:`lazy_func`.

        Used with cube aggregation methods such as
        :meth:`~iris.cube.Cube.collapsed` and
        :meth:`~iris.cube.Cube.aggregated_by`. For example::

            cube.collapsed('longitude', iris.analysis.WPERCENTILE, percent=50,
                           weights=iris.analysis.cartography.area_weights(cube))
        """
        # Deliberately bypass PercentileAggregator.__init__ so that the
        # weighted percentile function is installed as the call_func.
        _Aggregator.__init__(
            self,
            None,
            _weighted_percentile,
            units_func=units_func,
            lazy_func=lazy_func,
            **kwargs,
        )

        self._name = "weighted_percentile"
        self._args = ["percent", "weights"]

        #: A list of keywords associated with weighted behaviour.
        self._weighting_keywords = ["returned", "weights"]

    def post_process(self, collapsed_cube, data_result, coords, **kwargs):
        """
        Process the result from :func:`iris.analysis.Aggregator.aggregate`.

        Returns a tuple(cube, weights) if a tuple(data, weights) was returned
        from :func:`iris.analysis.Aggregator.aggregate`.

        Args:

        * collapsed_cube:
            A :class:`iris.cube.Cube`.

        * data_result:
            Result from :func:`iris.analysis.Aggregator.aggregate`

        * coords:
            The one or more coordinates that were aggregated over.

        Kwargs:

        * Pass the same keywords given to aggregate() (for example, the
          "weights" keyword).

        Returns:
            The collapsed cube with its aggregated data payload, or a
            (cube, weights) pair when the "returned" keyword is True.
        """
        if not kwargs.get("returned", False):
            return PercentileAggregator.post_process(
                self, collapsed_cube, data_result, coords, **kwargs
            )

        # A (data, weights) pair was produced: package the data payload
        # into the cube and hand back a (cube, weights) tuple.
        payload, weights_total = data_result
        collapsed_cube = PercentileAggregator.post_process(
            self, collapsed_cube, payload, coords, **kwargs
        )
        return (collapsed_cube, weights_total)
class Aggregator(_Aggregator):
    """
    An :class:`_Aggregator` that additionally records a cell method on the
    collapsed cube, providing the common aggregation behaviour.
    """

    def update_metadata(self, cube, coords, **kwargs):
        """
        Update cube cell method metadata w.r.t the aggregation function.

        Args:

        * cube (:class:`iris.cube.Cube`):
            Source cube that requires metadata update.

        * coords (:class:`iris.coords.Coord`):
            The one or more coordinates that were aggregated.

        Kwargs:

        * Pass the same keywords given to aggregate() (for example, the
          "ddof" keyword for a standard deviation aggregator), as they may
          appear in the formatted cell method string.
        """
        # Base-class behaviour first (unit conversion).
        _Aggregator.update_metadata(self, cube, coords, **kwargs)

        # Merge the aggregator's default keywords with the call keywords;
        # the call-site values take priority.
        merged_kwargs = dict(list(self._kwargs.items()) + list(kwargs.items()))

        if not isinstance(coords, (list, tuple)):
            coords = [coords]

        names = []
        for item in coords:
            if not isinstance(item, iris.coords.Coord):
                raise TypeError(
                    "Coordinate instance expected to the Aggregator object."
                )
            names.append(item.name())

        # Record the aggregation as a cell method on the cube.
        cube.add_cell_method(
            iris.coords.CellMethod(
                self.cell_method.format(**merged_kwargs), names
            )
        )
class WeightedAggregator(Aggregator):
    """
    An :class:`Aggregator` convenience class for operations that accept
    weighting keywords.
    """

    def __init__(
        self, cell_method, call_func, units_func=None, lazy_func=None, **kwargs
    ):
        """
        Create a weighted aggregator for the given :data:`call_func`.

        Args:

        * cell_method (string):
            Cell method string that supports string format substitution.

        * call_func (callable):
            Data aggregation function. Call signature `(data, axis, **kwargs)`.

        Kwargs:

        * units_func (callable):
            Units conversion function.

        * lazy_func (callable or None):
            Optional lazy counterpart of the aggregation. It need not cover
            every feature of the eager operation, but must raise on cases it
            does not handle.

        Additional kwargs:
            Passed through to :data:`call_func` and :data:`lazy_func`.
        """
        Aggregator.__init__(
            self,
            cell_method,
            call_func,
            units_func=units_func,
            lazy_func=lazy_func,
            **kwargs,
        )

        #: A list of keywords that trigger weighted behaviour.
        self._weighting_keywords = ["returned", "weights"]

    def uses_weighting(self, **kwargs):
        """
        Determine whether this aggregator uses weighting.

        Kwargs:

        * kwargs:
            Arguments to filter of weighted keywords.

        Returns:
            Boolean.
        """
        # Weighted behaviour is triggered by the mere presence of any
        # weighting keyword.
        return any(key in self._weighting_keywords for key in kwargs)

    def post_process(self, collapsed_cube, data_result, coords, **kwargs):
        """
        Process the result from :func:`iris.analysis.Aggregator.aggregate`.

        Returns a tuple(cube, weights) if a tuple(data, weights) was returned
        from :func:`iris.analysis.Aggregator.aggregate`.

        Args:

        * collapsed_cube:
            A :class:`iris.cube.Cube`.

        * data_result:
            Result from :func:`iris.analysis.Aggregator.aggregate`

        * coords:
            The one or more coordinates that were aggregated over.

        Kwargs:

        * Pass the same keywords given to aggregate() (for example, the
          "weights" keywords from a mean aggregator).

        Returns:
            The collapsed cube with its aggregated data payload, or a
            (cube, weights) pair when the "returned" keyword is True.
        """
        if not kwargs.get("returned", False):
            return Aggregator.post_process(
                self, collapsed_cube, data_result, coords, **kwargs
            )

        # A (data, weights) pair was produced: attach the data to the cube
        # and return the pair.
        collapsed_cube.data, collapsed_weights = data_result
        return (collapsed_cube, collapsed_weights)
def _build_dask_mdtol_function(dask_stats_function):
"""
Make a wrapped dask statistic function that supports the 'mdtol' keyword.
'dask_function' must be a dask statistical function, compatible with the
call signature : "dask_stats_function(data, axis=axis, **kwargs)".
It must be masked-data tolerant, i.e. it ignores masked input points and
performs a calculation on only the unmasked points.
For example, mean([1, --, 2]) = (1 + 2) / 2 = 1.5.
The returned value is a new function operating on dask arrays.
It has the call signature `stat(data, axis=-1, mdtol=None, **kwargs)`.
"""
@wraps(dask_stats_function)
def inner_stat(array, axis=-1, mdtol=None, **kwargs):
# Call the statistic to get the basic result (missing-data tolerant).
dask_result = dask_stats_function(array, axis=axis, **kwargs)
if mdtol is None or mdtol >= 1.0:
result = dask_result
else:
# Build a lazy computation to compare the fraction of missing
# input points at each output point to the 'mdtol' threshold.
point_mask_counts = da.sum(da.ma.getmaskarray(array), axis=axis)
points_per_calc = array.size / dask_result.size
masked_point_fractions = point_mask_counts / points_per_calc
boolean_mask = masked_point_fractions > mdtol
# Return an mdtol-masked version of the basic result.
result = da.ma.masked_array(
da.ma.getdata(dask_result), boolean_mask
)
return result
return inner_stat
def _percentile(data, axis, percent, fast_percentile_method=False, **kwargs):
"""
The percentile aggregator is an additive operation. This means that
it *may* introduce a new dimension to the data for the statistic being
calculated, but only if more than one percentile point is requested.
If a new additive dimension is formed, then it will always be the last
dimension of the resulting percentile data payload.
Kwargs:
* fast_percentile_method (boolean) :
When set to True, uses the numpy.percentiles method as a faster
alternative to the scipy.mstats.mquantiles method. Does not handle
masked arrays.
"""
# Ensure that the target axis is the last dimension.
data = np.rollaxis(data, axis, start=data.ndim)
shape = data.shape[:-1]
# Flatten any leading dimensions.
if shape:
data = data.reshape([np.prod(shape), data.shape[-1]])
# Perform the percentile calculation.
if fast_percentile_method:
msg = "Cannot use fast np.percentile method with masked array."
if ma.is_masked(data):
raise TypeError(msg)
result = np.percentile(data, percent, axis=-1)
result = result.T
else:
quantiles = np.array(percent) / 100.0
result = scipy.stats.mstats.mquantiles(
data, quantiles, axis=-1, **kwargs
)
if not ma.isMaskedArray(data) and not ma.is_masked(result):
result = np.asarray(result)
else:
result = ma.MaskedArray(result)
# Ensure to unflatten any leading dimensions.
if shape:
if not isinstance(percent, Iterable):
percent = [percent]
percent = np.array(percent)
# Account for the additive dimension.
if percent.shape > (1,):
shape += percent.shape
result = result.reshape(shape)
# Check whether to reduce to a scalar result, as per the behaviour
# of other aggregators.
if result.shape == (1,) and quantiles.ndim == 0:
result = result[0]
return result
def _weighted_quantile_1D(data, weights, quantiles, **kwargs):
"""
Compute the weighted quantile of a 1D numpy array.
Adapted from `wquantiles <https://github.com/nudomarinero/wquantiles/>`_
Args:
* data (array)
One dimensional data array
* weights (array)
Array of the same size of `data`. If data is masked, weights must have
matching mask.
* quantiles : (float or sequence of floats)
Quantile(s) to compute. Must have a value between 0 and 1.
**kwargs
passed to `scipy.interpolate.interp1d`
Returns:
array or float. Calculated quantile values (set to np.nan wherever sum
of weights is zero or masked)
"""
# Return np.nan if no useable points found
if np.isclose(weights.sum(), 0.0) or ma.is_masked(weights.sum()):
return np.resize(np.array(np.nan), len(quantiles))
# Sort the data
ind_sorted = ma.argsort(data)
sorted_data = data[ind_sorted]
sorted_weights = weights[ind_sorted]
# Compute the auxiliary arrays
Sn = np.cumsum(sorted_weights)
Pn = (Sn - 0.5 * sorted_weights) / np.sum(sorted_weights)
# Get the value of the weighted quantiles
interpolator = scipy.interpolate.interp1d(
Pn, sorted_data, bounds_error=False, **kwargs
)
result = interpolator(quantiles)
# Set cases where quantile falls outside data range to min or max
np.place(result, Pn.min() > quantiles, sorted_data.min())
np.place(result, Pn.max() < quantiles, sorted_data.max())
return result
def _weighted_percentile(
    data, axis, weights, percent, returned=False, **kwargs
):
    """
    The weighted_percentile aggregator is an additive operation. This means
    that it *may* introduce a new dimension to the data for the statistic
    being calculated, but only if more than one percentile point is requested.

    If a new additive dimension is formed, then it will always be the last
    dimension of the resulting percentile data payload.

    Args:

    * data: ndarray or masked array

    * axis: int
        axis to calculate percentiles over

    * weights: ndarray
        array with the weights. Must have same shape as data

    * percent: float or sequence of floats
        Percentile rank/s at which to extract value/s.

    * returned: bool, optional
        Default False. If True, returns a tuple with the percentiles as the
        first element and the sum of the weights as the second element.
    """
    # Data and weights must correspond point-for-point.
    if data.shape != weights.shape:
        raise ValueError("_weighted_percentile: weights wrong shape.")

    # Move the target axis to the end of both arrays.
    data = np.rollaxis(data, axis, start=data.ndim)
    weights = np.rollaxis(weights, axis, start=data.ndim)
    quantiles = np.array(percent) / 100.0

    # Propagate the data mask onto the weights.
    if ma.isMaskedArray(data):
        weights = ma.array(weights, mask=data.mask)

    leading = data.shape[:-1]
    if leading:
        # Collapse the leading dimensions and compute row by row.
        data = data.reshape([np.prod(leading), data.shape[-1]])
        weights = weights.reshape([np.prod(leading), data.shape[-1]])
        result = np.empty((np.prod(leading), quantiles.size))
        for out_row, data_row, weight_row in zip(result, data, weights):
            out_row[:] = _weighted_quantile_1D(
                data_row, weight_row, quantiles, **kwargs
            )
    else:
        # Data is 1D.
        result = _weighted_quantile_1D(data, weights, quantiles, **kwargs)

    # NaNs flag points with no usable weight: mask them.
    if np.any(np.isnan(result)):
        result = ma.masked_invalid(result)
    if not ma.isMaskedArray(data) and not ma.is_masked(result):
        result = np.asarray(result)

    # Restore the leading dimensions, appending the additive percentile
    # dimension when more than one point was requested.
    if leading:
        if not isinstance(percent, Iterable):
            percent = [percent]
        percent = np.array(percent)
        if percent.shape > (1,):
            leading += percent.shape
        result = result.reshape(leading)

    # Collapse a single-point result to a scalar, matching the behaviour
    # of other aggregators.
    if result.shape == (1,) and quantiles.ndim == 0:
        result = result[0]

    if returned:
        return result, weights.sum(axis=-1)
    return result
@_build_dask_mdtol_function
def _lazy_count(array, **kwargs):
    """Lazily count the points where ``function(array)`` is truthy."""
    lazy_array = iris._lazy_data.as_lazy_data(array)
    predicate = kwargs.pop("function", None)
    if not callable(predicate):
        raise TypeError(
            "function must be a callable. Got {}.".format(type(predicate))
        )
    return da.sum(predicate(lazy_array), **kwargs)
def _proportion(array, function, axis, **kwargs):
    """
    Fraction of (unmasked) points along `axis` for which `function` holds.
    """
    count = iris._lazy_data.non_lazy(_lazy_count)
    # Establish the denominator: the number of contributing values along
    # the axis.  For masked input this is the per-point unmasked count.
    if not ma.isMaskedArray(array):
        total_non_masked = array.shape[axis]
    elif array.mask is np.bool_(False):
        # numpy returns a single scalar False as the mask when no mask was
        # explicitly specified at construction, meaning "nothing masked";
        # use the plain axis length in that case.
        total_non_masked = array.shape[axis]
    else:
        # Count the non-masked values across the given axis, masking any
        # output point with no contributing values at all.
        total_non_masked = count(
            array.mask, axis=axis, function=np.logical_not, **kwargs
        )
        total_non_masked = ma.masked_equal(total_non_masked, 0)

    # Sanitise the result of this operation thru ma.asarray to ensure that
    # the dtype of the fill-value and the dtype of the array are aligned.
    # Otherwise, it is possible for numpy to return a masked array that has
    # a dtype for its data that is different to the dtype of the fill-value,
    # which can cause issues outside this function.
    # Reference - tests/unit/analyis/test_PROPORTION.py Test_masked.test_ma
    numerator = count(array, axis=axis, function=function, **kwargs)
    return ma.asarray(numerator / total_non_masked)
def _rms(array, axis, **kwargs):
# XXX due to the current limitations in `da.average` (see below), maintain
# an explicit non-lazy aggregation function for now.
# Note: retaining this function also means that if weights are passed to
# the lazy aggregator, the aggregation will fall back to using this
# non-lazy aggregator.
rval = np.sqrt(ma.average(np.square(array), axis=axis, **kwargs))
if not ma.isMaskedArray(array):
rval = np.asarray(rval)
return rval
@_build_dask_mdtol_function
def _lazy_rms(array, axis, **kwargs):
    """Lazy root-mean-square of `array` along `axis`."""
    # XXX Ideally this would use `da.average`, mirroring `_rms` above, but
    # `da.average` currently mishandles masked weights
    # (see https://github.com/dask/dask/issues/3846).  `da.mean` is used
    # instead; it supports no weights at all, so a weighted call fails
    # loudly in dask on the unexpected `weights` keyword rather than
    # silently producing a wrong answer.
    squares = array ** 2
    return da.sqrt(da.mean(squares, axis=axis, **kwargs))
@_build_dask_mdtol_function
def _lazy_sum(array, **kwargs):
    """Lazy (optionally weighted) sum; may also return the weight totals."""
    lazy_array = iris._lazy_data.as_lazy_data(array)
    # Pull out the keywords handled here rather than by da.sum.
    axis = kwargs.get("axis", None)
    weights = kwargs.pop("weights", None)
    wants_weights = kwargs.pop("returned", False)

    # Weighted (scaled) or plain sum of the data.
    if weights is None:
        total = da.sum(lazy_array, **kwargs)
    else:
        total = da.sum(weights * lazy_array, **kwargs)

    if not wants_weights:
        return total

    # Also sum the weights; an unweighted call counts each point once.
    if weights is None:
        weights = iris._lazy_data.as_lazy_data(np.ones_like(lazy_array))
    return (total, da.sum(weights, axis=axis))
def _peak(array, **kwargs):
def column_segments(column):
nan_indices = np.where(np.isnan(column))[0]
columns = []
if len(nan_indices) == 0:
columns.append(column)
else:
for index, nan_index in enumerate(nan_indices):
if index == 0:
if index != nan_index:
columns.append(column[:nan_index])
elif nan_indices[index - 1] != (nan_index - 1):
columns.append(
column[nan_indices[index - 1] + 1 : nan_index]
)
if nan_indices[-1] != len(column) - 1:
columns.append(column[nan_indices[-1] + 1 :])
return columns
def interp_order(length):
if length == 1:
k = None
elif length > 5:
k = 5
else:
k = length - 1
return k
# Collapse array to its final data shape.
slices = [slice(None)] * array.ndim
endslice = slice(0, 1) if len(slices) == 1 else 0
slices[-1] = endslice
slices = tuple(slices) # Numpy>=1.16 : index with tuple, *not* list.
if isinstance(array.dtype, np.float64):
data = array[slices]
else:
# Cast non-float data type.
data = array.astype("float32")[slices]
# Generate nd-index iterator over array.
shape = list(array.shape)
shape[-1] = 1
ndindices = np.ndindex(*shape)
for ndindex in ndindices:
ndindex_slice = list(ndindex)
ndindex_slice[-1] = slice(None)
column_slice = array[tuple(ndindex_slice)]
# Check if the column slice contains a single value, nans only,
# masked values only or if the values are all equal.
equal_slice = (
np.ones(column_slice.size, dtype=column_slice.dtype)
* column_slice[0]
)
if (
column_slice.size == 1
or all(np.isnan(column_slice))
or ma.count(column_slice) == 0
or np.all(np.equal(equal_slice, column_slice))
):
continue
# Check if the column slice is masked.
if ma.isMaskedArray(column_slice):
# Check if the column slice contains only nans, without inf
# or -inf values, regardless of the mask.
if not np.any(np.isfinite(column_slice)) and not np.any(
np.isinf(column_slice)
):
data[ndindex[:-1]] = np.nan
continue
# Replace masked values with nans.
column_slice = column_slice.filled(np.nan)
# Determine the column segments that require a fitted spline.
columns = column_segments(column_slice)
column_peaks = []
for column in columns:
# Determine the interpolation order for the spline fit.
k = interp_order(column.size)
if k is None:
column_peaks.append(column[0])
continue
tck = scipy.interpolate.splrep(np.arange(column.size), column, k=k)
npoints = column.size * 100
points = np.linspace(0, column.size - 1, npoints)
spline = scipy.interpolate.splev(points, tck)
column_max = np.max(column)
spline_max = np.max(spline)
# Check if the max value of the spline is greater than the
# max value of the column.
if spline_max > column_max:
column_peaks.append(spline_max)
else:
column_peaks.append(column_max)
data[ndindex[:-1]] = np.max(column_peaks)
return data
#
# Common partial Aggregation class constructors.
#
COUNT = Aggregator(
"count",
iris._lazy_data.non_lazy(_lazy_count),
units_func=lambda units: 1,
lazy_func=_lazy_count,
)
"""
An :class:`~iris.analysis.Aggregator` instance that counts the number
of :class:`~iris.cube.Cube` data occurrences that satisfy a particular
criterion, as defined by a user supplied *function*.
**Required** kwargs associated with the use of this aggregator:
* function (callable):
A function which converts an array of data values into a corresponding
array of True/False values.
**For example**:
To compute the number of *ensemble members* with precipitation exceeding 10
(in cube data units) could be calculated with::
result = precip_cube.collapsed('ensemble_member', iris.analysis.COUNT,
function=lambda values: values > 10)
.. seealso:: The :func:`~iris.analysis.PROPORTION` aggregator.
This aggregator handles masked data.
"""
GMEAN = Aggregator("geometric_mean", scipy.stats.mstats.gmean)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates the
geometric mean over a :class:`~iris.cube.Cube`, as computed by
:func:`scipy.stats.mstats.gmean`.
**For example**:
To compute zonal geometric means over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.GMEAN)
This aggregator handles masked data.
"""
HMEAN = Aggregator("harmonic_mean", scipy.stats.mstats.hmean)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates the
harmonic mean over a :class:`~iris.cube.Cube`, as computed by
:func:`scipy.stats.mstats.hmean`.
**For example**:
To compute zonal harmonic mean over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.HMEAN)
.. note::
The harmonic mean is only valid if all data values are greater
than zero.
This aggregator handles masked data.
"""
MEAN = WeightedAggregator(
"mean", ma.average, lazy_func=_build_dask_mdtol_function(da.ma.average)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the mean over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.average`.
Additional kwargs associated with the use of this aggregator:
* mdtol (float):
Tolerance of missing data. The value returned in each element of the
returned array will be masked if the fraction of masked data contributing
to that element exceeds mdtol. This fraction is calculated based on the
number of masked elements. mdtol=0 means no missing data is tolerated
while mdtol=1 means the resulting element will be masked if and only if
all the contributing elements are masked. Defaults to 1.
* weights (float ndarray):
Weights matching the shape of the cube or the length of the window
for rolling window operations. Note that, latitude/longitude area
weights can be calculated using
:func:`iris.analysis.cartography.area_weights`.
* returned (boolean):
Set this to True to indicate that the collapsed weights are to be
returned along with the collapsed data. Defaults to False.
**For example**:
To compute zonal means over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MEAN)
To compute a weighted area average::
coords = ('longitude', 'latitude')
collapsed_cube, collapsed_weights = cube.collapsed(coords,
iris.analysis.MEAN,
weights=weights,
returned=True)
.. note::
Lazy operation is supported, via :func:`dask.array.ma.average`.
This aggregator handles masked data.
"""
MEDIAN = Aggregator("median", ma.median)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the median over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.median`.
**For example**:
To compute zonal medians over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MEDIAN)
This aggregator handles masked data.
"""
MIN = Aggregator(
"minimum", ma.min, lazy_func=_build_dask_mdtol_function(da.min)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the minimum over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.min`.
**For example**:
To compute zonal minimums over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MIN)
This aggregator handles masked data.
"""
MAX = Aggregator(
"maximum", ma.max, lazy_func=_build_dask_mdtol_function(da.max)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the maximum over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.max`.
**For example**:
To compute zonal maximums over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.MAX)
This aggregator handles masked data.
"""
PEAK = Aggregator("peak", _peak)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the peak value derived from a spline interpolation over a
:class:`~iris.cube.Cube`.
The peak calculation takes into account nan values. Therefore, if the number
of non-nan values is zero the result itself will be an array of nan values.
The peak calculation also takes into account masked values. Therefore, if the
number of non-masked values is zero the result itself will be a masked array.
If multiple coordinates are specified, then the peak calculations are
performed individually, in sequence, for each coordinate specified.
**For example**:
To compute the peak over the *time* axis of a cube::
result = cube.collapsed('time', iris.analysis.PEAK)
This aggregator handles masked data.
"""
PERCENTILE = PercentileAggregator(alphap=1, betap=1)
"""
An :class:`~iris.analysis.PercentileAggregator` instance that calculates the
percentile over a :class:`~iris.cube.Cube`, as computed by
:func:`scipy.stats.mstats.mquantiles`.
**Required** kwargs associated with the use of this aggregator:
* percent (float or sequence of floats):
Percentile rank/s at which to extract value/s.
Additional kwargs associated with the use of this aggregator:
* alphap (float):
Plotting positions parameter, see :func:`scipy.stats.mstats.mquantiles`.
Defaults to 1.
* betap (float):
Plotting positions parameter, see :func:`scipy.stats.mstats.mquantiles`.
Defaults to 1.
**For example**:
To compute the 10th and 90th percentile over *time*::
result = cube.collapsed('time', iris.analysis.PERCENTILE, percent=[10, 90])
This aggregator handles masked data.
"""
PROPORTION = Aggregator("proportion", _proportion, units_func=lambda units: 1)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates the
proportion, as a fraction, of :class:`~iris.cube.Cube` data occurrences
that satisfy a particular criterion, as defined by a user supplied
*function*.
**Required** kwargs associated with the use of this aggregator:
* function (callable):
A function which converts an array of data values into a corresponding
array of True/False values.
**For example**:
To compute the probability of precipitation exceeding 10
(in cube data units) across *ensemble members* could be calculated with::
result = precip_cube.collapsed('ensemble_member', iris.analysis.PROPORTION,
function=lambda values: values > 10)
Similarly, the proportion of *time* precipitation exceeded 10
(in cube data units) could be calculated with::
result = precip_cube.collapsed('time', iris.analysis.PROPORTION,
function=lambda values: values > 10)
.. seealso:: The :func:`~iris.analysis.COUNT` aggregator.
This aggregator handles masked data.
"""
RMS = WeightedAggregator(
"root mean square", _rms, lazy_func=_build_dask_mdtol_function(_lazy_rms)
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the root mean square over a :class:`~iris.cube.Cube`, as computed by
((x0**2 + x1**2 + ... + xN-1**2) / N) ** 0.5.
Additional kwargs associated with the use of this aggregator:
* weights (float ndarray):
Weights matching the shape of the cube or the length of the window for
rolling window operations. The weights are applied to the squares when
taking the mean.
**For example**:
To compute the zonal root mean square over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.RMS)
This aggregator handles masked data.
"""
STD_DEV = Aggregator(
"standard_deviation",
ma.std,
ddof=1,
lazy_func=_build_dask_mdtol_function(da.std),
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the standard deviation over a :class:`~iris.cube.Cube`, as
computed by :func:`numpy.ma.std`.
Additional kwargs associated with the use of this aggregator:
* ddof (integer):
Delta degrees of freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements. Defaults to 1.
**For example**:
To compute zonal standard deviations over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.STD_DEV)
To obtain the biased standard deviation::
result = cube.collapsed('longitude', iris.analysis.STD_DEV, ddof=0)
.. note::
Lazy operation is supported, via :func:`dask.array.nanstd`.
This aggregator handles masked data.
"""
SUM = WeightedAggregator(
"sum",
iris._lazy_data.non_lazy(_lazy_sum),
lazy_func=_build_dask_mdtol_function(_lazy_sum),
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the sum over a :class:`~iris.cube.Cube`, as computed by :func:`numpy.ma.sum`.
Additional kwargs associated with the use of this aggregator:
* weights (float ndarray):
Weights matching the shape of the cube, or the length of
the window for rolling window operations. Weights should be
normalized before using them with this aggregator if scaling
is not intended.
* returned (boolean):
Set this to True to indicate the collapsed weights are to be returned
along with the collapsed data. Defaults to False.
**For example**:
To compute an accumulation over the *time* axis of a cube::
result = cube.collapsed('time', iris.analysis.SUM)
To compute a weighted rolling sum e.g. to apply a digital filter::
weights = np.array([.1, .2, .4, .2, .1])
result = cube.rolling_window('time', iris.analysis.SUM,
len(weights), weights=weights)
This aggregator handles masked data.
"""
VARIANCE = Aggregator(
"variance",
ma.var,
units_func=lambda units: units * units,
lazy_func=_build_dask_mdtol_function(da.var),
ddof=1,
)
"""
An :class:`~iris.analysis.Aggregator` instance that calculates
the variance over a :class:`~iris.cube.Cube`, as computed by
:func:`numpy.ma.var`.
Additional kwargs associated with the use of this aggregator:
* ddof (integer):
Delta degrees of freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements. Defaults to 1.
**For example**:
To compute zonal variance over the *longitude* axis of a cube::
result = cube.collapsed('longitude', iris.analysis.VARIANCE)
To obtain the biased variance::
result = cube.collapsed('longitude', iris.analysis.VARIANCE, ddof=0)
.. note::
Lazy operation is supported, via :func:`dask.array.nanvar`.
This aggregator handles masked data.
"""
WPERCENTILE = WeightedPercentileAggregator()
"""
An :class:`~iris.analysis.WeightedPercentileAggregator` instance that
calculates the weighted percentile over a :class:`~iris.cube.Cube`.
**Required** kwargs associated with the use of this aggregator:
* percent (float or sequence of floats):
Percentile rank/s at which to extract value/s.
* weights (float ndarray):
Weights matching the shape of the cube or the length of the window
for rolling window operations. Note that, latitude/longitude area
weights can be calculated using
:func:`iris.analysis.cartography.area_weights`.
Additional kwargs associated with the use of this aggregator:
* returned (boolean):
Set this to True to indicate that the collapsed weights are to be
returned along with the collapsed data. Defaults to False.
* kind (string or int):
Specifies the kind of interpolation used, see
:func:`scipy.interpolate.interp1d` Defaults to "linear", which is
equivalent to alphap=0.5, betap=0.5 in `iris.analysis.PERCENTILE`
"""
class _Groupby:
    """
    Convenience class to determine group slices over one or more group-by
    coordinates.
    Generate the coordinate slices for the groups and calculate the
    new group-by coordinates and the new shared coordinates given the
    group slices. Note that, new shared coordinates will be bounded
    coordinates.
    Assumes that all the coordinates share the same axis, therefore all
    of the coordinates must be of the same length.
    Group-by coordinates are those coordinates over which value groups
    are to be determined.
    Shared coordinates are those coordinates which share the same axis
    as group-by coordinates, but which are not to be included in the
    group-by analysis.
    """
    def __init__(self, groupby_coords, shared_coords=None):
        """
        Determine the group slices over the group-by coordinates.
        Args:
        * groupby_coords (list :class:`iris.coords.Coord` instances):
            One or more coordinates from the same axis over which to group-by.
        Kwargs:
        * shared_coords (list of (:class:`iris.coords.Coord`, `int`) pairs):
            One or more coordinates (including multidimensional coordinates)
            that share the same group-by coordinate axis. The `int` identifies
            which dimension of the coord is on the group-by coordinate axis.
        """
        #: Group-by and shared coordinates that have been grouped.
        self.coords = []
        self._groupby_coords = []
        self._shared_coords = []
        self._slices_by_key = OrderedDict()
        self._stop = None
        # Ensure group-by coordinates are iterable.
        if not isinstance(groupby_coords, Iterable):
            raise TypeError(
                "groupby_coords must be a " "`collections.Iterable` type."
            )
        # Add valid group-by coordinates.
        for coord in groupby_coords:
            self._add_groupby_coord(coord)
        # Add the coordinates sharing the same axis as the group-by
        # coordinates.
        if shared_coords is not None:
            # Ensure shared coordinates are iterable.
            if not isinstance(shared_coords, Iterable):
                raise TypeError(
                    "shared_coords must be a " "`collections.Iterable` type."
                )
            # Add valid shared coordinates.
            for coord, dim in shared_coords:
                self._add_shared_coord(coord, dim)
    def _add_groupby_coord(self, coord):
        """Register a 1-d group-by coord, checking length consistency."""
        if coord.ndim != 1:
            raise iris.exceptions.CoordinateMultiDimError(coord)
        if self._stop is None:
            self._stop = coord.shape[0]
        if coord.shape[0] != self._stop:
            raise ValueError("Group-by coordinates have different lengths.")
        self._groupby_coords.append(coord)
    def _add_shared_coord(self, coord, dim):
        """Register a shared (coord, dim) pair, checking length consistency."""
        if coord.shape[dim] != self._stop and self._stop is not None:
            raise ValueError("Shared coordinates have different lengths.")
        self._shared_coords.append((coord, dim))
    def group(self):
        """
        Calculate the groups and associated slices over one or more group-by
        coordinates.
        Also creates new group-by and shared coordinates given the calculated
        group slices.
        Returns:
            A generator of the coordinate group slices.
        """
        if self._groupby_coords:
            if not self._slices_by_key:
                items = []
                groups = []
                for coord in self._groupby_coords:
                    groups.append(iris.coords._GroupIterator(coord.points))
                    items.append(next(groups[-1]))
                # Construct the group slice for each group over the group-by
                # coordinates. Keep constructing until all group-by coordinate
                # groups are exhausted.
                while any([item is not None for item in items]):
                    # Determine the extent (start, stop) of the group given
                    # each current group-by coordinate group.
                    start = max(
                        [
                            item.groupby_slice.start
                            for item in items
                            if item is not None
                        ]
                    )
                    stop = min(
                        [
                            item.groupby_slice.stop
                            for item in items
                            if item is not None
                        ]
                    )
                    # Construct composite group key for the group using the
                    # start value from each group-by coordinate.
                    key = tuple(
                        [coord.points[start] for coord in self._groupby_coords]
                    )
                    # Associate group slice with group key within the ordered
                    # dictionary.
                    self._slices_by_key.setdefault(key, []).append(
                        slice(start, stop)
                    )
                    # Prepare for the next group slice construction over the
                    # group-by coordinates.
                    for item_index, item in enumerate(items):
                        if item is None:
                            continue
                        # Get coordinate current group slice.
                        groupby_slice = item.groupby_slice
                        # Determine whether coordinate has spanned all its
                        # groups i.e. its full length
                        # or whether we need to get the coordinates next group.
                        if groupby_slice.stop == self._stop:
                            # This coordinate has exhausted all its groups,
                            # so remove it.
                            items[item_index] = None
                        elif groupby_slice.stop == stop:
                            # The current group of this coordinate is
                            # exhausted, so get the next one.
                            items[item_index] = next(groups[item_index])
                # Merge multiple slices together into one tuple.
                self._slice_merge()
                # Calculate the new group-by coordinates.
                self._compute_groupby_coords()
                # Calculate the new shared coordinates.
                self._compute_shared_coords()
            # Generate the group-by slices/groups.
            for groupby_slice in self._slices_by_key.values():
                yield groupby_slice
        return
    def _slice_merge(self):
        """
        Merge multiple slices into one tuple and collapse items from
        containing list.
        """
        # Iterate over the ordered dictionary in order to reduce
        # multiple slices into a single tuple and collapse
        # all items from containing list.
        for key, groupby_slices in self._slices_by_key.items():
            if len(groupby_slices) > 1:
                # Compress multiple slices into tuple representation.
                groupby_indicies = []
                for groupby_slice in groupby_slices:
                    groupby_indicies.extend(
                        range(groupby_slice.start, groupby_slice.stop)
                    )
                self._slices_by_key[key] = tuple(groupby_indicies)
            else:
                # Remove single inner slice from list.
                self._slices_by_key[key] = groupby_slices[0]
    def _compute_groupby_coords(self):
        """Create new group-by coordinates given the group slices."""
        groupby_slice = []
        # Iterate over the ordered dictionary in order to construct
        # a group-by slice that samples the first element from each group.
        for key_slice in self._slices_by_key.values():
            if isinstance(key_slice, tuple):
                groupby_slice.append(key_slice[0])
            else:
                groupby_slice.append(key_slice.start)
        groupby_slice = np.array(groupby_slice)
        # Create new group-by coordinates from the group-by slice.
        self.coords = [coord[groupby_slice] for coord in self._groupby_coords]
    def _compute_shared_coords(self):
        """Create the new shared coordinates given the group slices."""
        groupby_bounds = []
        # Iterate over the ordered dictionary in order to construct
        # a list of tuple group boundary indexes.
        for key_slice in self._slices_by_key.values():
            if isinstance(key_slice, tuple):
                groupby_bounds.append((key_slice[0], key_slice[-1]))
            else:
                groupby_bounds.append((key_slice.start, key_slice.stop - 1))
        # Create new shared bounded coordinates.
        for coord, dim in self._shared_coords:
            if coord.points.dtype.kind in "SU":
                if coord.bounds is None:
                    new_points = []
                    new_bounds = None
                    # np.apply_along_axis does not work with str.join, so we
                    # need to loop through the array directly. First move axis
                    # of interest to trailing dim and flatten the others.
                    work_arr = np.moveaxis(coord.points, dim, -1)
                    shape = work_arr.shape
                    work_shape = (-1, shape[-1])
                    new_shape = (len(self),)
                    if coord.ndim > 1:
                        new_shape += shape[:-1]
                    work_arr = work_arr.reshape(work_shape)
                    for key_slice in self._slices_by_key.values():
                        if isinstance(key_slice, slice):
                            indices = key_slice.indices(
                                coord.points.shape[dim]
                            )
                            key_slice = range(*indices)
                        for arr in work_arr:
                            new_points.append("|".join(arr.take(key_slice)))
                    # Reinstate flattened dimensions. Aggregated dim now leads.
                    new_points = np.array(new_points).reshape(new_shape)
                    # Move aggregated dimension back to position it started in.
                    new_points = np.moveaxis(new_points, 0, dim)
                else:
                    msg = (
                        "collapsing the bounded string coordinate {0!r}"
                        " is not supported".format(coord.name())
                    )
                    raise ValueError(msg)
            else:
                new_bounds = []
                # Construct list of coordinate group boundary pairs.
                for start, stop in groupby_bounds:
                    if coord.has_bounds():
                        # Collapse group bounds into bounds.
                        if (
                            getattr(coord, "circular", False)
                            and (stop + 1) == coord.shape[dim]
                        ):
                            new_bounds.append(
                                [
                                    coord.bounds.take(start, dim).take(0, -1),
                                    coord.bounds.take(0, dim).take(0, -1)
                                    + coord.units.modulus,
                                ]
                            )
                        else:
                            new_bounds.append(
                                [
                                    coord.bounds.take(start, dim).take(0, -1),
                                    coord.bounds.take(stop, dim).take(1, -1),
                                ]
                            )
                    else:
                        # Collapse group points into bounds.
                        if getattr(coord, "circular", False) and (
                            stop + 1
                        ) == len(coord.points):
                            new_bounds.append(
                                [
                                    coord.points.take(start, dim),
                                    coord.points.take(0, dim)
                                    + coord.units.modulus,
                                ]
                            )
                        else:
                            new_bounds.append(
                                [
                                    coord.points.take(start, dim),
                                    coord.points.take(stop, dim),
                                ]
                            )
                # Bounds needs to be an array with the length 2 start-stop
                # dimension last, and the aggregated dimension back in its
                # original position.
                new_bounds = np.moveaxis(
                    np.array(new_bounds), (0, 1), (dim, -1)
                )
                # Now create the new bounded group shared coordinate.
                try:
                    new_points = new_bounds.mean(-1)
                except TypeError:
                    msg = (
                        "The {0!r} coordinate on the collapsing dimension"
                        " cannot be collapsed.".format(coord.name())
                    )
                    raise ValueError(msg)
            try:
                self.coords.append(
                    coord.copy(points=new_points, bounds=new_bounds)
                )
            except ValueError:
                # non monotonic points/bounds
                self.coords.append(
                    iris.coords.AuxCoord.from_coord(coord).copy(
                        points=new_points, bounds=new_bounds
                    )
                )
    def __len__(self):
        """Calculate the number of groups given the group-by coordinates."""
        if self._slices_by_key:
            value = len(self._slices_by_key)
        else:
            value = len([s for s in self.group()])
        return value
    def __repr__(self):
        """Return a representation naming the group-by and shared coords."""
        groupby_coords = [coord.name() for coord in self._groupby_coords]
        # BUGFIX: this previously tested `self._shared_coords_by_name`, an
        # attribute that is never assigned (always AttributeError), and then
        # iterated `self._shared_coords` as bare coords although it holds
        # (coord, dim) pairs.
        if self._shared_coords:
            shared_coords = [coord.name() for coord, _ in self._shared_coords]
            shared_string = ", shared_coords=%r)" % shared_coords
        else:
            shared_string = ")"
        return "%s(%r%s" % (
            self.__class__.__name__,
            groupby_coords,
            shared_string,
        )
def clear_phenomenon_identity(cube):
    """Strip *cube* of its phenomenon identity, in place.

    Clears the cube's name (via ``rename(None)``), empties its
    attributes dictionary and removes all its cell methods.
    """
    cube.rename(None)
    cube.attributes.clear()
    cube.cell_methods = ()
###############################################################################
#
# Interpolation API
#
###############################################################################
class Linear:
    """
    Describes the linear interpolation and regridding scheme, for use
    with :meth:`iris.cube.Cube.interpolate()` or
    :meth:`iris.cube.Cube.regrid()` over one or more orthogonal
    coordinates.

    """

    LINEAR_EXTRAPOLATION_MODES = list(EXTRAPOLATION_MODES.keys()) + ["linear"]

    def __init__(self, extrapolation_mode="linear"):
        """
        Create a linear interpolation and regridding scheme.

        Kwargs:

        * extrapolation_mode:
            One of the following strings:

            * 'extrapolate' or 'linear' - extrapolation points are
              calculated by extending the gradient of the closest two
              points.
            * 'nan' - extrapolation points are set to NaN.
            * 'error' - a ValueError is raised on any attempt to
              extrapolate.
            * 'mask' - extrapolation points are always masked, even when
              the source data is not a MaskedArray.
            * 'nanmask' - extrapolation points are masked if the source
              data is a MaskedArray, otherwise set to NaN.

            Defaults to 'linear'.

        """
        if extrapolation_mode not in self.LINEAR_EXTRAPOLATION_MODES:
            raise ValueError(
                "Extrapolation mode {!r} not supported.".format(
                    extrapolation_mode
                )
            )
        self.extrapolation_mode = extrapolation_mode

    def __repr__(self):
        return "Linear({!r})".format(self.extrapolation_mode)

    def _normalised_extrapolation_mode(self):
        # Map the scheme's 'linear' alias onto the interpolator's
        # 'extrapolate' mode; other modes pass through unchanged.
        mode = self.extrapolation_mode
        return "extrapolate" if mode == "linear" else mode

    def interpolator(self, cube, coords):
        """
        Create a linear interpolator over *cube* along the dimensions of
        the given coordinates.

        Typically you should use :meth:`iris.cube.Cube.interpolate`
        instead; constructing an interpolator directly is only preferable
        in the situations described in the
        :ref:`user guide <caching_an_interpolator>`.

        Args:

        * cube:
            The source :class:`iris.cube.Cube` to be interpolated.
        * coords:
            The names or coordinate instances to interpolate over.

        Returns:
            A callable with the interface
            `callable(sample_points, collapse_scalar=True)`, where
            `sample_points` is a sequence of one value-array per
            coordinate, and `collapse_scalar` controls whether length-one
            dimensions arising from scalar sample points are removed.
            Date/time coordinate values may be given as
            datetime.datetime or cftime.datetime instances.

            For example, the callable returned by
            `Linear().interpolator(cube, ['latitude', 'longitude'])`
            expects sample_points of the form
            `[new_lat_values, new_lon_values]`.

        """
        return RectilinearInterpolator(
            cube, coords, "linear", self._normalised_extrapolation_mode()
        )

    def regridder(self, src_grid, target_grid):
        """
        Create a linear regridder from *src_grid* to *target_grid*.

        Typically you should use :meth:`iris.cube.Cube.regrid` instead;
        constructing a regridder directly is only preferable in the
        situations described in the
        :ref:`user guide <caching_a_regridder>`.

        Supports lazy regridding. Any
        `chunks <https://docs.dask.org/en/latest/array-chunks.html>`__
        in horizontal dimensions will be combined before regridding.

        Args:

        * src_grid:
            The :class:`~iris.cube.Cube` defining the source grid.
        * target_grid:
            The :class:`~iris.cube.Cube` defining the target grid.

        Returns:
            A callable with the interface `callable(cube)`, where `cube`
            shares the grid of `src_grid` and is regridded onto
            `target_grid`.

        """
        return RectilinearRegridder(
            src_grid,
            target_grid,
            "linear",
            self._normalised_extrapolation_mode(),
        )
class AreaWeighted:
    """
    Describes an area-weighted regridding scheme between 'ordinary'
    horizontal grids, i.e. grids with separate X and Y coordinates in a
    common coordinate system, typically used via
    :meth:`iris.cube.Cube.regrid()`.

    """

    def __init__(self, mdtol=1):
        """
        Create an area-weighted regridding scheme.

        Kwargs:

        * mdtol (float):
            Tolerance of missing data. Each element of the result is
            masked when the fraction of missing data exceeds mdtol, the
            fraction being computed from the area of masked cells within
            each target cell. mdtol=0 tolerates no masked data, while
            mdtol=1 masks an element only when every overlapping source
            cell is masked. Defaults to 1.

        .. Note:
            Both source and target cubes must have an XY grid defined by
            separate X and Y dimensions with dimension coordinates.
            All of the XY dimension coordinates must also be bounded, and
            have the same coordinate system.

        """
        if not 0 <= mdtol <= 1:
            raise ValueError(
                "Value for mdtol must be in range 0 - 1, got {}.".format(
                    mdtol
                )
            )
        self.mdtol = mdtol

    def __repr__(self):
        return "AreaWeighted(mdtol={})".format(self.mdtol)

    def regridder(self, src_grid_cube, target_grid_cube):
        """
        Create an area-weighted regridder from *src_grid_cube* to
        *target_grid_cube*.

        Typically you should use :meth:`iris.cube.Cube.regrid` instead;
        constructing a regridder directly is only preferable in the
        situations described in the
        :ref:`user guide <caching_a_regridder>`.

        Supports lazy regridding. Any
        `chunks <https://docs.dask.org/en/latest/array-chunks.html>`__
        in horizontal dimensions will be combined before regridding.

        Args:

        * src_grid_cube:
            The :class:`~iris.cube.Cube` defining the source grid.
        * target_grid_cube:
            The :class:`~iris.cube.Cube` defining the target grid.

        Returns:
            A callable with the interface `callable(cube)`, where `cube`
            shares the grid of `src_grid_cube` and is regridded onto the
            grid of `target_grid_cube`.

        """
        return AreaWeightedRegridder(
            src_grid_cube, target_grid_cube, mdtol=self.mdtol
        )
class Nearest:
    """
    Describes the nearest-neighbour interpolation and regridding scheme,
    for use with :meth:`iris.cube.Cube.interpolate()` or
    :meth:`iris.cube.Cube.regrid()` over one or more orthogonal
    coordinates.

    """

    def __init__(self, extrapolation_mode="extrapolate"):
        """
        Create a nearest-neighbour interpolation and regridding scheme.

        Kwargs:

        * extrapolation_mode:
            One of the following strings:

            * 'extrapolate' - extrapolation points take the value of the
              nearest source point.
            * 'nan' - extrapolation points are set to NaN.
            * 'error' - a ValueError is raised on any attempt to
              extrapolate.
            * 'mask' - extrapolation points are always masked, even when
              the source data is not a MaskedArray.
            * 'nanmask' - extrapolation points are masked if the source
              data is a MaskedArray, otherwise set to NaN.

            Defaults to 'extrapolate'.

        """
        if extrapolation_mode not in EXTRAPOLATION_MODES:
            raise ValueError(
                "Extrapolation mode {!r} not supported.".format(
                    extrapolation_mode
                )
            )
        self.extrapolation_mode = extrapolation_mode

    def __repr__(self):
        return "Nearest({!r})".format(self.extrapolation_mode)

    def interpolator(self, cube, coords):
        """
        Create a nearest-neighbour interpolator over *cube* along the
        dimensions of the given coordinates.

        Typically you should use :meth:`iris.cube.Cube.interpolate`
        instead; constructing an interpolator directly is only preferable
        in the situations described in the
        :ref:`user guide <caching_an_interpolator>`.

        Args:

        * cube:
            The source :class:`iris.cube.Cube` to be interpolated.
        * coords:
            The names or coordinate instances to interpolate over.

        Returns:
            A callable with the interface
            `callable(sample_points, collapse_scalar=True)`, where
            `sample_points` is a sequence of one value-array per
            coordinate, and `collapse_scalar` controls whether length-one
            dimensions arising from scalar sample points are removed.
            Date/time coordinate values may be given as
            datetime.datetime or cftime.datetime instances.

            For example, the callable returned by
            `Nearest().interpolator(cube, ['latitude', 'longitude'])`
            expects sample_points of the form
            `[new_lat_values, new_lon_values]`.

        """
        return RectilinearInterpolator(
            cube, coords, "nearest", self.extrapolation_mode
        )

    def regridder(self, src_grid, target_grid):
        """
        Create a nearest-neighbour regridder from *src_grid* to
        *target_grid*.

        Typically you should use :meth:`iris.cube.Cube.regrid` instead;
        constructing a regridder directly is only preferable in the
        situations described in the
        :ref:`user guide <caching_a_regridder>`.

        Supports lazy regridding. Any
        `chunks <https://docs.dask.org/en/latest/array-chunks.html>`__
        in horizontal dimensions will be combined before regridding.

        Args:

        * src_grid:
            The :class:`~iris.cube.Cube` defining the source grid.
        * target_grid:
            The :class:`~iris.cube.Cube` defining the target grid.

        Returns:
            A callable with the interface `callable(cube)`, where `cube`
            shares the grid of `src_grid` and is regridded onto
            `target_grid`.

        """
        return RectilinearRegridder(
            src_grid, target_grid, "nearest", self.extrapolation_mode
        )
class UnstructuredNearest:
    """
    A nearest-neighbour regridding scheme for "unstructured" data, i.e.
    data whose horizontal (X- and Y-axis) coordinates are mapped over
    the *same* dimensions rather than lying orthogonally on independent
    ones.

    Nearest-neighbour distances are measured on the sphere when the
    coordinates are latitudes and longitudes, and with flat Euclidean
    geometry otherwise.

    The source X and Y coordinates may have any shape.  The target grid,
    by contrast, must be of the "normal" kind, with separate,
    1-dimensional X and Y coordinates.

    Source and target XY coordinates must share the same coordinate
    system, which may also be None.  If any of the XY coordinates are
    latitudes or longitudes, then they *all* must be.  Otherwise, the
    corresponding X and Y coordinates must have the same units in the
    source and grid cubes.

    .. Note::
        Currently only supports regridding, not interpolation.

    .. Note::
        This scheme performs essentially the same job as
        :class:`iris.experimental.regrid.ProjectedUnstructuredNearest`.
        That scheme is faster, but only works well on data in a limited
        region of the globe, covered by a specified projection.
        This approach is more rigorously correct and can be applied to
        global datasets.
    """

    # Note: the argument requirements are simply those of the underlying
    # regridder class,
    # :class:`iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder`.
    def __init__(self):
        """
        Nearest-neighbour interpolation and regridding scheme suitable
        for interpolating or regridding from un-gridded data such as
        trajectories, or any other data whose X and Y coordinates share
        the same dimensions.
        """
        # The scheme itself is stateless; all work happens in regridder().

    def __repr__(self):
        """Return an eval-style representation of this scheme."""
        return "UnstructuredNearest()"

    # TODO: add interpolator usage
    # def interpolator(self, cube):
    def regridder(self, src_cube, target_grid):
        """
        Build a nearest-neighbour regridder, of
        :class:`~iris.analysis.trajectory.UnstructuredNearestNeigbourRegridder`
        type, mapping the source grid onto the target grid.

        The result can then be applied to any source data with the same
        structure as the original 'src_cube'.

        For everyday regridding prefer :meth:`iris.cube.Cube.regrid`;
        constructing a regridder directly is mainly useful in the
        caching scenarios described in the
        :ref:`user guide <caching_a_regridder>`.

        Does not support lazy regridding.

        Args:

        * src_cube:
            The :class:`~iris.cube.Cube` defining the source grid.
            Its X and Y coordinates can have any shape, but must be
            mapped over the same cube dimensions.

        * target_grid:
            The :class:`~iris.cube.Cube` defining the target grid.
            Its X and Y coordinates must be one-dimensional dimension
            coordinates, mapped to different dimensions.
            All other cube components are ignored.

        Returns:
            A callable with the interface `callable(cube)`, where `cube`
            shares the grid of `src_cube` and is regridded onto the grid
            of `target_grid`.
        """
        # NOTE(review): import kept local as in the original layout —
        # presumably to avoid a circular import; confirm before hoisting.
        from iris.analysis.trajectory import (
            UnstructuredNearestNeigbourRegridder,
        )

        return UnstructuredNearestNeigbourRegridder(src_cube, target_grid)
class PointInCell:
    """
    This class describes the point-in-cell regridding scheme, typically
    used via :meth:`iris.cube.Cube.regrid()`.

    Data on a source grid of any dimensionality, and in any coordinate
    system, can be regridded.  The location of each source point is
    given by X and Y coordinates mapped over the same cube dimensions,
    aka "grid dimensions" — the grid may have any dimensionality.  The X
    and Y coordinates must also share the same, defined coord_system.

    The weights, if specified, must have the same shape as the X and Y
    coordinates.

    The output grid may be any 'normal' XY grid, i.e. one specified by
    *separate* X and Y coordinates occupying two different cube
    dimensions.  The output X and Y coordinates must likewise share a
    common, specified coord_system.
    """

    def __init__(self, weights=None):
        """
        Point-in-cell regridding scheme suitable for regridding over one
        or more orthogonal coordinates.

        Optional Args:

        * weights:
            A :class:`numpy.ndarray` instance defining the weights for
            the grid cells of the source grid.  Must have the same shape
            as the data of the source grid.
            If unspecified, equal weighting is assumed.
        """
        # Stored untouched; None means "equal weighting" downstream.
        self.weights = weights

    def regridder(self, src_grid, target_grid):
        """
        Build a point-in-cell regridder mapping the source grid onto the
        target grid.

        For everyday regridding prefer :meth:`iris.cube.Cube.regrid`;
        constructing a regridder directly is mainly useful in the
        caching scenarios described in the
        :ref:`user guide <caching_a_regridder>`.

        Does not support lazy regridding.

        Args:

        * src_grid:
            The :class:`~iris.cube.Cube` defining the source grid.

        * target_grid:
            The :class:`~iris.cube.Cube` defining the target grid.

        Returns:
            A callable with the interface `callable(cube)`, where `cube`
            shares the grid of `src_grid` and is regridded onto the grid
            of `target_grid`.
        """
        return CurvilinearRegridder(src_grid, target_grid, self.weights)
``` |
[
{
"content": "Here is the snippet:\n```python\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2016-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms ... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2016-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it ... | ```python
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Showing prompts above the statusbar."""
import os.path
import html
import collections
import functools
import dataclasses
from typing import Deque, MutableSequence, Optional, cast
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QTimer, QDir, QModelIndex,
QItemSelectionModel, QObject, QEventLoop)
from PyQt5.QtWidgets import (QWidget, QGridLayout, QVBoxLayout, QLineEdit,
QLabel, QFileSystemModel, QTreeView, QSizePolicy,
QSpacerItem)
from qutebrowser.browser import downloads
from qutebrowser.config import config, configtypes, configexc, stylesheet
from qutebrowser.utils import usertypes, log, utils, qtutils, objreg, message
from qutebrowser.keyinput import modeman
from qutebrowser.api import cmdutils
from qutebrowser.utils import urlmatch
prompt_queue = cast('PromptQueue', None)
@dataclasses.dataclass
class AuthInfo:
"""Authentication info returned by a prompt."""
user: str
password: str
class Error(Exception):
"""Base class for errors in this module."""
class UnsupportedOperationError(Error):
"""Raised when the prompt class doesn't support the requested operation."""
class PromptQueue(QObject):
"""Global manager and queue for upcoming prompts.
The way in which multiple questions are handled deserves some explanation.
If a question is blocking, we *need* to ask it immediately, and can't wait
for previous questions to finish. We could theoretically ask a blocking
question inside of another blocking one, so in ask_question we simply save
the current question on the stack, let the user answer the *most recent*
question, and then restore the previous state.
With a non-blocking question, things are a bit easier. We simply add it to
self._queue if we're still busy handling another question, since it can be
answered at any time.
In either case, as soon as we finished handling a question, we call
_pop_later() which schedules a _pop to ask the next question in _queue. We
schedule it rather than doing it immediately because then the order of how
things happen is clear, e.g. on_mode_left can't happen after we already set
up the *new* question.
Attributes:
_shutting_down: Whether we're currently shutting down the prompter and
should ignore future questions to avoid segfaults.
_loops: A list of local EventLoops to spin in when blocking.
_queue: A deque of waiting questions.
_question: The current Question object if we're handling a question.
Signals:
show_prompts: Emitted with a Question object when prompts should be
shown.
"""
show_prompts = pyqtSignal(usertypes.Question)
def __init__(self, parent=None):
super().__init__(parent)
self._question = None
self._shutting_down = False
self._loops: MutableSequence[qtutils.EventLoop] = []
self._queue: Deque[usertypes.Question] = collections.deque()
message.global_bridge.mode_left.connect(self._on_mode_left)
def __repr__(self):
return utils.get_repr(self, loops=len(self._loops),
queue=len(self._queue), question=self._question)
def _pop_later(self):
"""Helper to call self._pop as soon as everything else is done."""
QTimer.singleShot(0, self._pop)
def _pop(self):
"""Pop a question from the queue and ask it, if there are any."""
log.prompt.debug("Popping from queue {}".format(self._queue))
if self._queue:
question = self._queue.popleft()
if not question.is_aborted:
# the question could already be aborted, e.g. by a cancelled
# download. See
# https://github.com/qutebrowser/qutebrowser/issues/415 and
# https://github.com/qutebrowser/qutebrowser/issues/1249
self.ask_question(question, blocking=False)
def shutdown(self):
"""Cancel all blocking questions.
Quits and removes all running event loops.
Return:
True if loops needed to be aborted,
False otherwise.
"""
log.prompt.debug("Shutting down with loops {}".format(self._loops))
self._shutting_down = True
if self._loops:
for loop in self._loops:
loop.quit()
loop.deleteLater()
return True
else:
return False
@pyqtSlot(usertypes.Question, bool)
def ask_question(self, question, blocking):
"""Display a prompt for a given question.
Args:
question: The Question object to ask.
blocking: If True, this function blocks and returns the result.
Return:
The answer of the user when blocking=True.
None if blocking=False.
"""
log.prompt.debug("Asking question {}, blocking {}, loops {}, queue "
"{}".format(question, blocking, self._loops,
self._queue))
if self._shutting_down:
# If we're currently shutting down we have to ignore this question
# to avoid segfaults - see
# https://github.com/qutebrowser/qutebrowser/issues/95
log.prompt.debug("Ignoring question because we're shutting down.")
question.abort()
return None
if self._question is not None and not blocking:
# We got an async question, but we're already busy with one, so we
# just queue it up for later.
log.prompt.debug("Adding {} to queue.".format(question))
self._queue.append(question)
return None
if blocking:
# If we're blocking we save the old question on the stack, so we
# can restore it after exec, if exec gets called multiple times.
log.prompt.debug("New question is blocking, saving {}".format(
self._question))
old_question = self._question
if old_question is not None:
old_question.interrupted = True
self._question = question
self.show_prompts.emit(question)
if blocking:
loop = qtutils.EventLoop()
self._loops.append(loop)
loop.destroyed.connect(lambda: self._loops.remove(loop))
question.completed.connect(loop.quit)
question.completed.connect(loop.deleteLater)
log.prompt.debug("Starting loop.exec() for {}".format(question))
flags = cast(QEventLoop.ProcessEventsFlags,
QEventLoop.ExcludeSocketNotifiers)
loop.exec(flags)
log.prompt.debug("Ending loop.exec() for {}".format(question))
log.prompt.debug("Restoring old question {}".format(old_question))
self._question = old_question
self.show_prompts.emit(old_question)
if old_question is None:
# Nothing left to restore, so we can go back to popping async
# questions.
if self._queue:
self._pop_later()
return question.answer
else:
question.completed.connect(self._pop_later)
return None
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
"""Abort question when a prompt mode was left."""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
if self._question is None:
return
log.prompt.debug("Left mode {}, hiding {}".format(
mode, self._question))
self.show_prompts.emit(None)
if self._question.answer is None and not self._question.is_aborted:
log.prompt.debug("Cancelling {} because {} was left".format(
self._question, mode))
self._question.cancel()
self._question = None
class PromptContainer(QWidget):
"""Container for prompts to be shown above the statusbar.
This is a per-window object, however each window shows the same prompt.
Attributes:
_layout: The layout used to show prompts in.
_win_id: The window ID this object is associated with.
Signals:
update_geometry: Emitted when the geometry should be updated.
"""
STYLESHEET = """
QWidget#PromptContainer {
{% if conf.statusbar.position == 'top' %}
border-bottom-left-radius: {{ conf.prompt.radius }}px;
border-bottom-right-radius: {{ conf.prompt.radius }}px;
{% else %}
border-top-left-radius: {{ conf.prompt.radius }}px;
border-top-right-radius: {{ conf.prompt.radius }}px;
{% endif %}
}
QWidget {
font: {{ conf.fonts.prompts }};
color: {{ conf.colors.prompts.fg }};
background-color: {{ conf.colors.prompts.bg }};
}
QLineEdit {
border: {{ conf.colors.prompts.border }};
}
QTreeView {
selection-background-color: {{ conf.colors.prompts.selected.bg }};
border: {{ conf.colors.prompts.border }};
}
QTreeView::branch {
background-color: {{ conf.colors.prompts.bg }};
}
QTreeView::item:selected, QTreeView::item:selected:hover,
QTreeView::branch:selected {
background-color: {{ conf.colors.prompts.selected.bg }};
}
"""
update_geometry = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._layout = QVBoxLayout(self)
self._layout.setContentsMargins(10, 10, 10, 10)
self._win_id = win_id
self._prompt: Optional[_BasePrompt] = None
self.setObjectName('PromptContainer')
self.setAttribute(Qt.WA_StyledBackground, True)
stylesheet.set_register(self)
message.global_bridge.prompt_done.connect(self._on_prompt_done)
prompt_queue.show_prompts.connect(self._on_show_prompts)
message.global_bridge.mode_left.connect(self._on_global_mode_left)
def __repr__(self):
return utils.get_repr(self, win_id=self._win_id)
@pyqtSlot(usertypes.Question)
def _on_show_prompts(self, question):
"""Show a prompt for the given question.
Args:
question: A Question object or None.
"""
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting old prompt {}".format(widget))
widget.hide()
widget.deleteLater()
if question is None:
log.prompt.debug("No prompts left, hiding prompt container.")
self._prompt = None
self.hide()
return
classes = {
usertypes.PromptMode.yesno: YesNoPrompt,
usertypes.PromptMode.text: LineEditPrompt,
usertypes.PromptMode.user_pwd: AuthenticationPrompt,
usertypes.PromptMode.download: DownloadFilenamePrompt,
usertypes.PromptMode.alert: AlertPrompt,
}
klass = classes[question.mode]
prompt = klass(question)
log.prompt.debug("Displaying prompt {}".format(prompt))
self._prompt = prompt
# If this question was interrupted, we already connected the signal
if not question.interrupted:
question.aborted.connect(
functools.partial(self._on_aborted, prompt.KEY_MODE))
modeman.enter(self._win_id, prompt.KEY_MODE, 'question asked')
self.setSizePolicy(prompt.sizePolicy())
self._layout.addWidget(prompt)
prompt.show()
self.show()
prompt.setFocus()
self.update_geometry.emit()
@pyqtSlot()
def _on_aborted(self, key_mode):
"""Leave KEY_MODE whenever a prompt is aborted."""
try:
modeman.leave(self._win_id, key_mode, 'aborted', maybe=True)
except objreg.RegistryUnavailableError:
# window was deleted: ignore
pass
@pyqtSlot(usertypes.KeyMode)
def _on_prompt_done(self, key_mode):
"""Leave the prompt mode in this window if a question was answered."""
modeman.leave(self._win_id, key_mode, ':prompt-accept', maybe=True)
@pyqtSlot(usertypes.KeyMode)
def _on_global_mode_left(self, mode):
"""Leave prompt/yesno mode in this window if it was left elsewhere.
This ensures no matter where a prompt was answered, we leave the prompt
mode and dispose of the prompt object in every window.
"""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
modeman.leave(self._win_id, mode, 'left in other window', maybe=True)
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting prompt {}".format(widget))
widget.hide()
widget.deleteLater()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno])
def prompt_accept(self, value=None, *, save=False):
"""Accept the current prompt.
//
This executes the next action depending on the question mode, e.g. asks
for the password or leaves the mode.
Args:
value: If given, uses this value instead of the entered one.
For boolean prompts, "yes"/"no" are accepted as value.
save: Save the value to the config.
"""
assert self._prompt is not None
question = self._prompt.question
try:
done = self._prompt.accept(value, save=save)
except Error as e:
raise cmdutils.CommandError(str(e))
if done:
message.global_bridge.prompt_done.emit(self._prompt.KEY_MODE)
question.done()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt], maxsplit=0)
def prompt_open_download(self, cmdline: str = None,
pdfjs: bool = False) -> None:
"""Immediately open a download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
pdfjs: Open the download via PDF.js.
"""
assert self._prompt is not None
try:
self._prompt.download_open(cmdline, pdfjs=pdfjs)
except UnsupportedOperationError:
pass
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt])
@cmdutils.argument('which', choices=['next', 'prev'])
def prompt_item_focus(self, which):
"""Shift the focus of the prompt file completion menu to another item.
Args:
which: 'next', 'prev'
"""
assert self._prompt is not None
try:
self._prompt.item_focus(which)
except UnsupportedOperationError:
pass
@cmdutils.register(
instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno])
def prompt_yank(self, sel=False):
"""Yank URL to clipboard or primary selection.
Args:
sel: Use the primary selection instead of the clipboard.
"""
assert self._prompt is not None
question = self._prompt.question
if question.url is None:
message.error('No URL found.')
return
if sel and utils.supports_selection():
target = 'primary selection'
else:
sel = False
target = 'clipboard'
utils.set_clipboard(question.url, sel)
message.info("Yanked to {}: {}".format(target, question.url))
class LineEdit(QLineEdit):
"""A line edit used in prompts."""
def __init__(self, parent=None):
super().__init__(parent)
self.setStyleSheet("""
QLineEdit {
background-color: transparent;
}
""")
self.setAttribute(Qt.WA_MacShowFocusRect, False)
def keyPressEvent(self, e):
"""Override keyPressEvent to paste primary selection on Shift + Ins."""
if e.key() == Qt.Key_Insert and e.modifiers() == Qt.ShiftModifier:
try:
text = utils.get_clipboard(selection=True, fallback=True)
except utils.ClipboardError: # pragma: no cover
e.ignore()
else:
e.accept()
self.insert(text)
return
super().keyPressEvent(e)
def __repr__(self):
return utils.get_repr(self)
class _BasePrompt(QWidget):
"""Base class for all prompts."""
KEY_MODE = usertypes.KeyMode.prompt
def __init__(self, question, parent=None):
super().__init__(parent)
self.question = question
self._vbox = QVBoxLayout(self)
self._vbox.setSpacing(15)
self._key_grid = None
def __repr__(self):
return utils.get_repr(self, question=self.question, constructor=True)
def _init_texts(self, question):
assert question.title is not None, question
title = '<font size="4"><b>{}</b></font>'.format(
html.escape(question.title))
title_label = QLabel(title, self)
self._vbox.addWidget(title_label)
if question.text is not None:
# Not doing any HTML escaping here as the text can be formatted
text_label = QLabel(question.text)
text_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self._vbox.addWidget(text_label)
def _init_key_label(self):
assert self._key_grid is None, self._key_grid
self._key_grid = QGridLayout()
self._key_grid.setVerticalSpacing(0)
all_bindings = config.key_instance.get_reverse_bindings_for(
self.KEY_MODE.name)
labels = []
for cmd, text in self._allowed_commands():
bindings = all_bindings.get(cmd, [])
if bindings:
binding = None
preferred = ['<enter>', '<escape>']
for pref in preferred:
if pref in bindings:
binding = pref
if binding is None:
binding = bindings[0]
key_label = QLabel('<b>{}</b>'.format(html.escape(binding)))
text_label = QLabel(text)
labels.append((key_label, text_label))
for i, (key_label, text_label) in enumerate(labels):
self._key_grid.addWidget(key_label, i, 0)
self._key_grid.addWidget(text_label, i, 1)
spacer = QSpacerItem(0, 0, QSizePolicy.Expanding)
self._key_grid.addItem(spacer, 0, 2)
self._vbox.addLayout(self._key_grid)
def _check_save_support(self, save):
if save:
raise UnsupportedOperationError("Saving answers is only possible "
"with yes/no prompts.")
def accept(self, value=None, save=False):
raise NotImplementedError
def download_open(self, cmdline, pdfjs):
"""Open the download directly if this is a download prompt."""
utils.unused(cmdline)
utils.unused(pdfjs)
raise UnsupportedOperationError
def item_focus(self, _which):
"""Switch to next file item if this is a filename prompt.."""
raise UnsupportedOperationError
def _allowed_commands(self):
"""Get the commands we could run as response to this message."""
raise NotImplementedError
class LineEditPrompt(_BasePrompt):
"""A prompt for a single text value."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._lineedit = LineEdit(self)
self._init_texts(question)
self._vbox.addWidget(self._lineedit)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.selectAll()
self.setFocusProxy(self._lineedit)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
self.question.answer = text
return True
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class FilenamePrompt(_BasePrompt):
"""A prompt for a filename."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
self._lineedit = LineEdit(self)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.textEdited.connect(self._set_fileview_root)
self._vbox.addWidget(self._lineedit)
self.setFocusProxy(self._lineedit)
self._init_fileview()
self._set_fileview_root(question.default)
if config.val.prompt.filebrowser:
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self._to_complete = ''
@pyqtSlot(str)
def _set_fileview_root(self, path, *, tabbed=False):
"""Set the root path for the file display."""
separators = os.sep
if os.altsep is not None:
separators += os.altsep
dirname = os.path.dirname(path)
basename = os.path.basename(path)
if not tabbed:
self._to_complete = ''
try:
if not path:
pass
elif path in separators and os.path.isdir(path):
# Input "/" -> don't strip anything
pass
elif path[-1] in separators and os.path.isdir(path):
# Input like /foo/bar/ -> show /foo/bar/ contents
path = path.rstrip(separators)
elif os.path.isdir(dirname) and not tabbed:
# Input like /foo/ba -> show /foo contents
path = dirname
self._to_complete = basename
else:
return
except OSError:
log.prompt.exception("Failed to get directory information")
return
root = self._file_model.setRootPath(path)
self._file_view.setRootIndex(root)
@pyqtSlot(QModelIndex)
def _insert_path(self, index, *, clicked=True):
"""Handle an element selection.
Args:
index: The QModelIndex of the selected element.
clicked: Whether the element was clicked.
"""
if index == QModelIndex():
path = os.path.join(self._file_model.rootPath(), self._to_complete)
else:
path = os.path.normpath(self._file_model.filePath(index))
if clicked:
path += os.sep
else:
# On Windows, when we have C:\foo and tab over .., we get C:\
path = path.rstrip(os.sep)
log.prompt.debug('Inserting path {}'.format(path))
self._lineedit.setText(path)
self._lineedit.setFocus()
self._set_fileview_root(path, tabbed=True)
if clicked:
# Avoid having a ..-subtree highlighted
self._file_view.setCurrentIndex(QModelIndex())
def _init_fileview(self):
self._file_view = QTreeView(self)
self._file_model = QFileSystemModel(self)
self._file_view.setModel(self._file_model)
self._file_view.clicked.connect(self._insert_path)
if config.val.prompt.filebrowser:
self._vbox.addWidget(self._file_view)
else:
self._file_view.hide()
# Only show name
self._file_view.setHeaderHidden(True)
for col in range(1, 4):
self._file_view.setColumnHidden(col, True)
# Nothing selected initially
self._file_view.setCurrentIndex(QModelIndex())
# The model needs to be sorted so we get the correct first/last index
self._file_model.directoryLoaded.connect(
lambda: self._file_model.sort(0))
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
text = downloads.transform_path(text)
if text is None:
message.error("Invalid filename")
return False
self.question.answer = text
return True
def item_focus(self, which):
# This duplicates some completion code, but I don't see a nicer way...
assert which in ['prev', 'next'], which
selmodel = self._file_view.selectionModel()
parent = self._file_view.rootIndex()
first_index = self._file_model.index(0, 0, parent)
row = self._file_model.rowCount(parent) - 1
last_index = self._file_model.index(row, 0, parent)
if not first_index.isValid():
# No entries
return
assert last_index.isValid()
idx = selmodel.currentIndex()
if not idx.isValid():
# No item selected yet
idx = last_index if which == 'prev' else first_index
elif which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid():
idx = last_index if which == 'prev' else first_index
idx = self._do_completion(idx, which)
selmodel.setCurrentIndex(
idx,
QItemSelectionModel.ClearAndSelect | # type: ignore[arg-type]
QItemSelectionModel.Rows)
self._insert_path(idx, clicked=False)
def _do_completion(self, idx, which):
filename = self._file_model.fileName(idx)
while not filename.startswith(self._to_complete) and idx.isValid():
if which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
filename = self._file_model.fileName(idx)
return idx
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class DownloadFilenamePrompt(FilenamePrompt):
"""A prompt for a filename for downloads."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._file_model.setFilter(
QDir.AllDirs | QDir.Drives | QDir.NoDot) # type: ignore[arg-type]
def accept(self, value=None, save=False):
done = super().accept(value, save)
answer = self.question.answer
if answer is not None:
self.question.answer = downloads.FileDownloadTarget(answer)
return done
def download_open(self, cmdline, pdfjs):
if pdfjs:
target: 'downloads._DownloadTarget' = downloads.PDFJSDownloadTarget()
else:
target = downloads.OpenFileDownloadTarget(cmdline)
self.question.answer = target
self.question.done()
message.global_bridge.prompt_done.emit(self.KEY_MODE)
def _allowed_commands(self):
cmds = [
('prompt-accept', 'Accept'),
('mode-leave', 'Abort'),
('prompt-open-download', "Open download"),
('prompt-open-download --pdfjs', "Open download via PDF.js"),
('prompt-yank', "Yank URL"),
]
return cmds
class AuthenticationPrompt(_BasePrompt):
"""A prompt for username/password."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
user_label = QLabel("Username:", self)
self._user_lineedit = LineEdit(self)
password_label = QLabel("Password:", self)
self._password_lineedit = LineEdit(self)
self._password_lineedit.setEchoMode(QLineEdit.Password)
grid = QGridLayout()
grid.addWidget(user_label, 1, 0)
grid.addWidget(self._user_lineedit, 1, 1)
grid.addWidget(password_label, 2, 0)
grid.addWidget(self._password_lineedit, 2, 1)
self._vbox.addLayout(grid)
self._init_key_label()
assert not question.default, question.default
self.setFocusProxy(self._user_lineedit)
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
if ':' not in value:
raise Error("Value needs to be in the format "
"username:password, but {} was given".format(
value))
username, password = value.split(':', maxsplit=1)
self.question.answer = AuthInfo(username, password)
return True
elif self._user_lineedit.hasFocus():
# Earlier, tab was bound to :prompt-accept, so to still support
# that we simply switch the focus when tab was pressed.
self._password_lineedit.setFocus()
return False
else:
self.question.answer = AuthInfo(self._user_lineedit.text(),
self._password_lineedit.text())
return True
def item_focus(self, which):
"""Support switching between fields with tab."""
assert which in ['prev', 'next'], which
if which == 'next' and self._user_lineedit.hasFocus():
self._password_lineedit.setFocus()
elif which == 'prev' and self._password_lineedit.hasFocus():
self._user_lineedit.setFocus()
def _allowed_commands(self):
return [('prompt-accept', "Accept"),
('mode-leave', "Abort")]
class YesNoPrompt(_BasePrompt):

    """A prompt with yes/no answers, optionally saveable to the config."""

    KEY_MODE = usertypes.KeyMode.yesno

    def __init__(self, question, parent=None):
        super().__init__(question, parent)
        self._init_texts(question)
        self._init_key_label()

    def _check_save_support(self, save):
        """Raise Error if saving was requested but no option is attached."""
        if save and self.question.option is None:
            raise Error("No setting available to save the answer for this "
                        "question.")

    def accept(self, value=None, save=False):
        """Record the answer on the question.

        Args:
            value: 'yes', 'no', or None to use the question's default.
            save: Whether to persist the answer to the config option
                  attached to the question.

        Return:
            True (the prompt is done).
        """
        self._check_save_support(save)

        if value is None:
            if self.question.default is None:
                raise Error("No default value was set for this question!")
            self.question.answer = self.question.default
        elif value == 'yes':
            self.question.answer = True
        elif value == 'no':
            self.question.answer = False
        else:
            raise Error("Invalid value {} - expected yes/no!".format(value))

        if save:
            opt = config.instance.get_opt(self.question.option)
            assert isinstance(opt.typ, configtypes.Bool)
            pattern = urlmatch.UrlPattern(self.question.url)

            try:
                config.instance.set_obj(opt.name, self.question.answer,
                                        pattern=pattern, save_yaml=True)
            except configexc.Error as e:
                # Chain the original config exception so its traceback is
                # preserved instead of being discarded.
                raise Error(str(e)) from e

        return True

    def _allowed_commands(self):
        """Return the commands valid for this prompt, depending on question."""
        cmds = []

        cmds.append(('prompt-accept yes', "Yes"))
        if self.question.option is not None:
            cmds.append(('prompt-accept --save yes', "Always"))

        cmds.append(('prompt-accept no', "No"))
        if self.question.option is not None:
            cmds.append(('prompt-accept --save no', "Never"))

        if self.question.default is not None:
            assert self.question.default in [True, False]
            default = 'yes' if self.question.default else 'no'
            cmds.append(('prompt-accept', "Use default ({})".format(default)))

        cmds.append(('mode-leave', "Abort"))
        cmds.append(('prompt-yank', "Yank URL"))
        return cmds
class AlertPrompt(_BasePrompt):

    """A prompt which only shows a message and takes no answer."""

    def __init__(self, question, parent=None):
        super().__init__(question, parent)
        self._init_texts(question)
        self._init_key_label()

    def accept(self, value=None, save=False):
        """Dismiss the alert; passing any explicit value is an error."""
        self._check_save_support(save)
        if value is not None:
            raise Error("No value is permitted with alert prompts!")
        # The prompt is simply marked as done; self.question.answer stays
        # unset since an alert has nothing to answer.
        return True

    def _allowed_commands(self):
        """Only hiding the alert is possible."""
        return [('prompt-accept', "Hide")]
def init():
    """Initialize global prompt objects.

    Creates the module-level PromptQueue and connects it to the global
    message bridge so questions asked elsewhere get routed to the queue.
    """
    global prompt_queue
    prompt_queue = PromptQueue()
    # Qt.DirectConnection: the slot runs synchronously when the signal fires.
    message.global_bridge.ask_question.connect(  # type: ignore[call-arg]
        prompt_queue.ask_question, Qt.DirectConnection)
``` |
[
{
"content": "Here is a code file:\n```python\n\"\"\"Datatype validation.\"\"\"\n\n__all__ = ['failures', 'is_valid']\n\nfrom collections import defaultdict\n\nfrom datatype.tools import Choice, Literal, walk\n\n\ndef is_valid(datatype, value):\n \"\"\"Return boolean representing validity of `value` against ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"Datatype validation.\"\"\"\n\n__all__ = ['failures', 'is_valid']\n\nfrom collections import defaultdict\n\nfrom datatype.tools import Choice, Literal, walk\n\n\ndef is_valid(datatype, value):\n \"\"\"Return boolean representing validity of ... | ```python
"""Datatype validation."""
__all__ = ['failures', 'is_valid']
from collections import defaultdict
from datatype.tools import Choice, Literal, walk
def is_valid(datatype, value):
    """Return True when `value` validates cleanly against `datatype`."""
    return len(failures(datatype, value)) == 0
def failures(datatype, value):
    """Return a list of messages describing how `value` fails `datatype`.

    An empty list means the value is valid.

    Params:
        `datatype`: Datatype to validate value against. See README.markdown
            for examples.
        `value`: Value to validate.

    Example:
        >>> failures('int', 'foo')
        ['expected int, got str']
    """
    collected = []

    def record(path, *step_args):
        if path:
            template = '%s: %%s' % path
        else:
            template = '%s'
        step_failures = validate_step(*step_args) or []
        collected.extend(template % failure for failure in step_failures)

    walk(datatype, value, record)
    return collected
def validate_step(datatype, value, options):
    """Validate a single value against one level of `datatype`.

    Returns a list of failure messages for this step, or None when nothing
    is wrong.  `options` is an iterable of modifiers such as 'nullable'.
    Recursion over nested structures is driven by the caller (`failures`
    via `walk`), not here.
    """
    dtype, vtype = type(datatype), type(value)

    # nulls
    if vtype is type(None):
        if 'nullable' not in options:
            return ['unexpected null for non-nullable type']
    # primitives
    elif dtype == str:
        if not any(type(value) is x for x in primitives[datatype]):
            return ['expected %s, got %s' % (datatype, vtype.__name__)]
    # lists & tuples
    elif dtype == list:
        dlen, vlen = len(datatype), len(value)
        if dlen > 1 and dlen != vlen:
            error = 'missing required' if dlen > vlen else 'unexpected'
            # range() instead of xrange(): xrange raises NameError under
            # Python 3; range behaves the same here on both versions.
            return ['%s value at index %s' % (error, i)
                    for i in range(min(dlen, vlen), max(dlen, vlen))]
    # objects (dictionaries)
    elif dtype in (defaultdict, dict):
        if vtype not in (defaultdict, dict):
            return ['expected dict, got %s' % vtype.__name__]

        optional = lambda x: x.startswith('optional ')
        all_keys = (k.replace('optional ', '', 1) if optional(k) else k
                    for k in datatype)
        required_keys = (k for k in datatype
                         if not (optional(k) or k == '_any_'))

        # Renamed from `failures` to avoid shadowing the module-level
        # failures() function.
        problems = ['missing required property: "%s"' % k
                    for k in set(required_keys) - set(value)]

        if '_any_' not in datatype:
            problems += ['unexpected property "%s"' % k
                         for k in set(value) - set(all_keys)]

        return problems
    # choice datatypes
    elif dtype == Choice:
        if not any(is_valid(x, value) for x in datatype):
            return ['%s is none of expected %s' % (value, datatype)]
    # literal values
    elif dtype == Literal:
        if datatype.value != value:
            return ['expected literal value "%s", got "%s"' % (datatype.value,
                                                               value)]
# Mapping of primitive datatype names to the concrete Python types accepted
# for each.  NOTE: the `unicode` reference ties this module to Python 2;
# under Python 3 this assignment raises NameError at import time.
primitives = {
    'int': (int,),
    'float': (float,),
    'str': (str, unicode),
    'bool': (bool,)
}
``` |
[
{
"content": "Repeat the following code:\n```python\n#\n# Copyright 2009-2016 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n#\n# Copyright 2009-2016 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the ... | ```python
#
# Copyright 2009-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import os
import errno
import time
import threading
import struct
import logging
import uuid
from six.moves import queue
from vdsm.config import config
from vdsm.storage import misc
from vdsm.storage import task
from vdsm.storage.exception import InvalidParameterException
from vdsm.storage.threadPool import ThreadPool
from vdsm import concurrent
from vdsm import constants
__author__ = "ayalb"
__date__ = "$Mar 9, 2009 5:25:07 PM$"


# On-disk mailbox protocol layout constants.
CHECKSUM_BYTES = 4  # trailing checksum field of each mailbox
MAILBOX_SIZE = 4096  # one per-host mailbox, in bytes
PACKED_UUID_SIZE = 16  # a UUID packed to raw bytes
# NOTE(review): 0xFFFFFFFF is the 32 bit unsigned maximum (2**32 - 1), not
# 64 bit as the historical comment claimed.
VOLUME_MAX_SIZE = 0xFFFFFFFF
SIZE_CHARS = 16  # hex digits used to encode the requested size in a message
MESSAGE_VERSION = "1"
MESSAGE_SIZE = 64  # one message slot, in bytes
CLEAN_MESSAGE = "\1" * MESSAGE_SIZE  # all-\x01 sentinel message
EXTEND_CODE = "xtnd"  # opcode of the volume-extend message
BLOCK_SIZE = 512
REPLY_OK = 1
EMPTYMAILBOX = MAILBOX_SIZE * "\0"
SLOTS_PER_MAILBOX = int(MAILBOX_SIZE / MESSAGE_SIZE)
# Last message slot is reserved for metadata (checksum, extendable mailbox,
# etc)
MESSAGES_PER_MAILBOX = SLOTS_PER_MAILBOX - 1

# Checksum of an all-zero mailbox, precomputed so empty mailboxes can be
# recognized cheaply.
_zeroCheck = misc.checksum(EMPTYMAILBOX, CHECKSUM_BYTES)
# Assumes CHECKSUM_BYTES equals 4!!!
pZeroChecksum = struct.pack('<l', _zeroCheck)
def dec2hex(n):
    """Return `n` formatted as a lowercase hex string without '0x' prefix."""
    return format(n, 'x')
def runTask(args):
    """Create and prepare a storage Task for a mailbox callback.

    Args:
        args: Either a bare command, or a tuple of (command, arg1, ...).
    """
    if isinstance(args, tuple):  # isinstance instead of type() == tuple
        cmd = args[0]
        args = args[1:]
    else:
        cmd = args
        # Use an empty tuple (not None): it is unpacked with *args below,
        # and unpacking None would raise TypeError.
        args = ()
    ctask = task.Task(id=None, name=cmd)
    ctask.prepare(cmd, *args)
def _mboxExecCmd(*args, **kwargs):
    # Thin wrapper around misc.execCmd, presumably kept as a separate name
    # so mailbox I/O can be stubbed independently — verify against tests.
    return misc.execCmd(*args, **kwargs)
class SPM_Extend_Message:

    """A 64-byte 'xtnd' mailbox message asking the SPM to extend a volume."""

    log = logging.getLogger('storage.SPM.Messages.Extend')

    def __init__(self, volumeData, newSize, callbackFunction=None):
        # volumeData must identify the volume fully: pool, domain and volume.
        if ('poolID' not in volumeData) or \
                ('domainID' not in volumeData) or \
                ('volumeID' not in volumeData):
            self.log.error('create extend msg failed for volume: %s, size:'
                           ' %d', '-'.join(volumeData.values()), newSize)
            raise InvalidParameterException('volumeData dictionary',
                                            volumeData)

        if (newSize < 0) or (newSize > VOLUME_MAX_SIZE):
            raise InvalidParameterException('volumeSize', newSize)

        misc.validateUUID(volumeData['domainID'], 'domainID')
        misc.validateUUID(volumeData['volumeID'], 'volumeID')

        self.pool = volumeData['poolID']
        self.volumeData = volumeData
        self.newSize = str(dec2hex(newSize))  # size as lowercase hex string
        self.callback = callbackFunction

        # Message structure is rigid (order must be kept and is relied upon):
        # Version (1 byte), OpCode (4 bytes), Domain UUID (16 bytes), Volume
        # UUID (16 bytes), Requested size (16 bytes), Padding to 64 bytes (14
        # bytes)
        domain = misc.packUuid(volumeData['domainID'])
        volume = misc.packUuid(volumeData['volumeID'])
        # Build base payload
        payload = MESSAGE_VERSION + EXTEND_CODE + domain + volume + \
            self.newSize.rjust(SIZE_CHARS, "0")
        # Pad payload with zeros
        self.payload = payload.ljust(MESSAGE_SIZE, "0")

        self.log.debug('new extend msg created: domain: %s, volume: %s',
                       volumeData['domainID'], volumeData['volumeID'])

    def __getitem__(self, index):
        # Allow byte-wise indexing of the raw payload (used for duplicate
        # detection in HSM_MailMonitor._handleMessage).
        return self.payload[index]

    def checkReply(self, reply):
        """Verify `reply` answers this request; return REPLY_OK.

        Raises RuntimeError when the reply's version/opcode/UUID prefix
        does not match this message's payload.
        """
        # Sanity check - Make sure reply is for current message
        sizeOffset = 5 + 2 * PACKED_UUID_SIZE
        if (self.payload[0:sizeOffset] != reply[0:sizeOffset]):
            self.log.error("SPM_Extend_Message: Reply message volume data "
                           "(domainID + volumeID) differs from request "
                           "message, reply : %s, orig: %s", reply,
                           self.payload)
            raise RuntimeError('Incorrect reply')
        # if self.payload[sizeOffset:sizeOffset + PACKED_UUID_SIZE] > \
        #        reply[sizeOffset:sizeOffset + PACKED_UUID_SIZE]):
        #    self.log.error("SPM_Extend_Message: New size is smaller than "
        #                   "requested size")
        #    raise RuntimeError('Request failed')
        return REPLY_OK

    @classmethod
    def processRequest(cls, pool, msgID, payload):
        """SPM-side handler: extend the volume and send a reply message.

        On failure a reply with size 0 is sent so the HSM side can detect
        that the request was not honored.
        """
        cls.log.debug("processRequest, payload:" + repr(payload))
        sdOffset = 5
        volumeOffset = sdOffset + PACKED_UUID_SIZE
        sizeOffset = volumeOffset + PACKED_UUID_SIZE

        volume = {}
        volume['poolID'] = pool.spUUID
        volume['domainID'] = misc.unpackUuid(
            payload[sdOffset:sdOffset + PACKED_UUID_SIZE])
        volume['volumeID'] = misc.unpackUuid(
            payload[volumeOffset:volumeOffset + PACKED_UUID_SIZE])
        size = int(payload[sizeOffset:sizeOffset + SIZE_CHARS], 16)

        cls.log.info("processRequest: extending volume %s "
                     "in domain %s (pool %s) to size %d", volume['volumeID'],
                     volume['domainID'], volume['poolID'], size)

        msg = None
        try:
            try:
                pool.extendVolume(volume['domainID'], volume['volumeID'], size)
                msg = SPM_Extend_Message(volume, size)
            except:
                cls.log.error("processRequest: Exception caught while trying "
                              "to extend volume: %s in domain: %s",
                              volume['volumeID'], volume['domainID'],
                              exc_info=True)
                msg = SPM_Extend_Message(volume, 0)
        finally:
            # Always reply, even on failure (size 0 signals the error).
            pool.spmMailer.sendReply(msgID, msg)
            return {'status': {'code': 0, 'message': 'Done'}}
class HSM_Mailbox:

    """HSM-side facade over the mailbox: queues extend requests for the SPM."""

    log = logging.getLogger('storage.Mailbox.HSM')

    def __init__(self, hostID, poolID, inbox, outbox, monitorInterval=2):
        self._hostID = str(hostID)
        self._poolID = str(poolID)
        self._monitorInterval = monitorInterval
        self._queue = queue.Queue(-1)  # unbounded request queue
        self._inbox = inbox
        if not os.path.exists(self._inbox):
            self.log.error("HSM_Mailbox create failed - inbox %s does not "
                           "exist" % repr(self._inbox))
            raise RuntimeError("HSM_Mailbox create failed - inbox %s does not "
                               "exist" % repr(self._inbox))
        self._outbox = outbox
        if not os.path.exists(self._outbox):
            self.log.error("HSM_Mailbox create failed - outbox %s does not "
                           "exist" % repr(self._outbox))
            raise RuntimeError("HSM_Mailbox create failed - outbox %s does "
                               "not exist" % repr(self._outbox))
        # Monitor thread starts immediately inside HSM_MailMonitor.__init__.
        self._mailman = HSM_MailMonitor(self._inbox, self._outbox, hostID,
                                        self._queue, monitorInterval)
        self.log.debug('HSM_MailboxMonitor created for pool %s' % self._poolID)

    def sendExtendMsg(self, volumeData, newSize, callbackFunction=None):
        """Queue a volume-extend request addressed to this mailbox's pool."""
        msg = SPM_Extend_Message(volumeData, newSize, callbackFunction)
        if str(msg.pool) != self._poolID:
            raise ValueError('PoolID does not correspond to Mailbox pool')
        self._queue.put(msg)

    def stop(self):
        """Stop the monitor thread and abandon outstanding tasks."""
        if self._mailman:
            self._mailman.immStop()
            self._mailman.tp.joinAll(waitForTasks=False)
        else:
            self.log.warning("HSM_MailboxMonitor - No mail monitor object "
                             "available to stop")

    def wait(self, timeout=None):
        """Wait for the monitor thread to finish; return True if it did."""
        return self._mailman.wait(timeout)

    def flushMessages(self):
        """Ask the monitor to rewrite the outgoing mailbox on its next pass."""
        if self._mailman:
            self._mailman.immFlush()
        else:
            self.log.warning("HSM_MailboxMonitor - No mail monitor object "
                             "available to flush")
class HSM_MailMonitor(object):

    """Background thread: writes queued requests into the host's outbox slot
    and polls the inbox for SPM replies, via dd over the shared block device.
    """

    log = logging.getLogger('storage.MailBox.HsmMailMonitor')

    def __init__(self, inbox, outbox, hostID, queue, monitorInterval):
        # Save arguments
        tpSize = config.getint('irs', 'thread_pool_size') / 2
        waitTimeout = wait_timeout(monitorInterval)
        maxTasks = config.getint('irs', 'max_tasks')
        self.tp = ThreadPool("mailbox-hsm", tpSize, waitTimeout, maxTasks)
        self._stop = False
        self._flush = False
        self._queue = queue
        self._activeMessages = {}  # slot index -> message awaiting SPM reply
        self._monitorInterval = monitorInterval
        self._hostID = int(hostID)
        self._used_slots_array = [0] * MESSAGES_PER_MAILBOX
        self._outgoingMail = EMPTYMAILBOX
        self._incomingMail = EMPTYMAILBOX
        # TODO: add support for multiple paths (multiple mailboxes)
        # dd reads/writes exactly this host's MAILBOX_SIZE slot (skip/seek).
        self._inCmd = [constants.EXT_DD,
                       'if=' + str(inbox),
                       'iflag=direct,fullblock',
                       'bs=' + str(MAILBOX_SIZE),
                       'count=1',
                       'skip=' + str(self._hostID)
                       ]
        self._outCmd = [constants.EXT_DD,
                        'of=' + str(outbox),
                        'iflag=fullblock',
                        'oflag=direct',
                        'conv=notrunc',
                        'bs=' + str(MAILBOX_SIZE),
                        'count=1',
                        'seek=' + str(self._hostID)
                        ]
        self._init = False
        self._initMailbox()  # Read initial mailbox state
        self._msgCounter = 0
        self._sendMail()  # Clear outgoing mailbox
        self._thread = concurrent.thread(self.run, name="mailbox-hsm",
                                         log=self.log)
        self._thread.start()

    def _initMailbox(self):
        # Sync initial incoming mail state with storage view
        (rc, out, err) = _mboxExecCmd(self._inCmd, raw=True)
        if rc == 0:
            self._incomingMail = out
            self._init = True
        else:
            self.log.warning("HSM_MailboxMonitor - Could not initialize "
                             "mailbox, will not accept requests until init "
                             "succeeds")

    def immStop(self):
        # Flag checked by run(); the thread exits on its next loop iteration.
        self._stop = True

    def immFlush(self):
        # Force a mailbox rewrite on the next loop iteration.
        self._flush = True

    def wait(self, timeout=None):
        """Join the monitor thread; return True when it has stopped."""
        self._thread.join(timeout=timeout)
        return not self._thread.is_alive()

    def _handleResponses(self, newMsgs):
        """Compare the freshly read inbox against the last snapshot and
        process every changed slot.  Return True if the outgoing mailbox
        was modified and must be rewritten.
        """
        rc = False
        for i in range(0, MESSAGES_PER_MAILBOX):
            # Skip checking non used slots
            if self._used_slots_array[i] == 0:
                continue
            # Skip empty return messages (messages with version 0)
            start = i * MESSAGE_SIZE
            # First byte of message is message version.
            # Check return message version, if 0 then message is empty
            if newMsgs[start] in ['\0', '0']:
                continue
            for j in range(start, start + MESSAGE_SIZE):
                if newMsgs[j] != self._incomingMail[j]:
                    break
            # If search exhausted then message hasn't changed since last read
            # and can be skipped
            # NOTE(review): j == start + MESSAGE_SIZE - 1 also holds when the
            # ONLY changed byte is the last one, so such a reply would be
            # wrongly skipped — confirm whether the protocol can produce that.
            if j == (start + MESSAGE_SIZE - 1):
                continue
            #
            # We only get here if there is a novel reply so we can remove the
            # message from the active list and the outgoing mail and handle the
            # reply
            #
            rc = True
            newMsg = newMsgs[start:start + MESSAGE_SIZE]
            if newMsg == CLEAN_MESSAGE:
                del self._activeMessages[i]
                self._used_slots_array[i] = 0
                self._msgCounter -= 1
                self._outgoingMail = self._outgoingMail[0:start] + \
                    MESSAGE_SIZE * "\0" + self._outgoingMail[start +
                                                             MESSAGE_SIZE:]
                continue
            msg = self._activeMessages[i]
            self._activeMessages[i] = CLEAN_MESSAGE
            self._outgoingMail = self._outgoingMail[0:start] + \
                CLEAN_MESSAGE + self._outgoingMail[start + MESSAGE_SIZE:]
            try:
                self.log.debug("HSM_MailboxMonitor(%s/%s) - Checking reply: "
                               "%s", self._msgCounter, MESSAGES_PER_MAILBOX,
                               repr(newMsg))
                msg.checkReply(newMsg)
                if msg.callback:
                    try:
                        id = str(uuid.uuid4())
                        if not self.tp.queueTask(id, runTask, (msg.callback,
                                                 msg.volumeData)):
                            raise Exception()
                    except:
                        self.log.error("HSM_MailMonitor: exception caught "
                                       "while running msg callback, for "
                                       "message: %s, callback function: %s",
                                       repr(msg.payload), msg.callback,
                                       exc_info=True)
            except RuntimeError as e:
                self.log.error("HSM_MailMonitor: exception: %s caught while "
                               "checking reply for message: %s, reply: %s",
                               str(e), repr(msg.payload), repr(newMsg))
            except:
                self.log.error("HSM_MailMonitor: exception caught while "
                               "checking reply from SPM, request was: %s "
                               "reply: %s", repr(msg.payload), repr(newMsg),
                               exc_info=True)
        # Finished processing incoming mail, now save mail to compare against
        # next batch
        self._incomingMail = newMsgs
        return rc

    def _checkForMail(self):
        """Read this host's inbox slot and dispatch any new replies."""
        # self.log.debug("HSM_MailMonitor - checking for mail")
        # self.log.debug("Running command: " + str(self._inCmd))
        (rc, in_mail, err) = misc.execCmd(self._inCmd, raw=True)
        if rc:
            raise RuntimeError("_handleResponses.Could not read mailbox - rc "
                               "%s" % rc)
        if (len(in_mail) != MAILBOX_SIZE):
            raise RuntimeError("_handleResponses.Could not read mailbox - len "
                               "%s != %s" % (len(in_mail), MAILBOX_SIZE))
        # self.log.debug("Parsing inbox content: %s", in_mail)
        return self._handleResponses(in_mail)

    def _sendMail(self):
        """Recompute the mailbox checksum and write the outbox slot."""
        self.log.info("HSM_MailMonitor sending mail to SPM - " +
                      str(self._outCmd))
        chk = misc.checksum(
            self._outgoingMail[0:MAILBOX_SIZE - CHECKSUM_BYTES],
            CHECKSUM_BYTES)
        pChk = struct.pack('<l', chk)  # Assumes CHECKSUM_BYTES equals 4!!!
        self._outgoingMail = \
            self._outgoingMail[0:MAILBOX_SIZE - CHECKSUM_BYTES] + pChk
        _mboxExecCmd(self._outCmd, data=self._outgoingMail)

    def _handleMessage(self, message):
        """Place `message` into a free outgoing slot, ignoring duplicates."""
        # TODO: add support for multiple mailboxes
        # NOTE(review): freeSlot is initialized to False and slot index 0 is
        # falsy, so when slot 0 is the chosen free slot the `if not freeSlot`
        # check below still raises "list full" — confirm this corner case.
        freeSlot = False
        for i in range(0, MESSAGES_PER_MAILBOX):
            if self._used_slots_array[i] == 0:
                if not freeSlot:
                    freeSlot = i
                continue
            duplicate = True
            for j in range(0, MESSAGE_SIZE):
                if message[j] != self._activeMessages[i][j]:
                    duplicate = False
                    break
            if duplicate:
                self.log.debug("HSM_MailMonitor - ignoring duplicate message "
                               "%s" % (repr(message)))
                return
        if not freeSlot:
            raise RuntimeError("HSM_MailMonitor - Active messages list full, "
                               "cannot add new message")
        self._msgCounter += 1
        self._used_slots_array[freeSlot] = 1
        self._activeMessages[freeSlot] = message
        start = freeSlot * MESSAGE_SIZE
        end = start + MESSAGE_SIZE
        self._outgoingMail = self._outgoingMail[0:start] + message.payload + \
            self._outgoingMail[end:]
        self.log.debug("HSM_MailMonitor - start: %s, end: %s, len: %s, "
                       "message(%s/%s): %s" %
                       (start, end, len(self._outgoingMail), self._msgCounter,
                        MESSAGES_PER_MAILBOX,
                        repr(self._outgoingMail[start:end])))

    def run(self):
        """Main monitor loop: drain the request queue, poll for replies,
        rewrite the outbox when anything changed, back off on failures.
        """
        try:
            failures = 0

            # Do not start processing requests before incoming mailbox is
            # initialized
            while not self._init and not self._stop:
                try:
                    time.sleep(2)
                    self._initMailbox()  # Read initial mailbox state
                except:
                    pass

            while not self._stop:
                try:
                    message = None
                    sendMail = False
                    # If no message is pending, block_wait until a new message
                    # or stop command arrives
                    while not self._stop and not message and \
                            not self._activeMessages:
                        try:
                            # self.log.debug("No requests in queue, going to "
                            #               "sleep until new requests arrive")
                            # Check if a new message is waiting to be sent
                            message = self._queue.get(
                                block=True, timeout=self._monitorInterval)
                            self._handleMessage(message)
                            message = None
                            sendMail = True
                        except queue.Empty:
                            pass

                    if self._stop:
                        break

                    # If pending messages available, check if there are new
                    # messages waiting in queue as well
                    empty = False
                    while (not empty) and \
                            (len(self._activeMessages) < MESSAGES_PER_MAILBOX):
                        # TODO: Remove single mailbox limitation
                        try:
                            message = self._queue.get(block=False)
                            self._handleMessage(message)
                            message = None
                            sendMail = True
                        except queue.Empty:
                            empty = True

                    if self._flush:
                        self._flush = False
                        sendMail = True

                    try:
                        sendMail |= self._checkForMail()
                        failures = 0
                    except:
                        self.log.error("HSM_MailboxMonitor - Exception caught "
                                       "while checking for mail",
                                       exc_info=True)
                        failures += 1

                    if sendMail:
                        self._sendMail()

                    # If there are active messages waiting for SPM reply, wait
                    # a few seconds before performing another IO op
                    if self._activeMessages and not self._stop:
                        # If recurring failures then sleep for one minute
                        # before retrying
                        if (failures > 9):
                            time.sleep(60)
                        else:
                            time.sleep(self._monitorInterval)

                except:
                    self.log.error("HSM_MailboxMonitor - Incoming mail"
                                   "monitoring thread caught exception; "
                                   "will try to recover", exc_info=True)
        finally:
            self.log.info("HSM_MailboxMonitor - Incoming mail monitoring "
                          "thread stopped, clearing outgoing mail")
            self._outgoingMail = EMPTYMAILBOX
            self._sendMail()  # Clear outgoing mailbox
class SPM_MailMonitor:

    """SPM-side monitor: polls every host's outbox for requests, dispatches
    them by 4-byte message type, and writes replies back to the inboxes.
    """

    log = logging.getLogger('storage.MailBox.SpmMailMonitor')

    def registerMessageType(self, messageType, callback):
        # callback is invoked (via runTask) as callback(pool, msgId, payload).
        self._messageTypes[messageType] = callback

    def unregisterMessageType(self, messageType):
        del self._messageTypes[messageType]

    def __init__(self, poolID, maxHostID, inbox, outbox, monitorInterval=2):
        """
        Note: inbox parameter here should point to the HSM's outbox
        mailbox file, and vice versa.
        """
        self._messageTypes = {}
        # Save arguments
        self._stop = False
        self._stopped = False
        self._poolID = poolID
        # NOTE(review): '/' here (and below) relies on Python 2 integer
        # division; under Python 3 these produce floats — confirm intent.
        tpSize = config.getint('irs', 'thread_pool_size') / 2
        waitTimeout = wait_timeout(monitorInterval)
        maxTasks = config.getint('irs', 'max_tasks')
        self.tp = ThreadPool("mailbox-spm", tpSize, waitTimeout, maxTasks)
        self._inbox = inbox
        if not os.path.exists(self._inbox):
            self.log.error("SPM_MailMonitor create failed - inbox %s does not "
                           "exist" % repr(self._inbox))
            raise RuntimeError("SPM_MailMonitor create failed - inbox %s does "
                               "not exist" % repr(self._inbox))
        self._outbox = outbox
        if not os.path.exists(self._outbox):
            self.log.error("SPM_MailMonitor create failed - outbox %s does "
                           "not exist" % repr(self._outbox))
            raise RuntimeError("SPM_MailMonitor create failed - outbox %s "
                               "does not exist" % repr(self._outbox))
        self._numHosts = int(maxHostID)
        self._outMailLen = MAILBOX_SIZE * self._numHosts
        self._monitorInterval = monitorInterval
        # TODO: add support for multiple paths (multiple mailboxes)
        self._outgoingMail = self._outMailLen * "\0"
        self._incomingMail = self._outgoingMail
        self._inCmd = ['dd',
                       'if=' + str(self._inbox),
                       'iflag=direct,fullblock',
                       'count=1'
                       ]
        self._outCmd = ['dd',
                        'of=' + str(self._outbox),
                        'oflag=direct',
                        'iflag=fullblock',
                        'conv=notrunc',
                        'count=1'
                        ]
        self._outLock = threading.Lock()
        self._inLock = threading.Lock()
        # Clear outgoing mail
        self.log.debug("SPM_MailMonitor - clearing outgoing mail, command is: "
                       "%s", self._outCmd)
        cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
        (rc, out, err) = _mboxExecCmd(cmd, data=self._outgoingMail)
        if rc:
            self.log.warning("SPM_MailMonitor couldn't clear outgoing mail, "
                             "dd failed")
        self._thread = concurrent.thread(
            self.run, name="mailbox-spm", log=self.log)
        self._thread.start()
        self.log.debug('SPM_MailMonitor created for pool %s' % self._poolID)

    def wait(self, timeout=None):
        """Join the monitor thread; return True when it has stopped."""
        self._thread.join(timeout=timeout)
        return not self._thread.is_alive()

    def stop(self):
        # Flag checked by run(); the thread exits on its next loop iteration.
        self._stop = True

    def isStopped(self):
        return self._stopped

    def getMaxHostID(self):
        return self._numHosts

    def setMaxHostID(self, newMaxId):
        """Grow or shrink the in-memory mailbox buffers to `newMaxId` hosts."""
        with self._inLock:
            with self._outLock:
                diff = newMaxId - self._numHosts
                if diff > 0:
                    delta = MAILBOX_SIZE * diff * "\0"
                    self._outgoingMail += delta
                    self._incomingMail += delta
                elif diff < 0:
                    # NOTE(review): delta is negative here, so [:-delta]
                    # keeps only the first MAILBOX_SIZE*|diff| characters
                    # rather than dropping the last |diff| mailboxes —
                    # looks like a truncation bug; confirm before relying
                    # on shrinking.
                    delta = MAILBOX_SIZE * diff
                    self._outgoingMail = self._outgoingMail[:-delta]
                    self._incomingMail = self._incomingMail[:-delta]
                self._numHosts = newMaxId
                self._outMailLen = MAILBOX_SIZE * self._numHosts

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self`; it receives the class object.
    @classmethod
    def validateMailbox(self, mailbox, mailboxIndex):
        """
        Return True if mailbox has a valid checksum, and is not an empty
        mailbox, False otherwise.
        """
        assert len(mailbox) == MAILBOX_SIZE
        data = mailbox[:-CHECKSUM_BYTES]
        checksum = mailbox[-CHECKSUM_BYTES:]
        n = misc.checksum(data, CHECKSUM_BYTES)
        expected = struct.pack('<l', n)  # Assumes CHECKSUM_BYTES equals 4!!!
        if checksum != expected:
            self.log.error(
                "mailbox %s checksum failed, not clearing mailbox, clearing "
                "new mail (data=%r, checksum=%r, expected=%r)",
                mailboxIndex, data, checksum, expected)
            return False
        elif expected == pZeroChecksum:
            return False  # Ignore messages of empty mailbox
        return True

    def _handleRequests(self, newMail):
        """Scan all host mailboxes in `newMail` for new requests and queue
        their handlers.  Return True if the outgoing mail changed and must
        be flushed to storage.
        """
        send = False

        # run through all messages and check if new messages have arrived
        # (since last read)
        for host in range(0, self._numHosts):
            # Check mailbox checksum
            mailboxStart = host * MAILBOX_SIZE

            isMailboxValidated = False

            for i in range(0, MESSAGES_PER_MAILBOX):

                msgId = host * SLOTS_PER_MAILBOX + i
                msgStart = msgId * MESSAGE_SIZE

                # First byte of message is message version. Check message
                # version, if 0 then message is empty and can be skipped
                if newMail[msgStart] in ['\0', '0']:
                    continue

                # Most mailboxes are probably empty so it costs less to check
                # that all messages start with 0 than to validate the mailbox,
                # therefor this is done after we find a non empty message in
                # mailbox
                if not isMailboxValidated:
                    if not self.validateMailbox(
                            newMail[mailboxStart:mailboxStart + MAILBOX_SIZE],
                            host):
                        # Cleaning invalid mbx in newMail
                        newMail = newMail[:mailboxStart] + EMPTYMAILBOX + \
                            newMail[mailboxStart + MAILBOX_SIZE:]
                        break
                    self.log.debug("SPM_MailMonitor: Mailbox %s validated, "
                                   "checking mail", host)
                    isMailboxValidated = True

                newMsg = newMail[msgStart:msgStart + MESSAGE_SIZE]
                msgOffset = msgId * MESSAGE_SIZE
                if newMsg == CLEAN_MESSAGE:
                    # Should probably put a setter on outgoingMail which would
                    # take the lock
                    self._outLock.acquire()
                    try:
                        self._outgoingMail = \
                            self._outgoingMail[0:msgOffset] + CLEAN_MESSAGE + \
                            self._outgoingMail[msgOffset + MESSAGE_SIZE:
                                               self._outMailLen]
                    finally:
                        self._outLock.release()
                    send = True
                    continue

                # Message isn't empty, check if its new
                isMessageNew = False
                for j in range(msgStart, msgStart + MESSAGE_SIZE):
                    if newMail[j] != self._incomingMail[j]:
                        isMessageNew = True
                        break

                # If search exhausted, i.e. message hasn't changed since last
                # read, it can be skipped
                if not isMessageNew:
                    continue

                # We only get here if there is a novel request
                try:
                    # Bytes 1..4 of the message carry the 4-char opcode.
                    msgType = newMail[msgStart + 1:msgStart + 5]
                    if msgType in self._messageTypes:
                        # Use message class to process request according to
                        # message specific logic
                        id = str(uuid.uuid4())
                        self.log.debug("SPM_MailMonitor: processing request: "
                                       "%s" % repr(newMail[
                                           msgStart:msgStart + MESSAGE_SIZE]))
                        res = self.tp.queueTask(
                            id, runTask, (self._messageTypes[msgType], msgId,
                                          newMail[msgStart:
                                                  msgStart + MESSAGE_SIZE])
                        )
                        if not res:
                            raise Exception()
                    else:
                        self.log.error("SPM_MailMonitor: unknown message type "
                                       "encountered: %s", msgType)
                except RuntimeError as e:
                    self.log.error("SPM_MailMonitor: exception: %s caught "
                                   "while handling message: %s", str(e),
                                   newMail[msgStart:msgStart + MESSAGE_SIZE])
                except:
                    self.log.error("SPM_MailMonitor: exception caught while "
                                   "handling message: %s",
                                   newMail[msgStart:msgStart + MESSAGE_SIZE],
                                   exc_info=True)

        self._incomingMail = newMail
        return send

    def _checkForMail(self):
        """Read the whole inbox, handle requests, and flush outgoing mail
        when needed.
        """
        # Lock is acquired in order to make sure that neither _numHosts nor
        # incomingMail are changed during checkForMail
        self._inLock.acquire()
        try:
            # self.log.debug("SPM_MailMonitor -_checking for mail")
            cmd = self._inCmd + ['bs=' + str(self._outMailLen)]
            # self.log.debug("SPM_MailMonitor - reading incoming mail, "
            #               "command: " + str(cmd))
            (rc, in_mail, err) = misc.execCmd(cmd, raw=True)
            if rc:
                raise IOError(errno.EIO, "_handleRequests._checkForMail - "
                              "Could not read mailbox: %s" % self._inbox)

            if (len(in_mail) != (self._outMailLen)):
                self.log.error('SPM_MailMonitor: _checkForMail - dd succeeded '
                               'but read %d bytes instead of %d, cannot check '
                               'mail. Read mail contains: %s', len(in_mail),
                               self._outMailLen, repr(in_mail[:80]))
                raise RuntimeError("_handleRequests._checkForMail - Could not "
                                   "read mailbox")
            # self.log.debug("Parsing inbox content: %s", in_mail)
            if self._handleRequests(in_mail):
                self._outLock.acquire()
                try:
                    cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
                    (rc, out, err) = _mboxExecCmd(cmd,
                                                  data=self._outgoingMail)
                    if rc:
                        self.log.warning("SPM_MailMonitor couldn't write "
                                         "outgoing mail, dd failed")
                finally:
                    self._outLock.release()
        finally:
            self._inLock.release()

    def sendReply(self, msgID, msg):
        """Write msg.payload into slot `msgID` and flush that mailbox."""
        # Lock is acquired in order to make sure that neither _numHosts nor
        # outgoingMail are changed while used
        self._outLock.acquire()
        try:
            msgOffset = msgID * MESSAGE_SIZE
            self._outgoingMail = \
                self._outgoingMail[0:msgOffset] + msg.payload + \
                self._outgoingMail[msgOffset + MESSAGE_SIZE:self._outMailLen]
            # NOTE(review): Python 2 integer division; yields a float on
            # Python 3 — confirm before porting.
            mailboxOffset = (msgID / SLOTS_PER_MAILBOX) * MAILBOX_SIZE
            mailbox = self._outgoingMail[mailboxOffset:
                                         mailboxOffset + MAILBOX_SIZE]
            cmd = self._outCmd + ['bs=' + str(MAILBOX_SIZE),
                                  'seek=' + str(mailboxOffset / MAILBOX_SIZE)]
            # self.log.debug("Running command: %s, for message id: %s",
            #               str(cmd), str(msgID))
            (rc, out, err) = _mboxExecCmd(cmd, data=mailbox)
            if rc:
                self.log.error("SPM_MailMonitor: sendReply - couldn't send "
                               "reply, dd failed")
        finally:
            self._outLock.release()

    def run(self):
        """Poll loop: check for mail every monitorInterval seconds until
        stopped, then drain the thread pool.
        """
        try:
            while not self._stop:
                try:
                    self._checkForMail()
                except:
                    self.log.error("Error checking for mail", exc_info=True)
                time.sleep(self._monitorInterval)
        finally:
            self._stopped = True
            self.tp.joinAll(waitForTasks=False)
            self.log.info("SPM_MailMonitor - Incoming mail monitoring thread "
                          "stopped")
def wait_timeout(monitor_interval):
    """Compute the thread-pool wait timeout for a monitor interval.

    Keeps the historical 3/2 ratio (3 seconds for the default 2-second
    interval), preserving long-standing runtime behavior while allowing
    shorter intervals in tests.
    """
    return monitor_interval * 3 / 2
``` |
[
{
"content": "```python\n\"\"\"\nTest client connections to a XMPP chat room\n\"\"\"\nimport math\nimport time\nimport bisect\nimport logging\nimport random\nfrom threading import Thread\n\nimport pytest\n\nfrom echochamber.utils import create_client_connections, establish_channel, find_available_port\nfrom ech... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nTest client connections to a XMPP chat room\n\"\"\"\nimport math\nimport time\nimport bisect\nimport logging\nimport random\nfrom threading import Thread\n\nimport pytest\n\nfrom echochamber.utils import create_client_connections, establish_channel, find_availabl... | ```python
"""
Test client connections to a XMPP chat room
"""
import math
import time
import bisect
import logging
import random
from threading import Thread
import pytest
from echochamber.utils import create_client_connections, establish_channel, find_available_port
from echochamber.proxy import ProxyServer
def read_messages(clients, counters, timeout):
    """Read messages on all clients concurrently until `timeout` expires.

    Each successful read increments counters[client.username]; a client
    that raises while reading stops early.
    """
    def reader(client):
        deadline = time.time() + timeout
        remaining = deadline - time.time()
        while remaining > 0:
            try:
                client.read_message(remaining)
                counters[client.username] += 1
            except Exception:
                break
            remaining = deadline - time.time()

    threads = [Thread(target=reader, args=(client,)) for client in clients]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
def read_rest_of_messages(clients, counters, total):
    """Keep reading on every client until each has seen `total` messages.

    Asserts at the end that every client actually read all messages.
    """
    def drain(client):
        while counters[client.username] < total:
            try:
                client.read_message(5 * 60)
                counters[client.username] += 1
            except Exception:
                break

    workers = []
    for client in clients:
        thread = Thread(target=drain, args=(client,))
        thread.start()
        workers.append((thread, client.username))

    all_read = True
    for thread, username in workers:
        logging.info("Joining %s", username)
        thread.join()
        read_count = counters[username]
        if read_count != total:
            all_read = False
            logging.info("Client %s read only %d out of %d messages",
                         username, read_count, total)
    assert all_read
def connect_and_send_messages(client_factory, debug, num_clients, server_port=None):
    """Join *num_clients* clients to the chat room and exchange scheduled messages.

    Roughly 10% of clients (rounded up) are designated high-frequency senders.
    A randomized send schedule spanning ``total_time`` seconds is built, all
    messages are sent at their scheduled times, and the function finally
    requires every client to receive every message that was sent (via the
    assert inside ``read_rest_of_messages``).

    :param client_factory: factory consumed by ``create_client_connections``
    :param debug: unused here; kept for parity with the test signatures
    :param num_clients: number of clients to connect
    :param server_port: optional proxy port to connect through (None = direct)
    """
    total_time = 200  # Time period for sending all messages
    frequency_high = 0.60  # 6 messages every 10 seconds
    frequency_low = 0.10  # 1 message every 10 seconds
    # threshhold = 300
    percentage_high_users = 0.1
    # Join all clients to the room
    clients = create_client_connections(client_factory, num_clients, proxy_port=server_port)
    establish_channel(clients)
    logging.info("All clients have been invited to the channel, sending message tests")
    num_high_users = int(math.ceil(num_clients * percentage_high_users))
    high_users, low_users = clients[0:num_high_users], clients[num_high_users:]
    logging.info("Chose %d high frequency messaging users and %d low frequency users.",
                 len(high_users), len(low_users))
    message_queue = []
    for client in clients:
        if client in high_users:
            msg_freq = frequency_high
        else:
            msg_freq = frequency_low
        num_messages = int(total_time * msg_freq)
        # Schedule each message to be sent by this client
        for i in range(num_messages):
            # Pick a random time in the total_time range to send the message
            # bisect.insort will queue messages in the list ordered by scheduled time
            # NOTE(review): on a (vanishingly unlikely) timestamp tie the tuple
            # comparison falls through to comparing client objects -- confirm
            # clients are orderable if that ever matters.
            queued_time = random.uniform(0, total_time)
            bisect.insort_right(message_queue, (queued_time, client))
    # Run a loop and send all queued messages at the schedule times
    start_time = time.time()
    message_id = 0
    total_messages = len(message_queue)
    # Per-username bookkeeping: ids = messages sent, recv_count = messages read.
    ids = {}
    recv_count = {}
    for client in clients:
        ids[client.username] = 0
        recv_count[client.username] = 0
    while message_queue:
        # Check if first message is ready to be sent (we have reached the scheduled send time)
        elapsed = time.time() - start_time
        send_at = message_queue[0][0]
        if elapsed >= send_at:
            queued_time, client = message_queue.pop(0)
            message_id += 1
            logging.info("Sending message %d for %s queued at %0.2f",
                         message_id, client.username, queued_time)
            # Payload encodes global id, scheduled time, sender, and the
            # sender's own per-client sequence number.
            client.send_message("{message_id} {time:0.2f} {username} {mid}".format(
                message_id=message_id,
                time=queued_time,
                username=client.username,
                mid=ids[client.username])
            )
            ids[client.username] += 1
        else:
            # Interestingly, using `time.sleep(send_at - elapsed)` here
            # instead of read_messages will make the 10 node test pass
            # on our Xeon test server while when read_messages is used
            # the test fails. But when it fails and the log of each client
            # is inspected it can be seen that all messages are actually
            # received by jabberites. On the other hand, the 25 node test
            # fail in both cases (when time.sleep and read_messages is
            # used) but when read_messages is used, it can again be shown
            # from the logs that all messages are actually received by
            # jabberites but are lost somewhere in the pexpect library.
            read_messages(clients, recv_count, send_at - elapsed)
            # time.sleep(send_at - elapsed)
    logging.info("Finished sending %d messages", total_messages)
    # Wait for all messages to arrive
    # NOTE: Reading from all clients at once seems to increase chances
    # of receiving all the messages from pexpect.
    read_rest_of_messages(clients, recv_count, total_messages)
    logging.info("All clients received all sent messages")
# Run at two sizes; the 25-client case is skipped when running on CI.
# NOTE(review): `pytest.mark.skipif(...)( value )` inside parametrize is the
# legacy (pre-pytest-4) way to mark a single parameter; modern pytest needs
# pytest.param(25, marks=pytest.mark.skipif(...)) -- confirm pytest version.
@pytest.mark.parametrize("num_clients", [
    10,
    pytest.mark.skipif("os.environ.get('CI', None)")(25),
])
def test_messaging(client_factory, debug, num_clients):
    """
    Test that clients connect and can send varying number of messages
    """
    connect_and_send_messages(client_factory, debug, num_clients)
# Same two sizes as test_messaging; 25 clients skipped on CI (legacy mark
# style -- see the note on test_messaging).
@pytest.mark.parametrize("num_clients", [
    10,
    pytest.mark.skipif("os.environ.get('CI', None)")(25),
])
def test_messaging_high_latency(xmpp_server, client_factory, debug, num_clients):
    """
    Connect all clients via the latency proxy server
    """
    # Mean/variance of the artificial per-packet delay, in seconds.
    latency_mean = 0.2
    latency_variance = 0.025
    proxy_port = find_available_port()
    # Proxy sits between the clients and the real XMPP c2s port.
    proxy = ProxyServer(("127.0.0.1", proxy_port), ("127.0.0.1", xmpp_server.c2s_port),
                        latency_mean, latency_variance)
    logging.info("Proxy listening on port {} with latency mean {}s and variance {}s".
                 format(proxy_port, latency_mean, latency_variance))
    # Join all clients to the room via a high-latency proxy
    connect_and_send_messages(client_factory, debug, num_clients, server_port=proxy_port)
    proxy.stop()
``` |
[
{
"content": "Here is the script:\n```python\nimport os\nimport sys\nimport unittest\nimport threading\n\n# add the source directory to the path so the unit test framework can find it\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'EPLaunchLite'))\n\ntry:\n from FileTypes im... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nimport os\nimport sys\nimport unittest\nimport threading\n\n# add the source directory to the path so the unit test framework can find it\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'EPLaunchLite'))\n\ntry:\n f... | ```python
import os
import sys
import unittest
import threading
# add the source directory to the path so the unit test framework can find it
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'EPLaunchLite'))
try:
from FileTypes import FileTypes
has_gtk = True
except ImportError as e:
has_gtk = False
from EnergyPlusPath import EnergyPlusPath
from EnergyPlusThread import EnergyPlusThread
@unittest.skipIf(not has_gtk, "Cannot run FileTypes tests without gtk")
class TestFileTypes(unittest.TestCase):
    """Checks the dialog filters returned by FileTypes.get_materials."""

    def test_idf_file_type(self):
        """IDF requests should yield exactly two filters: one IDF, one IMF."""
        msg, filters = FileTypes.get_materials(FileTypes.IDF)
        self.assertEqual(len(filters), 2)  # should return 2: idf and imf
        # make sure we have each one, idf and imf
        # BUG FIX: assertTrue(len(x), 1) treats the 1 as the failure *message*
        # and passes for any non-empty list; assertEqual checks the count.
        idf_filters = [x for x in filters if 'IDF' in x.get_name()]
        self.assertEqual(len(idf_filters), 1)
        imf_filters = [x for x in filters if 'IMF' in x.get_name()]
        self.assertEqual(len(imf_filters), 1)

    def test_epw_file_type(self):
        """EPW requests should yield exactly one EPW filter."""
        msg, filters = FileTypes.get_materials(FileTypes.EPW)
        self.assertEqual(len(filters), 1)
        epw_filters = [x for x in filters if 'EPW' in x.get_name()]
        self.assertEqual(len(epw_filters), 1)

    def test_invalid_file_type(self):
        """Unknown file type keys should come back as (None, None)."""
        msg, result = FileTypes.get_materials('abcdef')
        self.assertIsNone(msg)
        self.assertIsNone(result)
class TestEnergyPlusPaths(unittest.TestCase):
    """Exercises EnergyPlusPath.get_version_number_from_path."""

    def test_proper_path_no_trailing_slash(self):
        """A canonical /Applications install path parses to its version token."""
        version = EnergyPlusPath.get_version_number_from_path('/Applications/EnergyPlus-8-1-0')
        self.assertEqual(version, '8-1-0')

    def test_proper_path_with_trailing_slash(self):
        """A trailing slash must not change the parsed version."""
        version = EnergyPlusPath.get_version_number_from_path('/Applications/EnergyPlus-8-1-0/')
        self.assertEqual(version, '8-1-0')

    def test_bad_path_with_enough_tokens(self):
        """This path has enough tokens but is expected to yield None."""
        version = EnergyPlusPath.get_version_number_from_path('/usr/local/EnergyPlus-8-1-0')
        self.assertIsNone(version)

    def test_bad_path_not_enough_tokens(self):
        """A too-short path raises IndexError."""
        with self.assertRaises(IndexError):
            EnergyPlusPath.get_version_number_from_path('/EnergyPlus-8-1-0')
class TestEnergyPlusThread(unittest.TestCase):
    """Checks construction of the EnergyPlusThread worker."""

    def test_construction(self):
        """The constructed object stores its three paths and is a Thread."""
        paths = ['/dummy/', '/path', '/to_nothing']
        obj = EnergyPlusThread(paths[0], paths[1], paths[2], None, None, None, None)
        self.assertIsInstance(obj, threading.Thread)
        # BUG FIX: assertTrue(attr, expected) ignores its second argument
        # (it is only the failure message) and passes for any truthy attr;
        # assertEqual actually compares the stored paths.
        self.assertEqual(obj.run_script, paths[0])
        self.assertEqual(obj.input_file, paths[1])
        self.assertEqual(obj.weather_file, paths[2])
# Allow running this file directly (e.g. `python tests/test_ghx.py`)
# in addition to discovery by a test runner.
if __name__ == '__main__':
    unittest.main()
``` |
[
{
"content": "Here is the source code:\n```python\n#! /usr/bin/env python\nfrom __future__ import division,print_function\nfrom lingpy.data.derive import compile_model\nfrom scipy.spatial.distance import squareform\nfrom time import sleep\nfrom pickle import dump\n\nasjp = {}\n\nscore = open('score','r').read()... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n#! /usr/bin/env python\nfrom __future__ import division,print_function\nfrom lingpy.data.derive import compile_model\nfrom scipy.spatial.distance import squareform\nfrom time import sleep\nfrom pickle import dump\n\nasjp = {}\n\nscore = open('sc... | ```python
#! /usr/bin/env python
"""Build and compile the ASJP sound-class scoring model for lingpy.

Reads raw consonant scores from ``score`` and vowel scores from
``vows_score`` (tab-separated, one row per segment), assembles a combined
pairwise scoring dict, pickles it to ``scorer.bin``, and compiles the
'asjp' model via lingpy.
"""
from __future__ import division,print_function
from lingpy.data.derive import compile_model
from scipy.spatial.distance import squareform
from time import sleep
from pickle import dump

# Combined scorer: maps (segmentA, segmentB) -> similarity score.
asjp = {}

# ---- consonant scores ------------------------------------------------
# NOTE(review): file handles opened here are never closed; harmless in a
# one-shot script, but `with open(...)` would be cleaner.
score = open('score','r').read()
score = score.split('\n')
del score[-1]  # drop the empty line after the trailing newline
# dicto: first column (segment) -> remaining columns (score strings)
dicto = {}
for line in score:
    lin = line.split('\t')
    dicto[lin[0]] = lin[1:]
# letters: row order of segments; score rows lose their label column
letters = []
for i in range(len(score)):
    score[i] = score[i].split('\t')
    letters.append(score[i][0])
    del score[i][0]
# Collect the upper-triangle values column-major into a condensed vector...
matrix = []
for i in range(len(score)):
    for l in letters:
        if i < len(dicto[l]):
            matrix.append(float(dicto[l][i]))
# ...and expand it into a full square matrix.
matrix = squareform(matrix)
consonants = ['p'] + letters  # 'p' is the row implied by the triangular file
consonant_matrix = matrix.copy()

# ---- vowel scores (same parsing scheme as above) ---------------------
score = open('vows_score','r').read()
score = score.split('\n')
del score[-1]
dicto = {}
for line in score:
    lin = line.split('\t')
    dicto[lin[0]] = lin[1:]
letters = []
for i in range(len(score)):
    score[i] = score[i].split('\t')
    letters.append(score[i][0])
    del score[i][0]
matrix = []
for i in range(len(score)):
    for l in letters:
        if i < len(dicto[l]):
            matrix.append(float(dicto[l][i]))
matrix = squareform(matrix)
vowel_matrix = matrix.copy()
vowels = ['i'] + letters  # 'i' is the implied first vowel row

# Identical segments get the maximum self-similarity of 40.
for i in range(len(vowel_matrix)):
    vowel_matrix[i][i] = 40
for i in range(len(consonant_matrix)):
    consonant_matrix[i][i] = 40

# Fill the pairwise scorer: 31 consonants and 7 vowels.
for i in range(31):
    for j in range(31):
        asjp[consonants[i],consonants[j]] = consonant_matrix[i][j]
for i in range(7):
    for j in range(7):
        asjp[vowels[i],vowels[j]] = vowel_matrix[i][j]

# 'X' scores neutrally (0) against every vowel and consonant.
# NOTE(review): the semantics of 'X', '+', '0', '_' follow lingpy's
# sound-class conventions -- confirm against the lingpy documentation.
for l in vowels:
    asjp[l,'X'] = 0
    asjp['X',l] = 0
for l in consonants:
    asjp[l,'X'] = 0
    asjp['X',l] = 0
asjp['X','X'] = 0

# Vowel-consonant pairs are strongly penalized.
for v in vowels:
    for c in consonants:
        asjp[v,c] = -20
        asjp[c,v] = -20

# Round all scores to integers.
# NOTE(review): the zero branch is a no-op, and int(x+0.5) truncates toward
# zero, so negative scores round toward zero rather than to nearest.
for key in asjp.keys():
    if asjp[key] == 0:
        asjp[key] = 0
    else:
        asjp[key] = int(asjp[key]+0.5)

# Damp vowel-vowel similarity to a quarter and add a +10 base offset.
for v1 in vowels:
    for v2 in vowels:
        asjp[v1,v2] = int(asjp[v1,v2] * 0.25 + 0.5) + 10

# Manual overrides for glide/fricative proximity pairs.
asjp['i','y'] = -2
asjp['y','i'] = -2
asjp['u','w'] = -2
asjp['w','u'] = -2
asjp['u','v'] = -4
asjp['v','u'] = -4
asjp['u','f'] = -6
asjp['f','u'] = -6

# Snapshot the current keys first: the loop below adds new entries, and the
# dict must not grow while its keys() view is being iterated.
keys = []
for keyA,keyB in asjp.keys():
    keys.append((keyA,keyB))
for keyA,keyB in keys:
    asjp[keyA,'+'] = -20
    asjp['+',keyB] = -20
    asjp[keyA,'0'] = 0
    asjp['0',keyB] = 0
asjp['X','+'] = -5
asjp['+','X'] = -5
asjp['+','+'] = 0 # swaps
asjp['0','0'] = 0 # missing values
asjp['X','0'] = 0
asjp['0','X'] = 0

# Digit symbols '0'..'6': identical digits score 10, different digits 5.
for i in '0123456':
    for j in '0123456':
        if i == j:
            asjp[i,j] = 10
        else:
            asjp[i,j] = 5

# Second key snapshot (same reason as above) before adding digit and '_' pairs.
keys = []
for keyA,keyB in asjp.keys():
    keys.append((keyA,keyB))
for keyA,keyB in keys:
    for i in '123456':
        if keyA not in '123456' and keyB not in '123456':
            asjp[keyA,i] = -20
            asjp[i,keyB] = -20
    asjp[keyA,'_'] = -50
    asjp['_',keyB] = -50
asjp['_','_'] = 0

# Scale everything down by 4, then stretch non-neutral scores by 1.75x.
# NOTE(review): both branches apply the same adjustment; only exact 0 and
# exact 10 (the identity score 40/4) are left untouched -- presumably
# intentional, but worth confirming.
for x in asjp.keys():
    asjp[x] = asjp[x] / 4.0
    if asjp[x] > 0 and asjp[x] != 10:
        asjp[x] += 0.75 * asjp[x]
    elif asjp[x] < 0:
        asjp[x] += 0.75 * asjp[x]

# Persist the scorer and compile the lingpy model.
out = open('scorer.bin','wb')
dump(asjp,out)
out.close()
compile_model('asjp')
print("[i] Compilation of the ASJP model was successful!")
sleep(1)
``` |
[
{
"content": "Here is a code snippet:\n```python\n# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy o... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtai... | ```python
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup code.
"""
import tempfile
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.backup.fake_service_with_verify import\
get_backup_driver
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FakeBackupException(Exception):
    """Stand-in error raised by mocked driver calls in these tests."""
class BaseBackupTest(test.TestCase):
    """Shared fixture for backup-manager tests.

    Builds a backup manager instance (with osprofiler tracing stubbed out)
    and provides helpers that insert volume/backup rows directly into the DB.
    """

    def setUp(self):
        super(BaseBackupTest, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(volumes_dir=vol_tmpdir)
        # Stub osprofiler's class decorator to a pass-through so importing
        # the backup manager does not pull in a configured profiler.
        with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
            side_effect = lambda value: value
            mock_decorator = mock.MagicMock(side_effect=side_effect)
            mock_trace_cls.return_value = mock_decorator
            self.backup_mgr = \
                importutils.import_object(CONF.backup_manager)
        self.backup_mgr.host = 'testhost'
        self.ctxt = context.get_admin_context()
        self.backup_mgr.driver.set_initialized()

    def _create_backup_db_entry(self, volume_id=1, display_name='test_backup',
                                display_description='this is a test backup',
                                container='volumebackups',
                                status='creating',
                                size=1,
                                object_count=0,
                                project_id='fake'):
        """Create a backup entry in the DB.

        Return the entry ID.
        """
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = 'fake'
        backup['project_id'] = project_id
        backup['host'] = 'testhost'
        backup['availability_zone'] = '1'
        backup['display_name'] = display_name
        backup['display_description'] = display_description
        backup['container'] = container
        backup['status'] = status
        backup['fail_reason'] = ''
        backup['service'] = CONF.backup_driver
        backup['size'] = size
        backup['object_count'] = object_count
        return db.backup_create(self.ctxt, backup)['id']

    def _create_volume_db_entry(self, display_name='test_volume',
                                display_description='this is a test volume',
                                status='backing-up',
                                size=1):
        """Create a volume entry in the DB.

        Return the entry ID.
        """
        vol = {}
        vol['size'] = size
        vol['host'] = 'testhost'
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['status'] = status
        vol['display_name'] = display_name
        vol['display_description'] = display_description
        vol['attach_status'] = 'detached'
        return db.volume_create(self.ctxt, vol)['id']

    def _create_exported_record_entry(self, vol_size=1):
        """Create backup metadata export entry (volume + backup + export)."""
        vol_id = self._create_volume_db_entry(status='available',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        export = self.backup_mgr.export_record(self.ctxt, backup_id)
        return export

    def _create_export_record_db_entry(self,
                                       volume_id='0000',
                                       status='creating',
                                       project_id='fake'):
        """Create a minimal backup entry in the DB for import tests.

        Return the entry ID.
        """
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = 'fake'
        backup['project_id'] = project_id
        backup['status'] = status
        return db.backup_create(self.ctxt, backup)['id']
class BackupTestCase(BaseBackupTest):
    """Test Case for backups (manager lifecycle: create/restore/delete/export/import)."""

    def test_init_host(self):
        """Make sure stuck volumes and backups are reset to correct
        states when backup_manager.init_host() is called
        """
        vol1_id = self._create_volume_db_entry(status='backing-up')
        vol2_id = self._create_volume_db_entry(status='restoring-backup')
        backup1_id = self._create_backup_db_entry(status='creating')
        backup2_id = self._create_backup_db_entry(status='restoring')
        backup3_id = self._create_backup_db_entry(status='deleting')
        self.backup_mgr.init_host()
        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual(vol1['status'], 'available')
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual(vol2['status'], 'error_restoring')
        backup1 = db.backup_get(self.ctxt, backup1_id)
        self.assertEqual(backup1['status'], 'error')
        backup2 = db.backup_get(self.ctxt, backup2_id)
        self.assertEqual(backup2['status'], 'available')
        # A backup stuck in 'deleting' is deleted outright by init_host.
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup3_id)

    def test_create_backup_with_bad_volume_status(self):
        """Test error handling when creating a backup from a volume
        with a bad status
        """
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)

    def test_create_backup_with_bad_backup_status(self):
        """Test error handling when creating a backup with a backup
        with a bad status
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)

    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_error(self, _mock_volume_backup):
        """Test error handling when error occurs during backup creation."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        _mock_volume_backup.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
        # Volume is released and backup marked 'error' despite the failure.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')
        self.assertTrue(_mock_volume_backup.called)

    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup(self, _mock_volume_backup):
        """Test normal backup creation."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        self.backup_mgr.create_backup(self.ctxt, backup_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')
        self.assertEqual(backup['size'], vol_size)
        self.assertTrue(_mock_volume_backup.called)

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_notify(self, _mock_volume_backup, notify):
        """Test normal backup creation with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        self.backup_mgr.create_backup(self.ctxt, backup_id)
        # Expect one notification at start and one at end of the operation.
        self.assertEqual(2, notify.call_count)

    def test_restore_backup_with_bad_volume_status(self):
        """Test error handling when restoring a backup to a volume
        with a bad status.
        """
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')

    def test_restore_backup_with_bad_backup_status(self):
        """Test error handling when restoring a backup with a backup
        with a bad status.
        """
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')

    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup_with_driver_error(self, _mock_volume_restore):
        """Test error handling when an error occurs during backup restore."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        _mock_volume_restore.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        # The volume reflects the failed restore; the backup stays usable.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error_restoring')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')
        self.assertTrue(_mock_volume_restore.called)

    def test_restore_backup_with_bad_service(self):
        """Test error handling when attempting a restore of a backup
        with a different service to that used to create the backup.
        """
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        service = 'cinder.tests.backup.bad_service'
        db.backup_update(self.ctxt, backup_id, {'service': service})
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')

    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup(self, _mock_volume_restore):
        """Test normal backup restoration."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')
        self.assertTrue(_mock_volume_restore.called)

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
        """Test normal backup restoration with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
        # Expect one notification at start and one at end of the operation.
        self.assertEqual(2, notify.call_count)

    def test_delete_backup_with_bad_backup_status(self):
        """Test error handling when deleting a backup with a backup
        with a bad status.
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup_id)
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')

    def test_delete_backup_with_error(self):
        """Test error handling when an error occurs during backup deletion."""
        vol_id = self._create_volume_db_entry(size=1)
        # The fake backup service raises IOError for this display name.
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 display_name='fail_on_delete',
                                                 volume_id=vol_id)
        self.assertRaises(IOError,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup_id)
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')

    def test_delete_backup_with_bad_service(self):
        """Test error handling when attempting a delete of a backup
        with a different service to that used to create the backup.
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        service = 'cinder.tests.backup.bad_service'
        db.backup_update(self.ctxt, backup_id, {'service': service})
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup_id)
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')

    def test_delete_backup_with_no_service(self):
        """Test error handling when attempting a delete of a backup
        with no service defined for that backup, relates to bug #1162908
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        db.backup_update(self.ctxt, backup_id, {'service': None})
        self.backup_mgr.delete_backup(self.ctxt, backup_id)

    def test_delete_backup(self):
        """Test normal backup deletion."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup_id)
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup_id)
        # The soft-deleted row is still visible with read_deleted='yes'.
        ctxt_read_deleted = context.get_admin_context('yes')
        backup = db.backup_get(ctxt_read_deleted, backup_id)
        self.assertEqual(backup.deleted, True)
        self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
        self.assertEqual(backup.status, 'deleted')

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_delete_backup_with_notify(self, notify):
        """Test normal backup deletion with notifications."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup_id)
        # Expect one notification at start and one at end of the operation.
        self.assertEqual(2, notify.call_count)

    def test_list_backup(self):
        """Listing by project returns only that project's backups."""
        backups = db.backup_get_all_by_project(self.ctxt, 'project1')
        self.assertEqual(len(backups), 0)
        self._create_backup_db_entry()
        b2 = self._create_backup_db_entry(project_id='project1')
        backups = db.backup_get_all_by_project(self.ctxt, 'project1')
        self.assertEqual(len(backups), 1)
        self.assertEqual(backups[0].id, b2)

    def test_backup_get_all_by_project_with_deleted(self):
        """Test deleted backups don't show up in backup_get_all_by_project.

        Unless context.read_deleted is 'yes'.
        """
        backups = db.backup_get_all_by_project(self.ctxt, 'fake')
        self.assertEqual(len(backups), 0)
        backup_id_keep = self._create_backup_db_entry()
        backup_id = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup_id)
        backups = db.backup_get_all_by_project(self.ctxt, 'fake')
        self.assertEqual(len(backups), 1)
        self.assertEqual(backups[0].id, backup_id_keep)
        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
        self.assertEqual(len(backups), 2)

    def test_backup_get_all_by_host_with_deleted(self):
        """Test deleted backups don't show up in backup_get_all_by_project.

        Unless context.read_deleted is 'yes'
        """
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(len(backups), 0)
        backup_id_keep = self._create_backup_db_entry()
        backup_id = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup_id)
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(len(backups), 1)
        self.assertEqual(backups[0].id, backup_id_keep)
        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
        self.assertEqual(len(backups), 2)

    def test_backup_manager_driver_name(self):
        """"Test mapping between backup services and backup drivers."""
        # Legacy 'services' paths must map onto the 'drivers' namespace.
        self.override_config('backup_driver', "cinder.backup.services.swift")
        backup_mgr = \
            importutils.import_object(CONF.backup_manager)
        self.assertEqual('cinder.backup.drivers.swift',
                         backup_mgr.driver_name)

    def test_export_record_with_bad_service(self):
        """Test error handling when attempting an export of a backup
        record with a different service to that used to create the backup.
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        service = 'cinder.tests.backup.bad_service'
        db.backup_update(self.ctxt, backup_id, {'service': service})
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.export_record,
                          self.ctxt,
                          backup_id)

    def test_export_record_with_bad_backup_status(self):
        """Test error handling when exporting a backup record with a backup
        with a bad status.
        """
        vol_id = self._create_volume_db_entry(status='available',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='error',
                                                 volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.export_record,
                          self.ctxt,
                          backup_id)

    def test_export_record(self):
        """Test normal backup record export."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='available',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        export = self.backup_mgr.export_record(self.ctxt, backup_id)
        self.assertEqual(export['backup_service'], CONF.backup_driver)
        self.assertTrue('backup_url' in export)

    def test_import_record_with_verify_not_implemented(self):
        """Test normal backup record import.

        Test the case when import succeeds for the case that the
        driver does not support verify.
        """
        vol_size = 1
        export = self._create_exported_record_entry(vol_size=vol_size)
        imported_record = self._create_export_record_db_entry()
        backup_hosts = []
        self.backup_mgr.import_record(self.ctxt,
                                      imported_record,
                                      export['backup_service'],
                                      export['backup_url'],
                                      backup_hosts)
        backup = db.backup_get(self.ctxt, imported_record)
        self.assertEqual(backup['status'], 'available')
        self.assertEqual(backup['size'], vol_size)

    def test_import_record_with_bad_service(self):
        """Test error handling when attempting an import of a backup
        record with a different service to that used to create the backup.
        """
        export = self._create_exported_record_entry()
        export['backup_service'] = 'cinder.tests.backup.bad_service'
        imported_record = self._create_export_record_db_entry()
        # Test the case where the additional hosts list is empty
        backup_hosts = []
        self.assertRaises(exception.ServiceNotFound,
                          self.backup_mgr.import_record,
                          self.ctxt,
                          imported_record,
                          export['backup_service'],
                          export['backup_url'],
                          backup_hosts)
        # Test that the import backup keeps calling other hosts to find a
        # suitable host for the backup service
        backup_hosts = ['fake1', 'fake2']
        BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
        with mock.patch(BackupAPI_import) as _mock_backup_import:
            self.backup_mgr.import_record(self.ctxt,
                                          imported_record,
                                          export['backup_service'],
                                          export['backup_url'],
                                          backup_hosts)
            self.assertTrue(_mock_backup_import.called)

    def test_import_record_with_invalid_backup(self):
        """Test error handling when attempting an import of a backup
        record where the backup driver returns an exception.
        """
        export = self._create_exported_record_entry()
        backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
        # Build the dotted patch target for the live driver's import_record.
        _mock_record_import_class = ('%s.%s.%s' %
                                     (backup_driver.__module__,
                                      backup_driver.__class__.__name__,
                                      'import_record'))
        imported_record = self._create_export_record_db_entry()
        backup_hosts = []
        with mock.patch(_mock_record_import_class) as _mock_record_import:
            _mock_record_import.side_effect = FakeBackupException('fake')
            self.assertRaises(exception.InvalidBackup,
                              self.backup_mgr.import_record,
                              self.ctxt,
                              imported_record,
                              export['backup_service'],
                              export['backup_url'],
                              backup_hosts)
            self.assertTrue(_mock_record_import.called)
        backup = db.backup_get(self.ctxt, imported_record)
        self.assertEqual(backup['status'], 'error')
class BackupTestCaseWithVerify(BaseBackupTest):
"""Test Case for backups."""
def setUp(self):
self.override_config("backup_driver",
"cinder.tests.backup.fake_service_with_verify")
super(BackupTestCaseWithVerify, self).setUp()
def test_import_record_with_verify(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver implements verify.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class):
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
def test_import_record_with_verify_invalid_backup(self):
"""Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
_mock_record_verify.side_effect = \
exception.InvalidBackup(reason='fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_nonrestoring_to_available(
self):
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup_id = self._create_backup_db_entry(status='error',
volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup_id,
'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_available_invalid_backup(self):
volume = db.volume_create(self.ctxt, {'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'error',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as \
_mock_record_verify:
_mock_record_verify.side_effect = \
exception.BackupVerifyUnsupportedDriver(reason='fake')
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
backup['id'],
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_restoring_to_available(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'restoring',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_error(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'creating',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'error')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error')
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n\"\"\"Copyright 2019 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.a... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n\"\"\"Copyright 2019 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n ... | ```python
"""Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tensorflow as tf
import numpy as np
import random
from multiprocessing import dummy as multiprocessing
from absl import app
from absl import flags
from attributions import compute_and_save_attr
flags.DEFINE_integer('num_imgs', 10,
'Number of images to compute metrics over.')
flags.DEFINE_integer(
'num_threads', 50,
'Number of threads to use when perform images operations.')
flags.DEFINE_integer(
'seed', 0, 'The seed to use when randomly sample from a set of inputs.'
'Make sure sal_scratch is empty before changing the seed.')
flags.DEFINE_boolean(
'scratch', True,
'Compute metrics from scratch for num_imgs example inputs.')
flags.DEFINE_list('metrics', ['MCS', 'IDR', 'IIR'],
'List of metrics to evaluate.')
FLAGS = flags.FLAGS
ATTR_DIR = ['attr_reprod', 'attr_scratch']
METHOD_NAMES = [
'Vanilla Gradient', 'Gradient SmoothGrad', 'Guided Backprop',
'Guided Backprop SmoothGrad', 'Integrated Gradient',
'Integrated Gradient SmoothGrad', 'GradCam', 'GradCam SmoothGrad',
'Guided GradCam', 'Guided GradCam SmoothGrad', 'Gradient x Input',
'Gradient SmoothGrad x Input'
]
# Index into METHOD_NAMES for the desired method names. Ordered roughly
# in the best to worst performance under the BAM metric.
METHOD_INDICES = [6, 0, 1, 4, 5, 10, 2, 8]
# For MCS, IDR, and IIR, the configs correspond to the names of the model-data
# pair (e.g., model1, data1, model2, data2). For RMCS, the config corresponds
# to the prefix of the model name and the data name.
METRIC_CONFIG = {
'MCS': ['obj', 'obj', 'scene', 'scene'],
'IDR': ['scene', 'scene_only', 'scene', 'scene'],
'IIR': ['scene_only', 'bamboo_forest', 'scene_only', 'bamboo_forest_patch'],
'RMCS': ['scene', 'dog_bedroom']
}
# BAM models have 10 classes so there are 10 models for RMC testing.
NUM_RELATIVE_MODELS = 10
# BAM's dataset for IIR contains 100 images so sample up to 100.
MAX_SAMPLE_INDEX = 100
# MCS and IDR are evaluated on 10000 images, IIR and RMCS are on 100.
NUM_TOTAL_IMAGES = {
'MCS': 10000,
'IDR': 10000,
'IIR': 100,
'RMCS': 100,
}
BASE_DIR = os.getcwd()
def get_global_indices(metric):
"""When computing from scratch, generate random global image indices from a fixed seed.
When reproducing results given attributions, enumerate all indices of
NUM_TOTAL_IMAGES['metric'].
"""
if not FLAGS.scratch:
return range(NUM_TOTAL_IMAGES[metric])
random.seed(FLAGS.seed)
return sorted(random.sample(range(NUM_TOTAL_IMAGES[metric]), FLAGS.num_imgs))
def corr_indices(model, data, metric):
"""Given the name of a model and a set of data, return the indices of the images that are correctly classified by the model."""
label_fpath = os.path.join(BASE_DIR, 'data', data, 'val.txt')
labels = [
int(l.split(' ')[-1]) for l in tf.gfile.Open(label_fpath).readlines()
]
img_fnames = tf.gfile.ListDirectory(
os.path.join(BASE_DIR, ATTR_DIR[FLAGS.scratch], model + '-' + data))
preds = [int(p.split('_')[-1]) for p in sorted(img_fnames)]
attr_indices = range(len(preds))
if FLAGS.scratch:
attr_indices = range(FLAGS.num_imgs)
global_indices = get_global_indices(metric)
corr = [i for i in attr_indices if preds[i] == labels[global_indices[i]]]
return corr
def load_pos_neg_attr(model_pos, data_pos, model_neg, data_neg):
"""Load two sets of attributions from model_pos-data_pos and model_neg-data_neg.
Filter and only return attributions of correctly classified inputs.
Args:
model_pos: the model name for which objects have positive attributions.
data_pos: the data name for which objects have positive attributions.
model_neg: the model name for which objects have negative attributions.
data_neg: the data name for which objects have negative attributions.
Returns:
arrays of attributions corresponding to model_pos-data_pos and
model_neg-data_neg pairs.
"""
dir_pos = os.path.join(BASE_DIR, ATTR_DIR[FLAGS.scratch],
model_pos + '-' + data_pos)
pool = multiprocessing.Pool(FLAGS.num_threads)
attr_pos = np.array(
pool.map(
lambda f: np.load(tf.gfile.GFile(os.path.join(dir_pos, f), 'rb')),
sorted(tf.gfile.ListDirectory(dir_pos))))
dir_neg = os.path.join(BASE_DIR, ATTR_DIR[FLAGS.scratch],
model_neg + '-' + data_neg)
attr_neg = np.array(
pool.map(
lambda f: np.load(tf.gfile.GFile(os.path.join(dir_neg, f), 'rb')),
sorted(tf.gfile.ListDirectory(dir_neg))))
if FLAGS.scratch:
attr_pos = attr_pos[:FLAGS.num_imgs]
attr_neg = attr_neg[:FLAGS.num_imgs]
return attr_pos, attr_neg
def MCS(model_pos, data_pos, model_neg, data_neg, relative=False):
"""Compute the model contrast score defined as the average attribution
difference between model_pos-data_pos and model_neg-data_neg.
Args:
model_pos: the model name for which objects have positive attributions.
data_pos: the data name for which objects have positive attributions.
model_neg: the model name for which objects have negative attributions.
data_neg: the data name for which objects have negative attributions.
"""
metric = 'RMCS' if relative else 'MCS'
attr_pos, attr_neg = load_pos_neg_attr(model_pos, data_pos, model_neg,
data_neg)
corr_pos = corr_indices(model_pos, data_pos, metric)
corr_neg = corr_indices(model_neg, data_neg, metric)
corr = [i for i in corr_pos if i in corr_neg]
for j in METHOD_INDICES:
print(','.join(
map(str, [METHOD_NAMES[j],
np.mean((attr_pos - attr_neg)[corr, j])])))
def IDR(model, data_pos, data_neg):
"""Compute the input dependence rate defined as the percentage of examples where data_pos is attributed higher than data_neg.
Args:
model: name of the model being evaluated.
data_pos: the data name for positive attributions.
data_neg: the data name for negative attributions.
"""
attr_pos, attr_neg = load_pos_neg_attr(model, data_pos, model, data_neg)
corr_pos = corr_indices(model, data_pos, 'IDR')
corr_neg = corr_indices(model, data_neg, 'IDR')
corr = [i for i in corr_pos if i in corr_neg]
for j in METHOD_INDICES:
count = sum(d > 0 for d in (attr_pos - attr_neg)[corr, j])
print(','.join(map(str, [METHOD_NAMES[j], count / float(len(corr))])))
def IIR(model, data, data_patch, threashold=0.1):
"""Compute the input independence rate defined as the percentage of examples where the difference between data and data_patch is less than threshold.
Args:
model: name of the model being evaluated.
data: name of the data directory that contains scene-only images.
data_patch: name of the data directory that contains functionally
insignificant object patches.
"""
attr, attr_patch = load_pos_neg_attr(model, data, model, data_patch)
corr = corr_indices(model, data, 'IIR')
corr_patch = corr_indices(model, data_patch, 'IIR')
corr = [i for i in corr if i in corr_patch]
for j in METHOD_INDICES:
diff = abs(attr[corr, j] - attr_patch[corr, j])
count = sum(diff < threashold * attr[corr, j])
print(','.join(map(str, [METHOD_NAMES[j], count / float(len(corr))])))
def main(argv):
for metric in FLAGS.metrics:
print('Results for {}:'.format(metric))
global_indices = get_global_indices(metric)
if metric == 'RMCS':
model_prefix, data = METRIC_CONFIG[metric]
if FLAGS.scratch:
for i in range(1, NUM_RELATIVE_MODELS + 1):
compute_and_save_attr(model_prefix + str(i), data, global_indices,
FLAGS.num_threads)
for i in range(1, NUM_RELATIVE_MODELS):
print('MCS between', model_prefix + str(i), 'and',
model_prefix + str(NUM_RELATIVE_MODELS))
MCS(model_prefix + str(i), data,
model_prefix + str(NUM_RELATIVE_MODELS), data, relative=True)
else:
model1, data1, model2, data2 = METRIC_CONFIG[metric]
if FLAGS.scratch:
compute_and_save_attr(model1, data1, global_indices, FLAGS.num_threads)
compute_and_save_attr(model2, data2, global_indices, FLAGS.num_threads)
if metric == 'MCS':
MCS(model1, data1, model2, data2)
if metric == 'IDR':
IDR(model1, data1, data2)
if metric == 'IIR':
IIR(model1, data1, data2)
if __name__ == '__main__':
app.run(main)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nfrom __future__ import unicode_literals\nimport boto\nimport boto3\nimport boto.ec2.autoscale\nfrom boto.ec2.autoscale.launchconfig import LaunchConfiguration\nfrom boto.ec2.autoscale.group import AutoScalingGroup\nfrom boto.ec2.autoscale impor... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nfrom __future__ import unicode_literals\nimport boto\nimport boto3\nimport boto.ec2.autoscale\nfrom boto.ec2.autoscale.launchconfig import LaunchConfiguration\nfrom boto.ec2.autoscale.group import AutoScalingGroup\nfrom boto.ec2... | ```python
from __future__ import unicode_literals
import boto
import boto3
import boto.ec2.autoscale
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale import Tag
import boto.ec2.elb
import sure # noqa
from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated, mock_ec2
from tests.helpers import requires_boto_gte
@mock_autoscaling_deprecated
@mock_elb_deprecated
def test_create_autoscaling_group():
elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
elb_conn.create_load_balancer(
'test_lb', zones=[], listeners=[(80, 8080, 'http')])
conn = boto.ec2.autoscale.connect_to_region('us-east-1')
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
default_cooldown=60,
desired_capacity=2,
health_check_period=100,
health_check_type="EC2",
max_size=2,
min_size=2,
launch_config=config,
load_balancers=["test_lb"],
placement_group="test_placement",
vpc_zone_identifier='subnet-1234abcd',
termination_policies=["OldestInstance", "NewestInstance"],
tags=[Tag(
resource_id='tester_group',
key='test_key',
value='test_value',
propagate_at_launch=True
)
],
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.name.should.equal('tester_group')
set(group.availability_zones).should.equal(
set(['us-east-1c', 'us-east-1b']))
group.desired_capacity.should.equal(2)
group.max_size.should.equal(2)
group.min_size.should.equal(2)
group.instances.should.have.length_of(2)
group.vpc_zone_identifier.should.equal('subnet-1234abcd')
group.launch_config_name.should.equal('tester')
group.default_cooldown.should.equal(60)
group.health_check_period.should.equal(100)
group.health_check_type.should.equal("EC2")
list(group.load_balancers).should.equal(["test_lb"])
group.placement_group.should.equal("test_placement")
list(group.termination_policies).should.equal(
["OldestInstance", "NewestInstance"])
len(list(group.tags)).should.equal(1)
tag = list(group.tags)[0]
tag.resource_id.should.equal('tester_group')
tag.key.should.equal('test_key')
tag.value.should.equal('test_value')
tag.propagate_at_launch.should.equal(True)
@mock_autoscaling_deprecated
def test_create_autoscaling_groups_defaults():
""" Test with the minimum inputs and check that all of the proper defaults
are assigned for the other attributes """
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.name.should.equal('tester_group')
group.max_size.should.equal(2)
group.min_size.should.equal(2)
group.launch_config_name.should.equal('tester')
# Defaults
list(group.availability_zones).should.equal([])
group.desired_capacity.should.equal(2)
group.vpc_zone_identifier.should.equal('')
group.default_cooldown.should.equal(300)
group.health_check_period.should.equal(300)
group.health_check_type.should.equal("EC2")
list(group.load_balancers).should.equal([])
group.placement_group.should.equal(None)
list(group.termination_policies).should.equal([])
list(group.tags).should.equal([])
@mock_autoscaling
def test_list_many_autoscaling_groups():
conn = boto3.client('autoscaling', region_name='us-east-1')
conn.create_launch_configuration(LaunchConfigurationName='TestLC')
for i in range(51):
conn.create_auto_scaling_group(AutoScalingGroupName='TestGroup%d' % i,
MinSize=1,
MaxSize=2,
LaunchConfigurationName='TestLC')
response = conn.describe_auto_scaling_groups()
groups = response["AutoScalingGroups"]
marker = response["NextToken"]
groups.should.have.length_of(50)
marker.should.equal(groups[-1]['AutoScalingGroupName'])
response2 = conn.describe_auto_scaling_groups(NextToken=marker)
groups.extend(response2["AutoScalingGroups"])
groups.should.have.length_of(51)
assert 'NextToken' not in response2.keys()
@mock_autoscaling
@mock_ec2
def test_list_many_autoscaling_groups():
conn = boto3.client('autoscaling', region_name='us-east-1')
conn.create_launch_configuration(LaunchConfigurationName='TestLC')
conn.create_auto_scaling_group(AutoScalingGroupName='TestGroup1',
MinSize=1,
MaxSize=2,
LaunchConfigurationName='TestLC',
Tags=[{
"ResourceId": 'TestGroup1',
"ResourceType": "auto-scaling-group",
"PropagateAtLaunch": True,
"Key": 'TestTagKey1',
"Value": 'TestTagValue1'
}])
ec2 = boto3.client('ec2', region_name='us-east-1')
instances = ec2.describe_instances()
tags = instances['Reservations'][0]['Instances'][0]['Tags']
tags.should.contain({u'Value': 'TestTagValue1', u'Key': 'TestTagKey1'})
tags.should.contain({u'Value': 'TestGroup1', u'Key': 'aws:autoscaling:groupName'})
@mock_autoscaling_deprecated
def test_autoscaling_group_describe_filter():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
group.name = 'tester_group2'
conn.create_auto_scaling_group(group)
group.name = 'tester_group3'
conn.create_auto_scaling_group(group)
conn.get_all_groups(
names=['tester_group', 'tester_group2']).should.have.length_of(2)
conn.get_all_groups().should.have.length_of(3)
@mock_autoscaling_deprecated
def test_autoscaling_update():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.vpc_zone_identifier.should.equal('subnet-1234abcd')
group.vpc_zone_identifier = 'subnet-5678efgh'
group.update()
group = conn.get_all_groups()[0]
group.vpc_zone_identifier.should.equal('subnet-5678efgh')
@mock_autoscaling_deprecated
def test_autoscaling_tags_update():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
tags=[Tag(
resource_id='tester_group',
key='test_key',
value='test_value',
propagate_at_launch=True
)],
)
conn.create_auto_scaling_group(group)
conn.create_or_update_tags(tags=[Tag(
resource_id='tester_group',
key='test_key',
value='new_test_value',
propagate_at_launch=True
), Tag(
resource_id='tester_group',
key='test_key2',
value='test_value2',
propagate_at_launch=True
)])
group = conn.get_all_groups()[0]
group.tags.should.have.length_of(2)
@mock_autoscaling_deprecated
def test_autoscaling_group_delete():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
conn.get_all_groups().should.have.length_of(1)
conn.delete_auto_scaling_group('tester_group')
conn.get_all_groups().should.have.length_of(0)
@mock_ec2_deprecated
@mock_autoscaling_deprecated
def test_autoscaling_group_describe_instances():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
instances[0].launch_config_name.should.equal('tester')
autoscale_instance_ids = [instance.instance_id for instance in instances]
ec2_conn = boto.connect_ec2()
reservations = ec2_conn.get_all_instances()
instances = reservations[0].instances
instances.should.have.length_of(2)
instance_ids = [instance.id for instance in instances]
set(autoscale_instance_ids).should.equal(set(instance_ids))
instances[0].instance_type.should.equal("t2.medium")
@requires_boto_gte("2.8")
@mock_autoscaling_deprecated
def test_set_desired_capacity_up():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
conn.set_desired_capacity("tester_group", 3)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(3)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(3)
@requires_boto_gte("2.8")
@mock_autoscaling_deprecated
def test_set_desired_capacity_down():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
conn.set_desired_capacity("tester_group", 1)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(1)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(1)
@requires_boto_gte("2.8")
@mock_autoscaling_deprecated
def test_set_desired_capacity_the_same():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
conn.set_desired_capacity("tester_group", 2)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
@mock_autoscaling_deprecated
@mock_elb_deprecated
def test_autoscaling_group_with_elb():
elb_conn = boto.connect_elb()
zones = ['us-east-1a', 'us-east-1b']
ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
lb = elb_conn.create_load_balancer('my-lb', zones, ports)
instances_health = elb_conn.describe_instance_health('my-lb')
instances_health.should.be.empty
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
load_balancers=["my-lb"],
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
elb = elb_conn.get_all_load_balancers()[0]
group.desired_capacity.should.equal(2)
elb.instances.should.have.length_of(2)
autoscale_instance_ids = set(
instance.instance_id for instance in group.instances)
elb_instace_ids = set(instance.id for instance in elb.instances)
autoscale_instance_ids.should.equal(elb_instace_ids)
conn.set_desired_capacity("tester_group", 3)
group = conn.get_all_groups()[0]
elb = elb_conn.get_all_load_balancers()[0]
group.desired_capacity.should.equal(3)
elb.instances.should.have.length_of(3)
autoscale_instance_ids = set(
instance.instance_id for instance in group.instances)
elb_instace_ids = set(instance.id for instance in elb.instances)
autoscale_instance_ids.should.equal(elb_instace_ids)
conn.delete_auto_scaling_group('tester_group')
conn.get_all_groups().should.have.length_of(0)
elb = elb_conn.get_all_load_balancers()[0]
elb.instances.should.have.length_of(0)
'''
Boto3
'''
@mock_autoscaling
def test_create_autoscaling_group_boto3():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
response = client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=20,
DesiredCapacity=5,
Tags=[
{'ResourceId': 'test_asg',
'ResourceType': 'auto-scaling-group',
'Key': 'propogated-tag-key',
'Value': 'propogate-tag-value',
'PropagateAtLaunch': True
},
{'ResourceId': 'test_asg',
'ResourceType': 'auto-scaling-group',
'Key': 'not-propogated-tag-key',
'Value': 'not-propogate-tag-value',
'PropagateAtLaunch': False
}]
)
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
@mock_autoscaling
def test_describe_autoscaling_groups_boto3():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=20,
DesiredCapacity=5
)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=["test_asg"]
)
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response['AutoScalingGroups'][0][
'AutoScalingGroupName'].should.equal('test_asg')
@mock_autoscaling
def test_update_autoscaling_group_boto3():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=20,
DesiredCapacity=5
)
response = client.update_auto_scaling_group(
AutoScalingGroupName='test_asg',
MinSize=1,
)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=["test_asg"]
)
response['AutoScalingGroups'][0]['MinSize'].should.equal(1)
@mock_autoscaling
def test_autoscaling_taqs_update_boto3():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=20,
DesiredCapacity=5,
Tags=[
{
"ResourceId": 'test_asg',
"Key": 'test_key',
"Value": 'test_value',
"PropagateAtLaunch": True
},
]
)
client.create_or_update_tags(Tags=[{
"ResourceId": 'test_asg',
"Key": 'test_key',
"Value": 'updated_test_value',
"PropagateAtLaunch": True
}, {
"ResourceId": 'test_asg',
"Key": 'test_key2',
"Value": 'test_value2',
"PropagateAtLaunch": False
}])
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=["test_asg"]
)
response['AutoScalingGroups'][0]['Tags'].should.have.length_of(2)
@mock_autoscaling
def test_autoscaling_describe_policies_boto3():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=20,
DesiredCapacity=5,
Tags=[{
"ResourceId": 'test_asg',
"Key": 'test_key',
"Value": 'test_value',
"PropagateAtLaunch": True
}]
)
client.put_scaling_policy(
AutoScalingGroupName='test_asg',
PolicyName='test_policy_down',
PolicyType='SimpleScaling',
AdjustmentType='PercentChangeInCapacity',
ScalingAdjustment=-10,
Cooldown=60,
MinAdjustmentMagnitude=1)
client.put_scaling_policy(
AutoScalingGroupName='test_asg',
PolicyName='test_policy_up',
PolicyType='SimpleScaling',
AdjustmentType='PercentChangeInCapacity',
ScalingAdjustment=10,
Cooldown=60,
MinAdjustmentMagnitude=1)
response = client.describe_policies()
response['ScalingPolicies'].should.have.length_of(2)
response = client.describe_policies(AutoScalingGroupName='test_asg')
response['ScalingPolicies'].should.have.length_of(2)
response = client.describe_policies(PolicyTypes=['StepScaling'])
response['ScalingPolicies'].should.have.length_of(0)
response = client.describe_policies(
AutoScalingGroupName='test_asg',
PolicyNames=['test_policy_down'],
PolicyTypes=['SimpleScaling']
)
response['ScalingPolicies'].should.have.length_of(1)
response['ScalingPolicies'][0][
'PolicyName'].should.equal('test_policy_down')
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n# Copyright (C) 2015 Simon Biggs\n# This program is free software: you can redistribute it and/or\n# modify it under the terms of the GNU Affero General Public\n# License as published by the Free Software Foundation, either\n# version 3 of the ... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n# Copyright (C) 2015 Simon Biggs\n# This program is free software: you can redistribute it and/or\n# modify it under the terms of the GNU Affero General Public\n# License as published by the Free Software Foundation, either\n# v... | ```python
# Copyright (C) 2015 Simon Biggs
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# http://www.gnu.org/licenses/.
import numpy as np
from electronfactors.ellipse.equivalent import poi_distance_method
def test_centre_of_square():
XCoords = np.array([-3, 3, 3, -3])
YCoords = np.array([3, 3, -3, -3])
poi = poi_distance_method(
XCoords=XCoords, YCoords=YCoords
)
assert np.abs(poi[0]) < 0.1
assert np.abs(poi[1]) < 0.1
def test_centre_of_arbitrary_cutout():
XCoords = np.array([-1, -0.2, 0, 0.7, 1, 0]) * 4 + 1
YCoords = np.array([0, -1, -.8, 0, .6, 1]) * 4 - 1
poi = poi_distance_method(
XCoords=XCoords, YCoords=YCoords
)
assert np.abs(poi[0] - 0.92) < 0.1
assert np.abs(poi[1] + 0.62) < 0.1
``` |
[
{
"content": "Replicate the source code:\n```python\n\"\"\"\r\nBasic building blocks for generic class based views.\r\n\r\nWe don't bind behaviour to http method handlers yet,\r\nwhich allows mixin classes to be composed in interesting ways.\r\n\"\"\"\r\nfrom __future__ import unicode_literals\r\n\r\nfrom djang... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n\"\"\"\r\nBasic building blocks for generic class based views.\r\n\r\nWe don't bind behaviour to http method handlers yet,\r\nwhich allows mixin classes to be composed in interesting ways.\r\n\"\"\"\r\nfrom __future__ import unicode_literals\r... | ```python
"""
Basic building blocks for generic class based views.
We don't bind behaviour to http method handlers yet,
which allows mixin classes to be composed in interesting ways.
"""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.request import clone_request
from rest_framework.settings import api_settings
import warnings
def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None):
"""
Given a model instance, and an optional pk and slug field,
return the full list of all other field names on that model.
For use when performing full_clean on a model instance,
so we only clean the required fields.
"""
include = []
if pk:
# Pending deprecation
pk_field = obj._meta.pk
while pk_field.rel:
pk_field = pk_field.rel.to._meta.pk
include.append(pk_field.name)
if slug_field:
# Pending deprecation
include.append(slug_field)
if lookup_field and lookup_field != 'pk':
include.append(lookup_field)
return [field.name for field in obj._meta.fields if field.name not in include]
class CreateModelMixin(object):
"""
Create a model instance.
"""
def create(self, request, *args, **kwargs):
data = dict(request.DATA)
data.update(**kwargs)
serializer = self.get_serializer(data=data, files=request.FILES)
if serializer.is_valid():
self.pre_save(serializer.object)
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_success_headers(self, data):
try:
return {'Location': data[api_settings.URL_FIELD_NAME]}
except (TypeError, KeyError):
return {}
class ListModelMixin(object):
"""
List a queryset.
"""
empty_error = "Empty list and '%(class_name)s.allow_empty' is False."
def list(self, request, *args, **kwargs):
self.object_list = self.filter_queryset(self.get_queryset())
# Default is to allow empty querysets. This can be altered by setting
# `.allow_empty = False`, to raise 404 errors on empty querysets.
if not self.allow_empty and not self.object_list:
warnings.warn(
'The `allow_empty` parameter is due to be deprecated. '
'To use `allow_empty=False` style behavior, You should override '
'`get_queryset()` and explicitly raise a 404 on empty querysets.',
PendingDeprecationWarning
)
class_name = self.__class__.__name__
error_msg = self.empty_error % {'class_name': class_name}
raise Http404(error_msg)
# Switch between paginated or standard style responses
page = self.paginate_queryset(self.object_list)
if page is not None:
serializer = self.get_pagination_serializer(page)
else:
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data)
class RetrieveModelMixin(object):
"""
Retrieve a model instance.
"""
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = self.get_serializer(self.object)
return Response(serializer.data)
class UpdateModelMixin(object):
"""
Update a model instance.
"""
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
self.object = self.get_object_or_none()
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
try:
self.pre_save(serializer.object)
except ValidationError as err:
# full_clean on model instance may be called in pre_save,
# so we have to handle eventual errors.
return Response(err.message_dict, status=status.HTTP_400_BAD_REQUEST)
if self.object is None:
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
return Response(serializer.data, status=status.HTTP_201_CREATED)
self.object = serializer.save(force_update=True)
self.post_save(self.object, created=False)
return Response(serializer.data, status=status.HTTP_200_OK)
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
def get_object_or_none(self):
try:
return self.get_object()
except Http404:
if self.request.method == 'PUT':
# For PUT-as-create operation, we need to ensure that we have
# relevant permissions, as if this was a POST request. This
# will either raise a PermissionDenied exception, or simply
# return None.
self.check_permissions(clone_request(self.request, 'POST'))
else:
# PATCH requests where the object does not exist should still
# return a 404 response.
raise
def pre_save(self, obj):
"""
Set any attributes on the object that are implicit in the request.
"""
# pk and/or slug attributes are implicit in the URL.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup = self.kwargs.get(lookup_url_kwarg, None)
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
slug_field = slug and self.slug_field or None
if lookup:
setattr(obj, self.lookup_field, lookup)
if pk:
setattr(obj, 'pk', pk)
if slug:
setattr(obj, slug_field, slug)
# Ensure we clean the attributes so that we don't eg return integer
# pk using a string representation, as provided by the url conf kwarg.
if hasattr(obj, 'full_clean'):
exclude = _get_validation_exclusions(obj, pk, slug_field, self.lookup_field)
obj.full_clean(exclude)
class DestroyModelMixin(object):
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
self.pre_delete(obj)
obj.delete()
self.post_delete(obj)
return Response(status=status.HTTP_204_NO_CONTENT)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nimport os\n\n\ndef test_001_basic(settings, inspector):\n \"\"\"Looking for parents of basic sample\"\"\"\n sources = [\n os.path.join(settings.sample_path, 'main_basic.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-3.scss'),... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport os\n\n\ndef test_001_basic(settings, inspector):\n \"\"\"Looking for parents of basic sample\"\"\"\n sources = [\n os.path.join(settings.sample_path, 'main_basic.scss'),\n os.path.join(settings.sample_path, 'main_depth_... | ```python
# -*- coding: utf-8 -*-
import os
def test_001_basic(settings, inspector):
"""Looking for parents of basic sample"""
sources = [
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
]
sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
def test_002_vendor(settings, inspector):
"""Looking for parents of vendor component"""
sources = [
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
os.path.join(settings.sample_path, 'main_circular_0.scss'),
os.path.join(settings.sample_path, 'main_circular_1.scss'),
os.path.join(settings.sample_path, 'main_circular_2.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
]
sourcepath = os.path.join(settings.sample_path, '_vendor.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.sample_path, '_sass_filetest.sass'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
def test_003_library(settings, inspector):
"""Looking for parents of a library component"""
sources = [
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
os.path.join(settings.sample_path, 'main_circular_0.scss'),
os.path.join(settings.sample_path, 'main_circular_1.scss'),
os.path.join(settings.sample_path, 'main_circular_2.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
]
sourcepath = os.path.join(settings.lib1_path, 'components/_panels.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.lib1_path, 'library_1_fullstack.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
``` |
[
{
"content": "Here is the script:\n```python\n# -*- coding: utf-8 -*-\nimport datetime\n\nfrom openerp import http\nfrom openerp.http import request\nfrom openerp import tools\nfrom openerp.tools.translate import _\n\n\nclass website_account(http.Controller):\n @http.route(['/my', '/my/home'], type='http', a... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport datetime\n\nfrom openerp import http\nfrom openerp.http import request\nfrom openerp import tools\nfrom openerp.tools.translate import _\n\n\nclass website_account(http.Controller):\n @http.route(['/my', '/my/home']... | ```python
# -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp import tools
from openerp.tools.translate import _
class website_account(http.Controller):
@http.route(['/my', '/my/home'], type='http', auth="public", website=True)
def account(self):
partner = request.env.user.partner_id
values = {
'date': datetime.date.today().strftime('%Y-%m-%d')
}
res_sale_order = request.env['sale.order']
res_invoices = request.env['account.invoice']
quotations = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['sent', 'cancel'])
])
orders = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['progress', 'manual', 'shipping_except', 'invoice_except', 'done'])
])
invoices = res_invoices.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['open', 'paid', 'cancelled'])
])
values.update({
'quotations': quotations,
'orders': orders,
'invoices': invoices
})
# get customer sales rep
if partner.user_id:
sales_rep = partner.user_id
else:
sales_rep = False
values.update({
'sales_rep': sales_rep,
'company': request.website.company_id,
'user': request.env.user
})
return request.website.render("website_portal.account", values)
@http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
def orders_followup(self, order=None):
partner = request.env['res.users'].browse(request.uid).partner_id
domain = [
('partner_id.id', '=', partner.id),
('state', 'not in', ['draft', 'cancel']),
('id', '=', order)
]
order = request.env['sale.order'].search(domain)
invoiced_lines = request.env['account.invoice.line'].search([('invoice_id', 'in', order.invoice_ids.ids)])
order_invoice_lines = {il.product_id.id: il.invoice_id for il in invoiced_lines}
return request.website.render("website_portal.orders_followup", {
'order': order.sudo(),
'order_invoice_lines': order_invoice_lines,
})
@http.route(['/my/account'], type='http', auth='user', website=True)
def details(self, redirect=None, **post):
partner = request.env['res.users'].browse(request.uid).partner_id
values = {
'error': {},
'error_message': []
}
if post:
error, error_message = self.details_form_validate(post)
values.update({'error': error, 'error_message': error_message})
values.update(post)
if not error:
post.update({'zip': post.pop('zipcode', '')})
partner.sudo().write(post)
if redirect:
return request.redirect(redirect)
return request.redirect('/my/home')
countries = request.env['res.country'].sudo().search([])
states = request.env['res.country.state'].sudo().search([])
values.update({
'partner': partner,
'countries': countries,
'states': states,
'has_check_vat': hasattr(request.env['res.partner'], 'check_vat'),
'redirect': redirect,
})
return request.website.render("website_portal.details", values)
def details_form_validate(self, data):
error = dict()
error_message = []
mandatory_billing_fields = ["name", "phone", "email", "street2", "city", "country_id"]
# Validation
for field_name in mandatory_billing_fields:
if not data.get(field_name):
error[field_name] = 'missing'
# email validation
if data.get('email') and not tools.single_email_re.match(data.get('email')):
error["email"] = 'error'
error_message.append(_('Invalid Email! Please enter a valid email address.'))
# vat validation
if data.get("vat") and hasattr(request.env["res.partner"], "check_vat"):
if request.website.company_id.vat_check_vies:
# force full VIES online check
check_func = request.env["res.partner"].vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = request.env["res.partner"].simple_vat_check
vat_country, vat_number = request.env["res.partner"]._split_vat(data.get("vat"))
if not check_func(vat_country, vat_number): # simple_vat_check
error["vat"] = 'error'
# error message for empty required fields
if [err for err in error.values() if err == 'missing']:
error_message.append(_('Some required fields are empty.'))
return error, error_message
``` |
[
{
"content": "```python\n# Abstract syntax tree for CODA IDL\n\n# ============================================================================\n# Scopes\n# ============================================================================\n\nclass DefScope:\n 'Mixin class for descriptors which are also scopes.'\n d... | [
{
"content": "<|memory_start|>```python\n# Abstract syntax tree for CODA IDL\n\n# ============================================================================\n# Scopes\n# ============================================================================\n\nclass DefScope:\n 'Mixin class for descriptors which are al... | ```python
# Abstract syntax tree for CODA IDL
# ============================================================================
# Scopes
# ============================================================================
class DefScope:
'Mixin class for descriptors which are also scopes.'
def __init__(self):
self.structs = []
self.enums = []
self.extensions = []
self.fields = []
self.methods = []
def addMember(self, member):
assert member is not None
if isinstance(member, StructDef):
if member.isExtension():
self.extensions.append(member)
else:
self.structs.append(member)
elif isinstance(member, EnumDef):
self.enums.append(member)
elif isinstance(member, StructDef.Field):
self.fields.append(member)
elif isinstance(member, StructDef.Method):
self.methods.append(member)
else:
raise AssertionError('Invalid member: ' + str(member))
# ============================================================================
# Type descriptors
# ============================================================================
class Ident:
'''An identifier.'''
def __init__(self, location, value):
self.location = location
self.value = value
def __str__(self):
return self.value
class TypeName:
'''A reference to a type name.'''
def __init__(self, location, name):
self.location = location
self.name = name
def __str__(self):
return self.name
class SpecializedType:
'''A generic type plus type arguments.'''
def __init__(self, base, args):
self.location = base.location
self.base = base
self.args = args
def __str__(self):
return '{0}[{1}]'.format(self.base, ', '.join(str(ty) for ty in self.args))
class ModifiedType:
'''A const or shared type.'''
def __init__(self, base):
self.location = base.location
self.base = base
self.const = False
self.shared = False
def __str__(self):
return ''.join([
('const ' if self.const else ''),
('shared ' if self.shared else ''),
(str(self.base))])
class Param:
'''A method parameter.'''
def __init__(self, location, name, ty):
self.name = name
self.type = ty
def __str__(self):
return '{0}:{1}'.format(self.name, self.type)
class AbstractDef:
'''A type defined that has a name.'''
def __init__(self, location, name):
self.location = location
self.name = name
self.options = []
def __str__(self):
return self.name
class StructDef(AbstractDef, DefScope):
'''A structure type.'''
class Field:
def __init__(self, location, name, fieldType, index):
self.location = location
self.name = name
self.fieldType = fieldType
self.options = []
self.index = index
class Method:
def __init__(self, location, name, params, returnType, index = -1):
self.location = location
self.name = name
self.params = params
self.returnType = returnType
self.options = []
self.index = index
def __init__(self, location, name):
super().__init__(location, name)
DefScope.__init__(self)
self.baseType = None # For subtypes
self.extends = None # For extensions
self.typeId = None
self.extensionRange = (0, 0)
def getOptions(self):
return self.options
def addOptions(self, optionList):
for option in optionList:
self.options.append(option)
def getExtensionRange(self):
return self.extensionRange
def setExtensionRange(self, extRange):
self.extensionRange = extRange
def isExtension(self):
return self.extends is not None
class EnumDef(AbstractDef):
'''An enumeration type.'''
class Value:
def __init__(self, location, name, value):
self.location = location
self.name = name
self.value = value
def __init__(self, location, name):
super().__init__(location, name)
self.values = []
def addValues(self, values):
self.values += values
# ============================================================================
# Options
# ============================================================================
class Option:
def __init__(self, location, name, scope, value):
assert location is not None
assert name is not None
assert value is not None
self.location = location
self.scope = scope
self.name = name
self.value = value
def __str__(self):
if self.scope:
return '{0}:{1} = {2}'.format(self.scope, self.name, self.value)
else:
return '{0} = {1}'.format(self.name, self.value)
# ============================================================================
# Values - used for both options and constants
# ============================================================================
class Value:
def __init__(self, location):
self.location = location
# Boolean value
class BooleanValue(Value):
def __init__(self, location, value):
super().__init__(location)
self.value = value
def __str__(self):
return 'True' if self.value else 'False'
# Integer value
class IntegerValue(Value):
def __init__(self, location, value):
super().__init__(location)
self.value = value
def __str__(self):
return str(self.value)
# String value
class StringValue(Value):
def __init__(self, location, value):
super().__init__(location)
self.value = value
def __str__(self):
return repr(self.value)
# List value
class ListValue(Value):
def __init__(self, location, value):
super().__init__(location)
self.value = value
def __str__(self):
return '[' + ', '.join(str(value) for value in self.value) + ']'
# ============================================================================
# File descriptor
# ============================================================================
class File(DefScope):
'''Descriptor for a CODA IDL file.'''
def __init__(self, path, package=None):
super().__init__()
self.path = path
self.package = package
self.imports = []
self.options = []
def getPackage(self):
'The declared _package of this file'
return self.package
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# -*- coding: utf-8 -*-\nimport tornado.web\nimport tornado.ioloop\nfrom tornado.testing import AsyncHTTPTestCase\nfrom tortik.page import RequestHandler\nimport tornado.curl_httpclient\n\n\ndef first_postprocessor(handler, data, callback):\n callback(hand... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport tornado.web\nimport tornado.ioloop\nfrom tornado.testing import AsyncHTTPTestCase\nfrom tortik.page import RequestHandler\nimport tornado.curl_httpclient\n\n\ndef first_postprocessor(handler, data, callback):\n ... | ```python
# -*- coding: utf-8 -*-
import tornado.web
import tornado.ioloop
from tornado.testing import AsyncHTTPTestCase
from tortik.page import RequestHandler
import tornado.curl_httpclient
def first_postprocessor(handler, data, callback):
callback(handler, data.replace('Hello,', 'Good'))
def second_postprocessor(handler, data, callback):
callback(handler, data.replace('Good world', 'Good bye'))
class MainHandler(RequestHandler):
postprocessors = [
first_postprocessor,
second_postprocessor,
]
def get(self):
self.complete('Hello, world!')
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/', MainHandler),
]
settings = dict(
debug=True,
)
tornado.web.Application.__init__(self, handlers, **settings)
class PostprocessorHTTPTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application()
def get_new_ioloop(self):
return tornado.ioloop.IOLoop.instance()
def test_main(self):
self.http_client.fetch(self.get_url('/'), self.stop)
response = self.wait()
self.assertEqual(200, response.code)
self.assertIn(b'Good bye!', response.body)
``` |
[
{
"content": "```python\n##\n## This file is part of the libsigrokdecode project.\n##\n## Copyright (C) 2014 Angus Gratton <gus@projectgus.com>\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Softw... | [
{
"content": "<|memory_start|>```python\n##\n## This file is part of the libsigrokdecode project.\n##\n## Copyright (C) 2014 Angus Gratton <gus@projectgus.com>\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n#... | ```python
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Angus Gratton <gus@projectgus.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
import re
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>:
- 'AP_READ' (AP read)
- 'DP_READ' (DP read)
- 'AP_WRITE' (AP write)
- 'DP_WRITE' (DP write)
- 'LINE_RESET' (line reset sequence)
<pdata>:
- tuple of address, ack state, data for the given sequence
'''
swd_states = [
'IDLE', # Idle/unknown
'REQUEST', # Request phase (first 8 bits)
'ACK', # Ack phase (next 3 bits)
'READ', # Reading phase (next 32 bits for reads)
'WRITE', # Writing phase (next 32 bits for write)
'DPARITY', # Data parity phase
]
# Regexes for matching SWD data out of bitstring ('1' / '0' characters) format
RE_SWDSWITCH = re.compile(bin(0xE79E)[:1:-1] + '$')
RE_SWDREQ = re.compile(r'1(?P<apdp>.)(?P<rw>.)(?P<addr>..)(?P<parity>.)01$')
RE_IDLE = re.compile('0' * 50 + '$')
# Sample edges
RISING = 1
FALLING = 0
ADDR_DP_SELECT = 0x8
ADDR_DP_CTRLSTAT = 0x4
BIT_SELECT_CTRLSEL = 1
BIT_CTRLSTAT_ORUNDETECT = 1
ANNOTATIONS = ['reset', 'enable', 'read', 'write', 'ack', 'data', 'parity']
class Decoder(srd.Decoder):
api_version = 3
id = 'swd'
name = 'SWD'
longname = 'Serial Wire Debug'
desc = 'Two-wire protocol for debug access to ARM CPUs.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['swd']
tags = ['Debug/trace']
channels = (
{'id': 'swclk', 'name': 'SWCLK', 'desc': 'Master clock'},
{'id': 'swdio', 'name': 'SWDIO', 'desc': 'Data input/output'},
)
options = (
{'id': 'strict_start',
'desc': 'Wait for a line reset before starting to decode',
'default': 'no', 'values': ('yes', 'no')},
)
annotations = (
('reset', 'RESET'),
('enable', 'ENABLE'),
('read', 'READ'),
('write', 'WRITE'),
('ack', 'ACK'),
('data', 'DATA'),
('parity', 'PARITY'),
)
def __init__(self):
self.reset()
def reset(self):
# SWD data/clock state
self.state = 'UNKNOWN'
self.sample_edge = RISING
self.ack = None # Ack state of the current phase
self.ss_req = 0 # Start sample of current req
self.turnaround = 0 # Number of turnaround edges to ignore before continuing
self.bits = '' # Bits from SWDIO are accumulated here, matched against expected sequences
self.samplenums = [] # Sample numbers that correspond to the samples in self.bits
self.linereset_count = 0
# SWD debug port state
self.data = None
self.addr = None
self.rw = None # Are we inside an SWD read or a write?
self.ctrlsel = 0 # 'ctrlsel' is bit 0 in the SELECT register.
self.orundetect = 0 # 'orundetect' is bit 0 in the CTRLSTAT register.
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_python = self.register(srd.OUTPUT_PYTHON)
if self.options['strict_start'] == 'no':
self.state = 'REQ' # No need to wait for a LINE RESET.
def putx(self, ann, length, data):
'''Output annotated data.'''
ann = ANNOTATIONS.index(ann)
try:
ss = self.samplenums[-length]
except IndexError:
ss = self.samplenums[0]
if self.state == 'REQ':
self.ss_req = ss
es = self.samplenum
self.put(ss, es, self.out_ann, [ann, [data]])
def putp(self, ptype, pdata):
self.put(self.ss_req, self.samplenum, self.out_python, [ptype, pdata])
def put_python_data(self):
'''Emit Python data item based on current SWD packet contents.'''
ptype = {
('AP', 'R'): 'AP_READ',
('AP', 'W'): 'AP_WRITE',
('DP', 'R'): 'DP_READ',
('DP', 'W'): 'DP_WRITE',
}[(self.apdp, self.rw)]
self.putp(ptype, (self.addr, self.data, self.ack))
def decode(self):
while True:
# Wait for any clock edge.
(clk, dio) = self.wait({0: 'e'})
# Count rising edges with DIO held high,
# as a line reset (50+ high edges) can happen from any state.
if clk == RISING:
if dio == 1:
self.linereset_count += 1
else:
if self.linereset_count >= 50:
self.putx('reset', self.linereset_count, 'LINERESET')
self.putp('LINE_RESET', None)
self.reset_state()
self.linereset_count = 0
# Otherwise, we only care about either rising or falling edges
# (depending on sample_edge, set according to current state).
if clk != self.sample_edge:
continue
# Turnaround bits get skipped.
if self.turnaround > 0:
self.turnaround -= 1
continue
self.bits += str(dio)
self.samplenums.append(self.samplenum)
{
'UNKNOWN': self.handle_unknown_edge,
'REQ': self.handle_req_edge,
'ACK': self.handle_ack_edge,
'DATA': self.handle_data_edge,
'DPARITY': self.handle_dparity_edge,
}[self.state]()
def next_state(self):
'''Step to the next SWD state, reset internal counters accordingly.'''
self.bits = ''
self.samplenums = []
self.linereset_count = 0
if self.state == 'UNKNOWN':
self.state = 'REQ'
self.sample_edge = RISING
self.turnaround = 0
elif self.state == 'REQ':
self.state = 'ACK'
self.sample_edge = FALLING
self.turnaround = 1
elif self.state == 'ACK':
self.state = 'DATA'
self.sample_edge = RISING if self.rw == 'W' else FALLING
self.turnaround = 0 if self.rw == 'R' else 2
elif self.state == 'DATA':
self.state = 'DPARITY'
elif self.state == 'DPARITY':
#self.put_python_data()
self.state = 'REQ'
self.sample_edge = RISING
self.turnaround = 1 if self.rw == 'R' else 0
def reset_state(self):
'''Line reset (or equivalent), wait for a new pending SWD request.'''
#if self.state != 'REQ': # Emit a Python data item.
# self.put_python_data()
# Clear state.
self.bits = ''
self.samplenums = []
self.linereset_count = 0
self.turnaround = 0
self.sample_edge = RISING
self.data = ''
self.ack = None
self.state = 'REQ'
def handle_unknown_edge(self):
'''
Clock edge in the UNKNOWN state.
In the unknown state, clock edges get ignored until we see a line
reset (which is detected in the decode method, not here.)
'''
pass
def handle_req_edge(self):
'''Clock edge in the REQ state (waiting for SWD r/w request).'''
# Check for a JTAG->SWD enable sequence.
m = re.search(RE_SWDSWITCH, self.bits)
if m is not None:
self.putx('enable', 16, 'JTAG->SWD')
self.reset_state()
return
# Or a valid SWD Request packet.
m = re.search(RE_SWDREQ, self.bits)
if m is not None:
calc_parity = sum([int(x) for x in m.group('rw') + m.group('apdp') + m.group('addr')]) % 2
parity = '' if str(calc_parity) == m.group('parity') else 'E'
self.rw = 'R' if m.group('rw') == '1' else 'W'
self.apdp = 'AP' if m.group('apdp') == '1' else 'DP'
self.addr = int(m.group('addr')[::-1], 2) << 2
self.putx('read' if self.rw == 'R' else 'write', 8, self.get_address_description())
self.next_state()
return
def handle_ack_edge(self):
'''Clock edge in the ACK state (waiting for complete ACK sequence).'''
if len(self.bits) < 3:
return
if self.bits == '100':
self.putx('ack', 3, 'OK')
self.ack = 'OK'
self.next_state()
elif self.bits == '001':
self.putx('ack', 3, 'FAULT')
self.ack = 'FAULT'
if self.orundetect == 1:
self.next_state()
else:
self.reset_state()
self.turnaround = 1
elif self.bits == '010':
self.putx('ack', 3, 'WAIT')
self.ack = 'WAIT'
if self.orundetect == 1:
self.next_state()
else:
self.reset_state()
self.turnaround = 1
elif self.bits == '111':
self.putx('ack', 3, 'NOREPLY')
self.ack = 'NOREPLY'
self.reset_state()
else:
self.putx('ack', 3, 'ERROR')
self.ack = 'ERROR'
self.reset_state()
def handle_data_edge(self):
'''Clock edge in the DATA state (waiting for 32 bits to clock past).'''
if len(self.bits) < 32:
return
self.data = 0
self.dparity = 0
for x in range(32):
if self.bits[x] == '1':
self.data += (1 << x)
self.dparity += 1
self.dparity = self.dparity % 2
self.putx('data', 32, '0x%08x' % self.data)
self.next_state()
def handle_dparity_edge(self):
'''Clock edge in the DPARITY state (clocking in parity bit).'''
if str(self.dparity) != self.bits:
self.putx('parity', 1, str(self.dparity) + self.bits) # PARITY ERROR
elif self.rw == 'W':
self.handle_completed_write()
self.next_state()
def handle_completed_write(self):
'''
Update internal state of the debug port based on a completed
write operation.
'''
if self.apdp != 'DP':
return
elif self.addr == ADDR_DP_SELECT:
self.ctrlsel = self.data & BIT_SELECT_CTRLSEL
elif self.addr == ADDR_DP_CTRLSTAT and self.ctrlsel == 0:
self.orundetect = self.data & BIT_CTRLSTAT_ORUNDETECT
def get_address_description(self):
    '''
    Return a human-readable description of the currently selected address,
    for annotated results.
    '''
    if self.apdp == 'DP':
        # Tables 2-4 & 2-5 in ADIv5.2 spec ARM document IHI 0031C
        dp_read_names = {
            0: 'IDCODE',
            0x4: 'R CTRL/STAT' if self.ctrlsel == 0 else 'R DLCR',
            0x8: 'RESEND',
            0xC: 'RDBUFF',
        }
        dp_write_names = {
            0: 'W ABORT',
            0x4: 'W CTRL/STAT' if self.ctrlsel == 0 else 'W DLCR',
            0x8: 'W SELECT',
            0xC: 'W RESERVED',
        }
        if self.rw == 'R':
            return dp_read_names[self.addr]
        if self.rw == 'W':
            return dp_write_names[self.addr]
    elif self.apdp == 'AP':
        if self.rw in ('R', 'W'):
            return '%s AP%x' % (self.rw, self.addr)
    # Any legitimate operations shouldn't fall through to here, probably
    # a decoder bug.
    return '? %s%s%x' % (self.rw, self.apdp, self.addr)
``` |
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nProvides constants common in the Bluetooth HCI protocol.\n\"\"\"\n\nimport enum\n\nHCI_MAX_EVENT_SIZE = 260\n\n\nclass Status(enum.IntEnum):\n \"\"\"\n Collection of HCI return states.\n \"\"\"\n Success = 0x00\n Un... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nProvides constants common in the Bluetooth HCI protocol.\n\"\"\"\n\nimport enum\n\nHCI_MAX_EVENT_SIZE = 260\n\n\nclass Status(enum.IntEnum):\n \"\"\"\n Collection of HCI return states.\n \"\"\"\n Succes... | ```python
# -*- coding: utf-8 -*-
"""
Provides constants common in the Bluetooth HCI protocol.
"""
import enum
HCI_MAX_EVENT_SIZE = 260
class Status(enum.IntEnum):
    """
    Collection of HCI return states (status / error codes).

    Success (0x00) indicates no error; every other value reports a
    specific failure condition. Gaps in the numbering (e.g. 0x2b,
    0x31, 0x33, 0x34) are codes not covered by this collection.
    """
    Success = 0x00
    UnknownHciCommand = 0x01
    UnknownConnectionIdentifier = 0x02
    HardwareFailure = 0x03
    PageTimeout = 0x04
    AuthenticationFailure = 0x05
    PinOrKeyMissing = 0x06
    MemoryCapacityExceeded = 0x07
    ConnectionTimeout = 0x08
    ConnectionLimitExceeded = 0x09
    SynchronousConnectionLimitExceeded = 0x0a
    ACLConnectionAlreadyExists = 0x0b
    CommandDisallowed = 0x0c
    ConnectionRejectedLimitedResources = 0x0d
    ConnectionRejectedSecurityReasons = 0x0e
    ConnectionRejectedUnnacceptableBDAddr = 0x0f
    ConnectionAcceptTimeoutExceeded = 0x10
    UnsupportedFeatureOrParameterValue = 0x11
    InvalidHciCommandParameters = 0x12
    RemoteUserTerminatedConnection = 0x13
    RemoteDeviceTerminatedConnectionLowResources = 0x14
    RemoteDeviceTerminatedConnectionPowerOff = 0x15
    ConnectionTerminatedLocalHost = 0x16
    RepeatedAttempts = 0x17
    PairingNotAllowed = 0x18
    UnknownLmpPdu = 0x19
    UnsupportedRemoteFeature = 0x1a
    ScoOffsetRejected = 0x1b
    ScoIntervalRejected = 0x1c
    ScoAirModeRejected = 0x1d
    InvalidLmpParameters = 0x1e
    UnspecifiedError = 0x1f
    UnsupportedLmpParameterValue = 0x20
    RoleChangeNotAllowed = 0x21
    LmpResponseTimeout = 0x22
    LmpErrorTransactionCollision = 0x23
    LmpPduNotAllowed = 0x24
    EncryptionModeNotAcceptable = 0x25
    LinkKeyCannotChange = 0x26
    RequestedQosNotSupported = 0x27
    InstantPassed = 0x28
    PairingWithUnitKeyNotSupported = 0x29
    DifferentTransactionCollision = 0x2a
    QosUnnacceptableParameter = 0x2c
    QosRejected = 0x2d
    ChannelClassificationNotSupported = 0x2e
    InsufficientSecurity = 0x2f
    ParameterOutOfMandatoryRange = 0x30
    RoleSwitchPending = 0x32
    RoleSwitchFailed = 0x35
    ExtendedInquiryResponseTooLarge = 0x36
    SecureSimplePairingNotSupportedByHost = 0x37
    HostBusyPairing = 0x38
    ConnectionRejectedNoSuitableChannel = 0x39
    ControllerBusy = 0x3a
    UnacceptableConnectionParameters = 0x3b
    DirectedAdvertisingTimeout = 0x3c
    ConnectionTerminatedMicFailure = 0x3d
    ConnectionEstablishFailure = 0x3e
    MacConnectionFailed = 0x3f
    CoarseClockAdjustmentRejected = 0x40
class PacketType(enum.IntEnum):
    """
    Known HCI packet types (packet indicator values).

    Invalid (0x00) is not a real packet type; it serves as a sentinel
    for an unrecognized indicator.
    """
    Invalid = 0x00
    Command = 0x01
    Async = 0x02   # asynchronous (ACL) data
    Sync = 0x03    # synchronous (SCO) data
    Event = 0x04
class Event(enum.IntEnum):
    """
    Common HCI event types (event codes of HCI event packets).

    Not exhaustive — only the events this collection needs to name.
    """
    CommandComplete = 0x0e
    CommandStatus = 0x0f
    HardwareError = 0x10
    DataBufferOverflow = 0x1a
    Le = 0x3e              # LE meta event; sub-event code in LeEvent
    VendorSpecific = 0xff
class LeEvent(enum.IntEnum):
    """
    Common HCI LE event types (sub-event codes of the LE meta event).
    """
    LeAdvertisingReport = 0x02
class GapProfile(enum.IntEnum):
    """
    GAP communication roles/profiles.

    Values are distinct powers of two, so roles can presumably be
    OR-ed together into a bitmask — confirm against callers.
    """
    Broadcaster = 0x01
    Observer = 0x02
    Peripheral = 0x04
    Central = 0x08
class DiscoveryType(enum.IntEnum):
    """
    LeAdvertisingReport message type (the report's event_type field).
    """
    ConnectableUndirectedAdvertising = 0x00
    ConnectableDirectedAdvertising = 0x01
    ScannableUndirectedAdvertising = 0x02
    NonConnectableUndirectedAdvertising = 0x03
    ScanResponse = 0x04
class AddressType(enum.IntEnum):
    """
    Device address type.

    UnknownAddressType (0x04) is a local sentinel for values outside
    the known range.
    """
    PublicDeviceAddress = 0x00
    RandomDeviceAddress = 0x01
    PublicIdentityAddress = 0x02
    RandomIdentityAddress = 0x03
    UnknownAddressType = 0x04
class ScanType(enum.IntEnum):
    """
    LE scan type: passive (no scan requests) vs. active scanning.
    """
    PassiveScan = 0x00
    ActiveScan = 0x01
class FilterPolicy(enum.IntEnum):
    """
    LE scan filter policy.
    """
    UndirectedAdsOnly = 0x00
    WhitelistedOnly = 0x01
    ResolvableDirected = 0x02
    WhitelistedAndResolvableDirected = 0x03
class AdType(enum.IntEnum):
    """
    Advertisement data type (the "AD type" byte of each advertising
    data structure).

    NOTE: members below are grouped, not listed in strictly ascending
    numeric order (e.g. 0x1f and 0x15 appear between 0x14 and 0x16).
    """
    Flags = 0x01
    IncompleteListOf16BitServiceClassUUIDs = 0x02
    CompleteListOf16BitServiceClassUUIDs = 0x03
    IncompleteListOf32BitServiceClassUUIDs = 0x04
    CompleteListOf32BitServiceClassUUIDs = 0x05
    IncompleteListOf128BitServiceClassUUIDs = 0x06
    CompleteListOf128BitServiceClassUUIDs = 0x07
    ShortenedLocalName = 0x08
    CompleteLocalName = 0x09
    TxPowerLevel = 0x0a
    ClassOfDevice = 0x0d
    SimplePairingHashC192 = 0x0e
    SimplePairingRandomizerR192 = 0x0f
    SecurityManagerTKValue = 0x10
    SecurityManagerOutOfBandFlags = 0x11
    SlaveConnectionIntervalRange = 0x12
    ListOf16BitServiceSolicitationUUIDs = 0x14
    ListOf32BitServiceSolicitationUUIDs = 0x1f
    ListOf128BitServiceSolicitationUUIDs = 0x15
    ServiceData16BitUUID = 0x16
    ServiceData32BitUUID = 0x20
    ServiceData128BitUUID = 0x21
    LeSecureConnectionsConfirmationValue = 0x22
    LeSecureConnectionsRandomValue = 0x23
    URI = 0x24
    IndoorPositioning = 0x25
    TransportDiscoveryData = 0x26
    PublicTargetAddress = 0x17
    RandomTargetAddress = 0x18
    Appearance = 0x19
    AdvertisingInterval = 0x1a
    LeBluetoothDeviceAddress = 0x1b
    LeRole = 0x1c
    SimplePairingHashC256 = 0x1d
    SimplePairingRandomizerR256 = 0x1e
    InformationData = 0x3d
    ManufacturerSpecificData = 0xff
class CompanyId(enum.IntEnum):
    """
    Known company identifiers assigned by the Bluetooth SIG.

    Member names are the assigned company names with punctuation and
    spaces stripped (typos such as "MurataManufacuringCoLtd" preserve
    the existing public API and must not be renamed). RESERVED (0xffff)
    marks an unassigned/reserved identifier.
    """
    EricssonTechnologyLicensing = 0x0000
    NokiaMobilePhones = 0x0001
    IntelCorp = 0x0002
    IBMCorp = 0x0003
    ToshibaCorp = 0x0004
    ThreeCom = 0x0005
    Microsoft = 0x0006
    Lucent = 0x0007
    Motorola = 0x0008
    InfineonTechnologiesAG = 0x0009
    CambridgeSiliconRadio = 0x000a
    SiliconWave = 0x000b
    DigianswerAS = 0x000c
    TexasInstrumentsInc = 0x000d
    CevaInc = 0x000e
    BroadcomCorporation = 0x000f
    MitelSemiconductor = 0x0010
    WidcommInc = 0x0011
    ZeevoInc = 0x0012
    AtmelCorporation = 0x0013
    MitsubishiElectricCorporation = 0x0014
    RTXTelecomAS = 0x0015
    KCTechnologyInc = 0x0016
    NewLogic = 0x0017
    TransilicaInc = 0x0018
    RohdeSchwarzGmbHCoKG = 0x0019
    TTPComLimited = 0x001a
    SigniaTechnologiesInc = 0x001b
    ConexantSystemsInc = 0x001c
    Qualcomm = 0x001d
    Inventel = 0x001e
    AVMBerlin = 0x001f
    BandSpeedInc = 0x0020
    MansellaLtd = 0x0021
    NECCorporation = 0x0022
    WavePlusTechnologyCoLtd = 0x0023
    Alcatel = 0x0024
    NXPSemiconductors = 0x0025
    CTechnologies = 0x0026
    OpenInterface = 0x0027
    RFMicroDevices = 0x0028
    HitachiLtd = 0x0029
    SymbolTechnologiesInc = 0x002a
    Tenovis = 0x002b
    MacronixInternationalCoLtd = 0x002c
    GCTSemiconductor = 0x002d
    NorwoodSystems = 0x002e
    MewTelTechnologyInc = 0x002f
    STMicroelectronics = 0x0030
    Synopsis = 0x0031
    RedMLtd = 0x0032
    CommilLtd = 0x0033
    ComputerAccessTechnologyCorporation = 0x0034
    EclipseSL = 0x0035
    RenesasElectronicsCorporation = 0x0036
    MobilianCorporation = 0x0037
    Terax = 0x0038
    IntegratedSystemSolutionCorp = 0x0039
    MatsushitaElectricIndustrialCoLtd = 0x003a
    GennumCorporation = 0x003b
    BlackBerryLimited = 0x003c
    IPextremeInc = 0x003d
    SystemsandChipsInc = 0x003e
    BluetoothSIGInc = 0x003f
    SeikoEpsonCorporation = 0x0040
    IntegratedSiliconSolutionTaiwanInc = 0x0041
    CONWISETechnologyCorporationLtd = 0x0042
    PARROTSA = 0x0043
    SocketMobile = 0x0044
    AtherosCommunicationsInc = 0x0045
    MediaTekInc = 0x0046
    Bluegiga = 0x0047
    MarvellTechnologyGroupLtd = 0x0048
    ThreeDSPCorporation = 0x0049
    AccelSemiconductorLtd = 0x004a
    ContinentalAutomotiveSystems = 0x004b
    AppleInc = 0x004c
    StaccatoCommunicationsInc = 0x004d
    AvagoTechnologies = 0x004e
    APTLicensingLtd = 0x004f
    SiRFTechnology = 0x0050
    TzeroTechnologiesInc = 0x0051
    JMCorporation = 0x0052
    Free2moveAB = 0x0053
    ThreeDiJoyCorporation = 0x0054
    PlantronicsInc = 0x0055
    SonyEricssonMobileCommunications = 0x0056
    HarmanInternationalIndustriesInc = 0x0057
    VizioInc = 0x0058
    NordicSemiconductorASA = 0x0059
    EMMicroelectronicMarinSA = 0x005a
    RalinkTechnologyCorporation = 0x005b
    BelkinInternationalInc = 0x005c
    RealtekSemiconductorCorporation = 0x005d
    StonestreetOneLLC = 0x005e
    WicentricInc = 0x005f
    RivieraWavesSAS = 0x0060
    RDAMicroelectronics = 0x0061
    GibsonGuitars = 0x0062
    MiCommandInc = 0x0063
    BandXIInternationalLLC = 0x0064
    HewlettPackardCompany = 0x0065
    NineSolutionsOy = 0x0066
    GNNetcomAS = 0x0067
    GeneralMotors = 0x0068
    ADEngineeringInc = 0x0069
    MindTreeLtd = 0x006a
    PolarElectroOY = 0x006b
    BeautifulEnterpriseCoLtd = 0x006c
    BriarTekInc = 0x006d
    SummitDataCommunicationsInc = 0x006e
    SoundID = 0x006f
    MonsterLLC = 0x0070
    connectBlueAB = 0x0071
    ShangHaiSuperSmartElectronicsCoLtd = 0x0072
    GroupSenseLtd = 0x0073
    ZommLLC = 0x0074
    SamsungElectronicsCoLtd = 0x0075
    CreativeTechnologyLtd = 0x0076
    LairdTechnologies = 0x0077
    NikeInc = 0x0078
    lesswireAG = 0x0079
    MStarSemiconductorInc = 0x007a
    HanlynnTechnologies = 0x007b
    ARCambridge = 0x007c
    SeersTechnologyCoLtd = 0x007d
    SportsTrackingTechnologiesLtd = 0x007e
    AutonetMobile = 0x007f
    DeLormePublishingCompanyInc = 0x0080
    WuXiVimicro = 0x0081
    SennheiserCommunicationsAS = 0x0082
    TimeKeepingSystemsInc = 0x0083
    LudusHelsinkiLtd = 0x0084
    BlueRadiosInc = 0x0085
    equinoxAG = 0x0086
    GarminInternationalInc = 0x0087
    Ecotest = 0x0088
    GNReSoundAS = 0x0089
    Jawbone = 0x008a
    TopcornPositioningSystemsLLC = 0x008b
    GimbalInc = 0x008c
    ZscanSoftware = 0x008d
    QuinticCorp = 0x008e
    StollmanEVGmbH = 0x008f
    FunaiElectricCoLtd = 0x0090
    AdvancedPANMOBILSystemsGmbHCoKG = 0x0091
    ThinkOpticsInc = 0x0092
    UniversalElectronicsInc = 0x0093
    AirohaTechnologyCorp = 0x0094
    NECLightingLtd = 0x0095
    ODMTechnologyInc = 0x0096
    ConnecteDeviceLtd = 0x0097
    zer01tvGmbH = 0x0098
    iTechDynamicGlobalDistributionLtd = 0x0099
    Alpwise = 0x009a
    JiangsuToppowerAutomotiveElectronicsCoLtd = 0x009b
    ColorfyInc = 0x009c
    GeoforceInc = 0x009d
    BoseCorporation = 0x009e
    SuuntoOy = 0x009f
    KensingtonComputerProductsGroup = 0x00a0
    SRMedizinelektronik = 0x00a1
    VertuCorporationLimited = 0x00a2
    MetaWatchLtd = 0x00a3
    LINAKAS = 0x00a4
    OTLDynamicsLLC = 0x00a5
    PandaOceanInc = 0x00a6
    VisteonCorporation = 0x00a7
    ARPDevicesLimited = 0x00a8
    MagnetiMarelliSpA = 0x00a9
    CAENRFIDsrl = 0x00aa
    IngenieurSystemgruppeZahnGmbH = 0x00ab
    GreenThrottleGames = 0x00ac
    PeterSystemtechnikGmbH = 0x00ad
    OmegawaveOy = 0x00ae
    Cinetix = 0x00af
    PassifSemiconductorCorp = 0x00b0
    SarisCyclingGroupInc = 0x00b1
    BekeyAS = 0x00b2
    ClarinoxTechnologiesPtyLtd = 0x00b3
    BDETechnologyCoLtd = 0x00b4
    SwirlNetworks = 0x00b5
    Mesointernational = 0x00b6
    TreLabLtd = 0x00b7
    QualcommInnovationCenterInc = 0x00b8
    JohnsonControlsInc = 0x00b9
    StarkeyLaboratoriesInc = 0x00ba
    SPowerElectronicsLimited = 0x00bb
    AceSensorInc = 0x00bc
    AplixCorporation = 0x00bd
    AAMPofAmerica = 0x00be
    StalmartTechnologyLimited = 0x00bf
    AMICCOMElectronicsCorporation = 0x00c0
    ShenzhenExcelsecuDataTechnologyCoLtd = 0x00c1
    GeneqInc = 0x00c2
    adidasAG = 0x00c3
    LGElectronics = 0x00c4
    OnsetComputerCorporation = 0x00c5
    SelflyBV = 0x00c6
    QuuppaOy = 0x00c7
    GeLoInc = 0x00c8
    Evluma = 0x00c9
    MC10 = 0x00ca
    BinauricSE = 0x00cb
    BeatsElectronics = 0x00cc
    MicrochipTechnologyInc = 0x00cd
    ElgatoSystemsGmbH = 0x00ce
    ARCHOSSA = 0x00cf
    DexcomInc = 0x00d0
    PolarElectroEuropeBV = 0x00d1
    DialogSemiconductorBV = 0x00d2
    TaixingbangTechnologyCoLTD = 0x00d3
    Kawantech = 0x00d4
    AustcoCommunicationSystems = 0x00d5
    TimexGroupUSAInc = 0x00d6
    QualcommTechnologiesInc = 0x00d7
    QualcommConnectedExperiencesInc = 0x00d8
    VoyetraTurtleBeach = 0x00d9
    txtrGmbH = 0x00da
    Biosentronics = 0x00db
    ProcterGamble = 0x00dc
    HosidenCorporation = 0x00dd
    MuzikLLC = 0x00de
    MisfitWearablesCorp = 0x00df
    Google = 0x00e0
    DanlersLtd = 0x00e1
    SemilinkInc = 0x00e2
    inMusicBrandsInc = 0x00e3
    LSResearchInc = 0x00e4
    EdenSoftwareConsultantsLtd = 0x00e5
    Freshtemp = 0x00e6
    KSTechnologies = 0x00e7
    ACTSTechnologies = 0x00e8
    VtrackSystems = 0x00e9
    NielsenKellermanCompany = 0x00ea
    ServerTechnologyInc = 0x00eb
    BioResearchAssociates = 0x00ec
    JollyLogicLLC = 0x00ed
    AboveAverageOutcomesInc = 0x00ee
    BitsplittersGmbH = 0x00ef
    PayPalInc = 0x00f0
    WitronTechnologyLimited = 0x00f1
    AetherThingsInc = 0x00f2
    KentDisplaysInc = 0x00f3
    NautilusInc = 0x00f4
    SmartifierOy = 0x00f5
    ElcometerLimited = 0x00f6
    VSNTechnologiesInc = 0x00f7
    AceUniCorpLtd = 0x00f8
    StickNFind = 0x00f9
    CrystalCodeAB = 0x00fa
    KOUKAAMas = 0x00fb
    DelphiCorporation = 0x00fc
    ValenceTechLimited = 0x00fd
    Reserved = 0x00fe
    TypoProductsLLC = 0x00ff
    TomTomInternationalBV = 0x0100
    FugooInc = 0x0101
    KeiserCorporation = 0x0102
    BangOlufsenAS = 0x0103
    PLUSLocationsSystemsPtyLtd = 0x0104
    UbiquitousComputingTechnologyCorporation = 0x0105
    InnovativeYachtterSolutions = 0x0106
    WilliamDemantHoldingAS = 0x0107
    ChiconyElectronicsCoLtd = 0x0108
    AtusBV = 0x0109
    CodegateLtd = 0x010a
    ERiInc = 0x010b
    TransducersDirectLLC = 0x010c
    FujitsuTenLimited = 0x010d
    AudiAG = 0x010e
    HiSiliconTechnologiesCoLtd = 0x010f
    NipponSeikiCoLtd = 0x0110
    SteelseriesApS = 0x0111
    VisyblInc = 0x0112
    OpenbrainTechnologiesCoLtd = 0x0113
    Xensr = 0x0114
    esolutions = 0x0115
    OneOAKTechnologies = 0x0116
    WimotoTechnologiesInc = 0x0117
    RadiusNetworksInc = 0x0118
    WizeTechnologyCoLtd = 0x0119
    QualcommLabsInc = 0x011a
    ArubaNetworks = 0x011b
    Baidu = 0x011c
    ArendiAG = 0x011d
    SkodaAutoas = 0x011e
    VolkswagonAG = 0x011f
    PorscheAG = 0x0120
    SinoWealthElectronicLtd = 0x0121
    AirTurnInc = 0x0122
    KinsaInc = 0x0123
    HIDGlobal = 0x0124
    SEATes = 0x0125
    PrometheanLtd = 0x0126
    SaluticaAlliedSolutions = 0x0127
    GPSIGroupPtyLtd = 0x0128
    NimbleDevicesOy = 0x0129
    ChangzhouYongseInfotechCoLtd = 0x012a
    SportIQ = 0x012b
    TEMECInstrumentsBV = 0x012c
    SonyCorporation = 0x012d
    ASSAABLOY = 0x012e
    ClarionCoLtd = 0x012f
    WarehouseInnovations = 0x0130
    CypressSemiconductorCorporation = 0x0131
    MADSInc = 0x0132
    BlueMaestroLimited = 0x0133
    ResolutionProductsInc = 0x0134
    AirewearLLC = 0x0135
    SeedLabsInc = 0x0136
    PrestigioPlazaLtd = 0x0137
    NTEOInc = 0x0138
    FocusSystemsCorporation = 0x0139
    TencentHoldingsLimited = 0x013a
    Allegion = 0x013b
    MurataManufacuringCoLtd = 0x013c
    # NOTE: 0x013d is not covered by this collection.
    NodInc = 0x013e
    BBManufacturingCompany = 0x013f
    AlpineElectronicsCoLtd = 0x0140
    FedExServices = 0x0141
    GrapeSystemsInc = 0x0142
    BkonConnect = 0x0143
    LintechGmbH = 0x0144
    NovatelWireless = 0x0145
    Ciright = 0x0146
    MightyCastInc = 0x0147
    AmbimatElectronics = 0x0148
    PerytonsLtd = 0x0149
    TivoliAudioLLC = 0x014a
    MasterLock = 0x014b
    MeshNetLtd = 0x014c
    HuizhouDesaySVAutomotiveCOLTD = 0x014d
    TangerineInc = 0x014e
    BWGroupLtd = 0x014f
    PioneerCorporation = 0x0150
    OnBeep = 0x0151
    VernierSoftwareTechnology = 0x0152
    ROLErgo = 0x0153
    PebbleTechnology = 0x0154
    NETATMO = 0x0155
    AccumulateAB = 0x0156
    AnhuiHuamiInformationTechnologyCoLtd = 0x0157
    Inmitesro = 0x0158
    ChefStepsInc = 0x0159
    micasAG = 0x015a
    BiomedicalResearchLtd = 0x015b
    PitiusTecSL = 0x015c
    EstimoteInc = 0x015d
    UnikeyTechnologiesInc = 0x015e
    TimerCapCo = 0x015f
    AwoX = 0x0160
    yikes = 0x0161
    MADSGlobalNZLtd = 0x0162
    PCHInternational = 0x0163
    QingdaoYeelinkInformationTechnologyCoLtd = 0x0164
    MilwaukeeTool = 0x0165
    MISHIKPteLtd = 0x0166
    BayerHealthCare = 0x0167
    SpiceboxLLC = 0x0168
    emberlight = 0x0169
    CooperAtkinsCorporation = 0x016a
    Qblinks = 0x016b
    MYSPHERA = 0x016c
    LifeScanInc = 0x016d
    VolanticAB = 0x016e
    PodoLabsInc = 0x016f
    FHoffmannLaRocheAG = 0x0170
    AmazonFulfillmentService = 0x0171
    ConnovateTechnologyPrivateLimited = 0x0172
    KocomojoLLC = 0x0173
    EverykeyLLC = 0x0174
    DynamicControls = 0x0175
    SentriLock = 0x0176
    ISYSTinc = 0x0177
    CASIOCOMPUTERCOLTD = 0x0178
    LAPISSemiconductorCoLtd = 0x0179
    TelemonitorInc = 0x017a
    taskitGmbH = 0x017b
    DaimlerAG = 0x017c
    BatAndCat = 0x017d
    BluDotzLtd = 0x017e
    XTelApS = 0x017f
    GigasetCommunicationsGmbH = 0x0180
    GeckoHealthInnovationsInc = 0x0181
    HOPUbiquitous = 0x0182
    ToBeAssigned = 0x0183
    Nectar = 0x0184
    belappsLLC = 0x0185
    CORELightingLtd = 0x0186
    SeraphimSenseLtd = 0x0187
    UnicoRBC = 0x0188
    PhysicalEnterprisesInc = 0x0189
    AbleTrendTechnologyLimited = 0x018a
    KonicaMinoltaInc = 0x018b
    WiloSE = 0x018c
    ExtronDesignServices = 0x018d
    FitbitInc = 0x018e
    FirefliesSystems = 0x018f
    IntellettoTechnologiesInc = 0x0190
    FDKCORPORATION = 0x0191
    CloudleafInc = 0x0192
    MavericAutomationLLC = 0x0193
    AcousticStreamCorporation = 0x0194
    Zuli = 0x0195
    PaxtonAccessLtd = 0x0196
    WiSilicaInc = 0x0197
    VengitLimited = 0x0198
    SALTOSYSTEMSSL = 0x0199
    TRONForum = 0x019a
    CUBETECHsro = 0x019b
    CokiyaIncorporated = 0x019c
    CVSHealth = 0x019d
    Ceruus = 0x019e
    StrainstallLtd = 0x019f
    ChannelEnterprisesLtd = 0x01a0
    FIAMM = 0x01a1
    GIGALANECOLTD = 0x01a2
    EROAD = 0x01a3
    MineSafetyAppliances = 0x01a4
    IconHealthandFitness = 0x01a5
    AsandooGmbH = 0x01a6
    ENERGOUSCORPORATION = 0x01a7
    Taobao = 0x01a8
    CanonInc = 0x01a9
    GeophysicalTechnologyInc = 0x01aa
    FacebookInc = 0x01ab
    NiproDiagnosticsInc = 0x01ac
    FlightSafetyInternational = 0x01ad
    EarlensCorporation = 0x01ae
    SunriseMicroDevicesInc = 0x01af
    StarMicronicsCoLtd = 0x01b0
    NetizensSpzoo = 0x01b1
    NymiInc = 0x01b2
    NytecInc = 0x01b3
    TrineoSpzoo = 0x01b4
    NestLabsInc = 0x01b5
    LMTechnologiesLtd = 0x01b6
    GeneralElectricCompany = 0x01b7
    iD3SL = 0x01b8
    HANAMicron = 0x01b9
    StagesCyclingLLC = 0x01ba
    CochlearBoneAnchoredSolutionsAB = 0x01bb
    SenionLabAB = 0x01bc
    SyszoneCoLtd = 0x01bd
    PulsateMobileLtd = 0x01be
    HongKongHunterSunElectronicLimited = 0x01bf
    pironexGmbH = 0x01c0
    BRADATECHCorp = 0x01c1
    TransenergooilAG = 0x01c2
    Bunch = 0x01c3
    DMEMicroelectronics = 0x01c4
    BitcrazeAB = 0x01c5
    HASWAREInc = 0x01c6
    AbiogenixInc = 0x01c7
    PolyControlApS = 0x01c8
    Avion = 0x01c9
    LaerdalMedicalAS = 0x01ca
    FetchMyPet = 0x01cb
    SamLabsLtd = 0x01cc
    ChengduSynwingTechnologyLtd = 0x01cd
    HOUWASYSTEMDESIGNkk = 0x01ce
    BSH = 0x01cf
    PrimusInterParesLtd = 0x01d0
    August = 0x01d1
    GillElectronics = 0x01d2
    SkyWaveDesign = 0x01d3
    NewlabSrl = 0x01d4
    ELADsrl = 0x01d5
    Gwearablesinc = 0x01d6
    SquadroneSystemsInc = 0x01d7
    CodeCorporation = 0x01d8
    SavantSystemsLLC = 0x01d9
    LogitechInternationalSA = 0x01da
    InnblueConsulting = 0x01db
    iParkingLtd = 0x01dc
    KoninklijkePhilipsElectronicsNV = 0x01dd
    MinelabElectronicsPtyLimited = 0x01de
    BisonGroupLtd = 0x01df
    WidexAS = 0x01e0
    JollaLtd = 0x01e1
    LectronixInc = 0x01e2
    CaterpillarInc = 0x01e3
    FreedomInnovations = 0x01e4
    DynamicDevicesLtd = 0x01e5
    TechnologySolutionsLtd = 0x01e6
    IPSGroupInc = 0x01e7
    STIR = 0x01e8
    SanoInc = 0x01e9
    AdvancedApplicationDesignInc = 0x01ea
    AutoMapLLC = 0x01eb
    SpreadtrumCommunicationsShanghaiLtd = 0x01ec
    CuteCircuitLTD = 0x01ed
    ValeoService = 0x01ee
    FullpowerTechnologiesInc = 0x01ef
    KloudNation = 0x01f0
    ZebraTechnologiesCorporation = 0x01f1
    ItronInc = 0x01f2
    TheUniversityofTokyo = 0x01f3
    UTCFireandSecurity = 0x01f4
    CoolWebthingsLimited = 0x01f5
    DJOGlobal = 0x01f6
    GellinerLimited = 0x01f7
    AnykaMicroelectronicsTechnologyCoLTD = 0x01f8
    MedtronicInc = 0x01f9
    GozioInc = 0x01fa
    FormLiftingLLC = 0x01fb
    WahooFitnessLLC = 0x01fc
    KontaktMicroLocationSpzoo = 0x01fd
    RadioSystemCorporation = 0x01fe
    FreescaleSemiconductorInc = 0x01ff
    VerifoneSystemsPTeLtdTaiwanBranch = 0x0200
    ARTiming = 0x0201
    RigadoLLC = 0x0202
    KemppiOy = 0x0203
    TapcentiveInc = 0x0204
    SmartboticsInc = 0x0205
    OtterProductsLLC = 0x0206
    STEMPInc = 0x0207
    LumiGeekLLC = 0x0208
    InvisionHeartInc = 0x0209
    MacnicaInc = 0x020a
    JaguarLandRoverLimited = 0x020b
    CoroWareTechnologiesInc = 0x020c
    SimploTechnologyCoLTD = 0x020d
    OmronHealthcareCoLTD = 0x020e
    ComoduleGMBH = 0x020f
    ikeGPS = 0x0210
    TelinkSemiconductorCoLtd = 0x0211
    InterplanCoLtd = 0x0212
    WylerAG = 0x0213
    IKMultimediaProductionsrl = 0x0214
    LukotonExperienceOy = 0x0215
    MTILtd = 0x0216
    Tech4homeLda = 0x0217
    HiotechAB = 0x0218
    DOTTLimited = 0x0219
    BlueSpeckLabsLLC = 0x021a
    CiscoSystemsInc = 0x021b
    MobicommInc = 0x021c
    Edamic = 0x021d
    GoodnetLtd = 0x021e
    LusterLeafProductsInc = 0x021f
    ManusMachinaBV = 0x0220
    MobiquityNetworksInc = 0x0221
    PraxisDynamics = 0x0222
    PhilipMorrisProductsSA = 0x0223
    ComarchSA = 0x0224
    NestlNespressoSA = 0x0225
    MerliniaAS = 0x0226
    LifeBEAMTechnologies = 0x0227
    TwocanoesLabsLLC = 0x0228
    MuovertiLimited = 0x0229
    StamerMusikanlagenGMBH = 0x022a
    TeslaMotors = 0x022b
    PharynksCorporation = 0x022c
    Lupine = 0x022d
    SiemensAG = 0x022e
    HuamiCultureCommunicationCOLTD = 0x022f
    FosterElectricCompanyLtd = 0x0230
    ETASA = 0x0231
    xSensoSolutionsKft = 0x0232
    ShenzhenSuLongCommunicationLtd = 0x0233
    FengFanTechnologyCoLtd = 0x0234
    QrioInc = 0x0235
    PitpatpetLtd = 0x0236
    MSHelisrl = 0x0237
    Trakm8Ltd = 0x0238
    JINCOLtd = 0x0239
    AlatechTechnology = 0x023a
    BeijingCarePulseElectronicTechnologyCoLtd = 0x023b
    Awarepoint = 0x023c
    ViCentraBV = 0x023d
    RavenIndustries = 0x023e
    WaveWareTechnologies = 0x023f
    ArgenoxTechnologies = 0x0240
    BragiGmbH = 0x0241
    SixteenLabInc = 0x0242
    MasimoCorp = 0x0243
    IoteraInc = 0x0244
    EndressHauser = 0x0245
    ACKmeNetworksInc = 0x0246
    FiftyThreeInc = 0x0247
    ParkerHannifinCorp = 0x0248
    TranscranialLtd = 0x0249
    UwatecAG = 0x024a
    OrlanLLC = 0x024b
    BlueCloverDevices = 0x024c
    MWaySolutionsGmbH = 0x024d
    MicrotronicsEngineeringGmbH = 0x024e
    SchneiderSchreibgerteGmbH = 0x024f
    SapphireCircuitsLLC = 0x0250
    LumoBodytechInc = 0x0251
    UKCTechnosolution = 0x0252
    XicatoInc = 0x0253
    Playbrush = 0x0254
    DaiNipponPrintingCoLtd = 0x0255
    G24PowerLimited = 0x0256
    AdBabbleLocalCommerceInc = 0x0257
    DevialetSA = 0x0258
    ALTYOR = 0x0259
    UniversityofAppliedSciencesValaisHauteEcoleValaisanne = 0x025a
    FiveInteractiveLLCdbaZendo = 0x025b
    NetEaseNetworkcoLtd = 0x025c
    LexmarkInternationalInc = 0x025d
    FlukeCorporation = 0x025e
    YardarmTechnologies = 0x025f
    SensaRx = 0x0260
    SECVREGmbH = 0x0261
    GlacialRidgeTechnologies = 0x0262
    IdentivInc = 0x0263
    DDSInc = 0x0264
    SMKCorporation = 0x0265
    SchawbelTechnologiesLLC = 0x0266
    XMISystemsSA = 0x0267
    Cerevo = 0x0268
    TorroxGmbHCoKG = 0x0269
    Gemalto = 0x026a
    DEKAResearchDevelopmentCorp = 0x026b
    DomsterTadeuszSzydlowski = 0x026c
    TechnogymSPA = 0x026d
    FLEURBAEYBVBA = 0x026e
    AptcodeSolutions = 0x026f
    LSIADLTechnology = 0x0270
    AnimasCorp = 0x0271
    AlpsElectricCoLtd = 0x0272
    OCEASOFT = 0x0273
    MotsaiResearch = 0x0274
    Geotab = 0x0275
    EGOElektroGertebauGmbH = 0x0276
    bewhereinc = 0x0277
    JohnsonOutdoorsInc = 0x0278
    steuteSchaltgerateGmbHCoKG = 0x0279
    Ekominiinc = 0x027a
    DEFAAS = 0x027b
    AseptikaLtd = 0x027c
    HUAWEITechnologiesCoLtd = 0x027d
    HabitAwareLLC = 0x027e
    ruwidoaustriagmbh = 0x027f
    ITECcorporation = 0x0280
    StoneL = 0x0281
    SonovaAG = 0x0282
    MavenMachinesInc = 0x0283
    SynapseElectronics = 0x0284
    StandardInnovationInc = 0x0285
    RFCodeInc = 0x0286
    WallyVenturesSL = 0x0287
    WillowbankElectronicsLtd = 0x0288
    SKTelecom = 0x0289
    JetroAS = 0x028a
    CodeGearsLTD = 0x028b
    NANOLINKAPS = 0x028c
    IFLLC = 0x028d
    RFDigitalCorp = 0x028e
    ChurchDwightCoInc = 0x028f
    MultibitOy = 0x0290
    CliniCloudInc = 0x0291
    SwiftSensors = 0x0292
    BlueBite = 0x0293
    ELIASGmbH = 0x0294
    SivantosGmbH = 0x0295
    Petzl = 0x0296
    stormpowerltd = 0x0297
    EISSTLtd = 0x0298
    InexessTechnologySimmaKG = 0x0299
    CurrantInc = 0x029a
    C2DevelopmentInc = 0x029b
    BlueSkyScientificLLCA = 0x029c
    ALOTTAZSLABSLLC = 0x029d
    Kupsonspolsro = 0x029e
    AreusEngineeringGmbH = 0x029f
    ImpossibleCameraGmbH = 0x02a0
    InventureTrackSystems = 0x02a1
    LockedUp = 0x02a2
    Itude = 0x02a3
    PacificLockCompany = 0x02a4
    TendyronCorporation = 0x02a5
    RobertBoschGmbH = 0x02a6
    IlluxtroninternationalBV = 0x02a7
    miSportLtd = 0x02a8
    Chargelib = 0x02a9
    DopplerLab = 0x02aa
    BBPOSLimited = 0x02ab
    RTBElektronikGmbHCoKG = 0x02ac
    RxNetworksInc = 0x02ad
    WeatherFlowInc = 0x02ae
    TechnicolorUSAInc = 0x02af
    BestechnicLtd = 0x02b0
    RadenInc = 0x02b1
    JouZenOy = 0x02b2
    CLABERSPA = 0x02b3
    HyginexInc = 0x02b4
    HANSHINELECTRICRAILWAYCOLTD = 0x02b5
    SchneiderElectric = 0x02b6
    OortTechnologiesLLC = 0x02b7
    ChronoTherapeutics = 0x02b8
    RinnaiCorporation = 0x02b9
    SwissprimeTechnologiesAG = 0x02ba
    KohaCoLtd = 0x02bb
    GenevacLtd = 0x02bc
    Chemtronics = 0x02bd
    SeguroTechnologySpzoo = 0x02be
    RedbirdFlightSimulations = 0x02bf
    DashRobotics = 0x02c0
    LINECorporation = 0x02c1
    GuillemotCorporation = 0x02c2
    TechtronicPowerToolsTechnologyLimited = 0x02c3
    WilsonSportingGoods = 0x02c4
    LenovoPteLtd = 0x02c5
    AyatanSensors = 0x02c6
    ElectronicsTomorrowLimited = 0x02c7
    VASCODataSecurityInternationalInc = 0x02c8
    PayRangeInc = 0x02c9
    ABOVSemiconductor = 0x02ca
    AINAWirelessInc = 0x02cb
    EijkelkampSoilWater = 0x02cc
    BMAergonomicsbv = 0x02cd
    TevaBrandedPharmaceuticalProductsRDInc = 0x02ce
    Anima = 0x02cf
    ThreeM = 0x02d0
    EmpaticaSrl = 0x02d1
    AferoInc = 0x02d2
    PowercastCorporation = 0x02d3
    SecuyouApS = 0x02d4
    OMRONCorporation = 0x02d5
    SendSolutions = 0x02d6
    NIPPONSYSTEMWARECOLTD = 0x02d7
    Neosfar = 0x02d8
    FlieglAgrartechnikGmbH = 0x02d9
    Gilvader = 0x02da
    DigiInternationalInc = 0x02db
    DeWalchTechnologiesInc = 0x02dc
    FlintRehabilitationDevicesLLC = 0x02dd
    SamsungSDSCoLtd = 0x02de
    BlurProductDevelopment = 0x02df
    UniversityofMichigan = 0x02e0
    VictronEnergyBV = 0x02e1
    NTTdocomo = 0x02e2
    CarmanahTechnologiesCorp = 0x02e3
    BytestormLtd = 0x02e4
    EspressifIncorporated = 0x02e5
    Unwire = 0x02e6
    ConnectedYardInc = 0x02e7
    AmericanMusicEnvironments = 0x02e8
    SensogramTechnologiesInc = 0x02e9
    FujitsuLimited = 0x02ea
    ArdicTechnology = 0x02eb
    DeltaSystemsInc = 0x02ec
    HTCCorporation = 0x02ed
    CitizenHoldingsCoLtd = 0x02ee
    SMARTINNOVATIONinc = 0x02ef
    BlackratSoftware = 0x02f0
    TheIdeaCaveLLC = 0x02f1
    GoProInc = 0x02f2
    AuthAirInc = 0x02f3
    VensiInc = 0x02f4
    IndagemTechLLC = 0x02f5
    IntemoTechnologies = 0x02f6
    DreamVisionscoLtd = 0x02f7
    RunteqOyLtd = 0x02f8
    IMAGINATIONTECHNOLOGIESLTD = 0x02f9
    CoSTARTechnologies = 0x02fa
    ClariusMobileHealthCorp = 0x02fb
    ShanghaiFrequenMicroelectronicsCoLtd = 0x02fc
    UwannaInc = 0x02fd
    LierdaScienceTechnologyGroupCoLtd = 0x02fe
    SiliconLaboratories = 0x02ff
    WorldMotoInc = 0x0300
    GiatecScientificInc = 0x0301
    LoopDevicesInc = 0x0302
    IACAelectronique = 0x0303
    MartiansInc = 0x0304
    SwippApS = 0x0305
    LifeLaboratoryInc = 0x0306
    FUJIINDUSTRIALCOLTD = 0x0307
    SurefireLLC = 0x0308
    DolbyLabs = 0x0309
    Ellisys = 0x030a
    MagnitudeLightingConverters = 0x030b
    HiltiAG = 0x030c
    DevdataSrl = 0x030d
    Deviceworx = 0x030e
    ShortcutLabs = 0x030f
    SGLItaliaSrl = 0x0310
    PEEQDATA = 0x0311
    DucereTechnologiesPvtLtd = 0x0312
    DiveNavInc = 0x0313
    RIIGAISpzoo = 0x0314
    ThermoFisherScientific = 0x0315
    AGMeasurematicsPvtLtd = 0x0316
    CHUOElectronicsCOLTD = 0x0317
    AspentaInternational = 0x0318
    EugsterFrismagAG = 0x0319
    AmberwirelessGmbH = 0x031a
    HQInc = 0x031b
    LabSensorSolutions = 0x031c
    EnterlabApS = 0x031d
    EyefiInc = 0x031e
    MetaSystemSpA = 0x031f
    SONOELECTRONICSCOLTD = 0x0320
    Jewelbots = 0x0321
    CompumedicsLimited = 0x0322
    RotorBikeComponents = 0x0323
    AstroInc = 0x0324
    AmotusSolutions = 0x0325
    HealthwearTechnologiesLtd = 0x0326
    EssexElectronics = 0x0327
    GrundfosAS = 0x0328
    EargoInc = 0x0329
    ElectronicDesignLab = 0x032a
    ESYLUX = 0x032b
    NIPPONSMTCOLtd = 0x032c
    BMinnovationsGmbH = 0x032d
    indoormap = 0x032e
    OttoQInc = 0x032f
    NorthPoleEngineering = 0x0330
    ThreeFlaresTechnologiesInc = 0x0331
    ElectrocompanietAS = 0x0332
    MulTLock = 0x0333
    CorentiumAS = 0x0334
    EnlightedInc = 0x0335
    GISTIC = 0x0336
    AJP2HoldingsLLC = 0x0337
    COBIGmbH = 0x0338
    BlueSkyScientificLLCB = 0x0339
    AppceptionInc = 0x033a
    CourtneyThorneLimited = 0x033b
    Virtuosys = 0x033c
    TPVTechnologyLimited = 0x033d
    MonitraSA = 0x033e
    AutomationComponentsInc = 0x033f
    Letsensesrl = 0x0340
    EtesianTechnologiesLLC = 0x0341
    GERTECBRASILLTDA = 0x0342
    DrekkerDevelopmentPtyLtd = 0x0343
    WhirlInc = 0x0344
    LocusPositioning = 0x0345
    AcuityBrandsLightingInc = 0x0346
    PreventBiometrics = 0x0347
    Arioneo = 0x0348
    VersaMe = 0x0349
    Vaddio = 0x034a
    LibratoneAS = 0x034b
    HMElectronicsInc = 0x034c
    TASERInternationalInc = 0x034d
    SafeTrustInc = 0x034e
    HeartlandPaymentSystems = 0x034f
    BitstrataSystemsInc = 0x0350
    PiepsGmbH = 0x0351
    iRidingTechnologyCoLtd = 0x0352
    AlphaAudiotronicsInc = 0x0353
    TOPPANFORMSCOLTD = 0x0354
    SigmaDesignsInc = 0x0355
    # Sentinel for an unassigned/reserved company identifier.
    RESERVED = 0xffff
ALL_16BIT_UUIDS = {
0x0001: "SDP",
0x0003: "RFCOMM",
0x0005: "TCS-BIN",
0x0007: "ATT",
0x0008: "OBEX",
0x000f: "BNEP",
0x0010: "UPNP",
0x0011: "HIDP",
0x0012: "Hardcopy Control Channel",
0x0014: "Hardcopy Data Channel",
0x0016: "Hardcopy Notification",
0x0017: "AVCTP",
0x0019: "AVDTP",
0x001b: "CMTP",
0x001e: "MCAP Control Channel",
0x001f: "MCAP Data Channel",
0x0100: "L2CAP",
# 0x0101 to 0x0fff undefined */
0x1000: "Service Discovery Server Service Class",
0x1001: "Browse Group Descriptor Service Class",
0x1002: "Public Browse Root",
# 0x1003 to 0x1100 undefined */
0x1101: "Serial Port",
0x1102: "LAN Access Using PPP",
0x1103: "Dialup Networking",
0x1104: "IrMC Sync",
0x1105: "OBEX Object Push",
0x1106: "OBEX File Transfer",
0x1107: "IrMC Sync Command",
0x1108: "Headset",
0x1109: "Cordless Telephony",
0x110a: "Audio Source",
0x110b: "Audio Sink",
0x110c: "A/V Remote Control Target",
0x110d: "Advanced Audio Distribution",
0x110e: "A/V Remote Control",
0x110f: "A/V Remote Control Controller",
0x1110: "Intercom",
0x1111: "Fax",
0x1112: "Headset AG",
0x1113: "WAP",
0x1114: "WAP Client",
0x1115: "PANU",
0x1116: "NAP",
0x1117: "GN",
0x1118: "Direct Printing",
0x1119: "Reference Printing",
0x111a: "Basic Imaging Profile",
0x111b: "Imaging Responder",
0x111c: "Imaging Automatic Archive",
0x111d: "Imaging Referenced Objects",
0x111e: "Handsfree",
0x111f: "Handsfree Audio Gateway",
0x1120: "Direct Printing Refrence Objects Service",
0x1121: "Reflected UI",
0x1122: "Basic Printing",
0x1123: "Printing Status",
0x1124: "Human Interface Device Service",
0x1125: "Hardcopy Cable Replacement",
0x1126: "HCR Print",
0x1127: "HCR Scan",
0x1128: "Common ISDN Access",
# 0x1129 and 0x112a undefined */
0x112d: "SIM Access",
0x112e: "Phonebook Access Client",
0x112f: "Phonebook Access Server",
0x1130: "Phonebook Access",
0x1131: "Headset HS",
0x1132: "Message Access Server",
0x1133: "Message Notification Server",
0x1134: "Message Access Profile",
0x1135: "GNSS",
0x1136: "GNSS Server",
0x1137: "3D Display",
0x1138: "3D Glasses",
0x1139: "3D Synchronization",
0x113a: "MPS Profile",
0x113b: "MPS Service",
# 0x113c to 0x11ff undefined */
0x1200: "PnP Information",
0x1201: "Generic Networking",
0x1202: "Generic File Transfer",
0x1203: "Generic Audio",
0x1204: "Generic Telephony",
0x1205: "UPNP Service",
0x1206: "UPNP IP Service",
0x1300: "UPNP IP PAN",
0x1301: "UPNP IP LAP",
0x1302: "UPNP IP L2CAP",
0x1303: "Video Source",
0x1304: "Video Sink",
0x1305: "Video Distribution",
# 0x1306 to 0x13ff undefined */
0x1400: "HDP",
0x1401: "HDP Source",
0x1402: "HDP Sink",
# 0x1403 to 0x17ff undefined */
0x1800: "Generic Access Profile",
0x1801: "Generic Attribute Profile",
0x1802: "Immediate Alert",
0x1803: "Link Loss",
0x1804: "Tx Power",
0x1805: "Current Time Service",
0x1806: "Reference Time Update Service",
0x1807: "Next DST Change Service",
0x1808: "Glucose",
0x1809: "Health Thermometer",
0x180a: "Device Information",
# 0x180b and 0x180c undefined */
0x180d: "Heart Rate",
0x180e: "Phone Alert Status Service",
0x180f: "Battery Service",
0x1810: "Blood Pressure",
0x1811: "Alert Notification Service",
0x1812: "Human Interface Device",
0x1813: "Scan Parameters",
0x1814: "Running Speed and Cadence",
0x1815: "Automation IO",
0x1816: "Cycling Speed and Cadence",
# 0x1817 undefined */
0x1818: "Cycling Power",
0x1819: "Location and Navigation",
0x181a: "Environmental Sensing",
0x181b: "Body Composition",
0x181c: "User Data",
0x181d: "Weight Scale",
0x181e: "Bond Management",
0x181f: "Continuous Glucose Monitoring",
0x1820: "Internet Protocol Support",
0x1821: "Indoor Positioning",
0x1822: "Pulse Oximeter",
0x1823: "HTTP Proxy",
0x1824: "Transport Discovery",
0x1825: "Object Transfer",
# 0x1824 to 0x27ff undefined */
0x2800: "Primary Service",
0x2801: "Secondary Service",
0x2802: "Include",
0x2803: "Characteristic",
# 0x2804 to 0x28ff undefined */
0x2900: "Characteristic Extended Properties",
0x2901: "Characteristic User Description",
0x2902: "Client Characteristic Configuration",
0x2903: "Server Characteristic Configuration",
0x2904: "Characteristic Format",
0x2905: "Characteristic Aggregate Formate",
0x2906: "Valid Range",
0x2907: "External Report Reference",
0x2908: "Report Reference",
0x2909: "Number of Digitals",
0x290a: "Value Trigger Setting",
0x290b: "Environmental Sensing Configuration",
0x290c: "Environmental Sensing Measurement",
0x290d: "Environmental Sensing Trigger Setting",
0x290e: "Time Trigger Setting",
# 0x290f to 0x29ff undefined */
0x2a00: "Device Name",
0x2a01: "Appearance",
0x2a02: "Peripheral Privacy Flag",
0x2a03: "Reconnection Address",
0x2a04: "Peripheral Preferred Connection Parameters",
0x2a05: "Service Changed",
0x2a06: "Alert Level",
0x2a07: "Tx Power Level",
0x2a08: "Date Time",
0x2a09: "Day of Week",
0x2a0a: "Day Date Time",
# 0x2a0b undefined */
0x2a0c: "Exact Time 256",
0x2a0d: "DST Offset",
0x2a0e: "Time Zone",
0x2a0f: "Local Time Information",
# 0x2a10 undefined */
0x2a11: "Time with DST",
0x2a12: "Time Accuracy",
0x2a13: "Time Source",
0x2a14: "Reference Time Information",
# 0x2a15 undefined */
0x2a16: "Time Update Control Point",
0x2a17: "Time Update State",
0x2a18: "Glucose Measurement",
0x2a19: "Battery Level",
# 0x2a1a and 0x2a1b undefined */
0x2a1c: "Temperature Measurement",
0x2a1d: "Temperature Type",
0x2a1e: "Intermediate Temperature",
# 0x2a1f and 0x2a20 undefined */
0x2a21: "Measurement Interval",
0x2a22: "Boot Keyboard Input Report",
0x2a23: "System ID",
0x2a24: "Model Number String",
0x2a25: "Serial Number String",
0x2a26: "Firmware Revision String",
0x2a27: "Hardware Revision String",
0x2a28: "Software Revision String",
0x2a29: "Manufacturer Name String",
0x2a2a: "IEEE 11073-20601 Regulatory Cert. Data List",
0x2a2b: "Current Time",
0x2a2c: "Magnetic Declination",
# 0x2a2d to 0x2a30 undefined */
0x2a31: "Scan Refresh",
0x2a32: "Boot Keyboard Output Report",
0x2a33: "Boot Mouse Input Report",
0x2a34: "Glucose Measurement Context",
0x2a35: "Blood Pressure Measurement",
0x2a36: "Intermediate Cuff Pressure",
0x2a37: "Heart Rate Measurement",
0x2a38: "Body Sensor Location",
0x2a39: "Heart Rate Control Point",
# 0x2a3a to 0x2a3e undefined */
0x2a3f: "Alert Status",
0x2a40: "Ringer Control Point",
0x2a41: "Ringer Setting",
0x2a42: "Alert Category ID Bit Mask",
0x2a43: "Alert Category ID",
0x2a44: "Alert Notification Control Point",
0x2a45: "Unread Alert Status",
0x2a46: "New Alert",
0x2a47: "Supported New Alert Category",
0x2a48: "Supported Unread Alert Category",
0x2a49: "Blood Pressure Feature",
0x2a4a: "HID Information",
0x2a4b: "Report Map",
0x2a4c: "HID Control Point",
0x2a4d: "Report",
0x2a4e: "Protocol Mode",
0x2a4f: "Scan Interval Window",
0x2a50: "PnP ID",
0x2a51: "Glucose Feature",
0x2a52: "Record Access Control Point",
0x2a53: "RSC Measurement",
0x2a54: "RSC Feature",
0x2a55: "SC Control Point",
0x2a56: "Digital",
# 0x2a57 undefined */
0x2a58: "Analog",
# 0x2a59 undefined */
0x2a5a: "Aggregate",
0x2a5b: "CSC Measurement",
0x2a5c: "CSC Feature",
0x2a5d: "Sensor Location",
# 0x2a5e to 0x2a62 undefined */
0x2a63: "Cycling Power Measurement",
0x2a64: "Cycling Power Vector",
0x2a65: "Cycling Power Feature",
0x2a66: "Cycling Power Control Point",
0x2a67: "Location and Speed",
0x2a68: "Navigation",
0x2a69: "Position Quality",
0x2a6a: "LN Feature",
0x2a6b: "LN Control Point",
0x2a6c: "Elevation",
0x2a6d: "Pressure",
0x2a6e: "Temperature",
0x2a6f: "Humidity",
0x2a70: "True Wind Speed",
0x2a71: "True Wind Direction",
0x2a72: "Apparent Wind Speed",
0x2a73: "Apparent Wind Direction",
0x2a74: "Gust Factor",
0x2a75: "Pollen Concentration",
0x2a76: "UV Index",
0x2a77: "Irradiance",
0x2a78: "Rainfall",
0x2a79: "Wind Chill",
0x2a7a: "Heat Index",
0x2a7b: "Dew Point",
0x2a7c: "Trend",
0x2a7d: "Descriptor Value Changed",
0x2a7e: "Aerobic Heart Rate Lower Limit",
0x2a7f: "Aerobic Threshold",
0x2a80: "Age",
0x2a81: "Anaerobic Heart Rate Lower Limit",
0x2a82: "Anaerobic Heart Rate Upper Limit",
0x2a83: "Anaerobic Threshold",
0x2a84: "Aerobic Heart Rate Upper Limit",
0x2a85: "Date of Birth",
0x2a86: "Date of Threshold Assessment",
0x2a87: "Email Address",
0x2a88: "Fat Burn Heart Rate Lower Limit",
0x2a89: "Fat Burn Heart Rate Upper Limit",
0x2a8a: "First Name",
0x2a8b: "Five Zone Heart Rate Limits",
0x2a8c: "Gender",
0x2a8d: "Heart Rate Max",
0x2a8e: "Height",
0x2a8f: "Hip Circumference",
0x2a90: "Last Name",
0x2a91: "Maximum Recommended Heart Rate",
0x2a92: "Resting Heart Rate",
0x2a93: "Sport Type for Aerobic/Anaerobic Thresholds",
0x2a94: "Three Zone Heart Rate Limits",
0x2a95: "Two Zone Heart Rate Limit",
0x2a96: "VO2 Max",
0x2a97: "Waist Circumference",
0x2a98: "Weight",
0x2a99: "Database Change Increment",
0x2a9a: "User Index",
0x2a9b: "Body Composition Feature",
0x2a9c: "Body Composition Measurement",
0x2a9d: "Weight Measurement",
0x2a9e: "Weight Scale Feature",
0x2a9f: "User Control Point",
0x2aa0: "Magnetic Flux Density - 2D",
0x2aa1: "Magnetic Flux Density - 3D",
0x2aa2: "Language",
0x2aa3: "Barometric Pressure Trend",
0x2aa4: "Bond Management Control Point",
0x2aa5: "Bond Management Feature",
0x2aa6: "Central Address Resolution",
0x2aa7: "CGM Measurement",
0x2aa8: "CGM Feature",
0x2aa9: "CGM Status",
0x2aaa: "CGM Session Start Time",
0x2aab: "CGM Session Run Time",
0x2aac: "CGM Specific Ops Control Point",
0x2aad: "Indoor Positioning Configuration",
0x2aae: "Latitude",
0x2aaf: "Longitude",
0x2ab0: "Local North Coordinate",
0x2ab1: "Local East Coordinate",
0x2ab2: "Floor Number",
0x2ab3: "Altitude",
0x2ab4: "Uncertainty",
0x2ab5: "Location Name",
0x2ab6: "URI",
0x2ab7: "HTTP Headers",
0x2ab8: "HTTP Status Code",
0x2ab9: "HTTP Entity Body",
0x2aba: "HTTP Control Point",
0x2abb: "HTTPS Security",
0x2abc: "TDS Control Point",
0x2abd: "OTS Feature",
0x2abe: "Object Name",
0x2abf: "Object Type",
0x2ac0: "Object Size",
0x2ac1: "Object First-Created",
0x2ac2: "Object Last-Modified",
0x2ac3: "Object ID",
0x2ac4: "Object Properties",
0x2ac5: "Object Action Control Point",
0x2ac6: "Object List Control Point",
0x2ac7: "Object List Filter",
0x2ac8: "Object Changed",
# vendor defined */
0xfeff: "GN Netcom",
0xfefe: "GN ReSound A/S",
0xfefd: "Gimbal, Inc.",
0xfefc: "Gimbal, Inc.",
0xfefb: "Stollmann E+V GmbH",
0xfefa: "PayPal, Inc.",
0xfef9: "PayPal, Inc.",
0xfef8: "Aplix Corporation",
0xfef7: "Aplix Corporation",
0xfef6: "Wicentric, Inc.",
0xfef5: "Dialog Semiconductor GmbH",
0xfef4: "Google",
0xfef3: "Google",
0xfef2: "CSR",
0xfef1: "CSR",
0xfef0: "Intel",
0xfeef: "Polar Electro Oy",
0xfeee: "Polar Electro Oy",
0xfeed: "Tile, Inc.",
0xfeec: "Tile, Inc.",
0xfeeb: "Swirl Networks, Inc.",
0xfeea: "Swirl Networks, Inc.",
0xfee9: "Quintic Corp.",
0xfee8: "Quintic Corp.",
0xfee7: "Tencent Holdings Limited",
0xfee6: "Seed Labs, Inc.",
0xfee5: "Nordic Semiconductor ASA",
0xfee4: "Nordic Semiconductor ASA",
0xfee3: "Anki, Inc.",
0xfee2: "Anki, Inc.",
0xfee1: "Anhui Huami Information Technology Co.",
0xfee0: "Anhui Huami Information Technology Co.",
0xfedf: "Design SHIFT",
0xfede: "Coin, Inc.",
0xfedd: "Jawbone",
0xfedc: "Jawbone",
0xfedb: "Perka, Inc.",
0xfeda: "ISSC Technologies Corporation",
0xfed9: "Pebble Technology Corporation",
0xfed8: "Google",
0xfed7: "Broadcom Corporation",
0xfed6: "Broadcom Corporation",
0xfed5: "Plantronics Inc.",
0xfed4: "Apple, Inc.",
0xfed3: "Apple, Inc.",
0xfed2: "Apple, Inc.",
0xfed1: "Apple, Inc.",
0xfed0: "Apple, Inc.",
0xfecf: "Apple, Inc.",
0xfece: "Apple, Inc.",
0xfecd: "Apple, Inc.",
0xfecc: "Apple, Inc.",
0xfecb: "Apple, Inc.",
0xfeca: "Apple, Inc.",
0xfec9: "Apple, Inc.",
0xfec8: "Apple, Inc.",
0xfec7: "Apple, Inc.",
0xfec6: "Kocomojo, LLC",
0xfec5: "Realtek Semiconductor Corp.",
0xfec4: "PLUS Location Systems",
0xfec3: "360fly, Inc.",
0xfec2: "Blue Spark Technologies, Inc.",
0xfec1: "KDDI Corporation",
0xfec0: "KDDI Corporation",
0xfebf: "Nod, Inc.",
0xfebe: "Bose Corporation",
0xfebd: "Clover Network, Inc.",
0xfebc: "Dexcom, Inc.",
0xfebb: "adafruit industries",
0xfeba: "Tencent Holdings Limited",
0xfeb9: "LG Electronics",
0xfeb8: "Facebook, Inc.",
0xfeb7: "Facebook, Inc.",
0xfeb6: "Vencer Co, Ltd",
0xfeb5: "WiSilica Inc.",
0xfeb4: "WiSilica Inc.",
0xfeb3: "Taobao",
0xfeb2: "Microsoft Corporation",
0xfeb1: "Electronics Tomorrow Limited",
0xfeb0: "Nest Labs Inc.",
0xfeaf: "Nest Labs Inc.",
0xfeae: "Nokia Corporation",
0xfead: "Nokia Corporation",
0xfeac: "Nokia Corporation",
0xfeab: "Nokia Corporation",
0xfeaa: "Google",
0xfea9: "Savant Systems LLC",
0xfea8: "Savant Systems LLC",
0xfea7: "UTC Fire and Security",
0xfea6: "GoPro, Inc.",
0xfea5: "GoPro, Inc.",
0xfea4: "Paxton Access Ltd",
0xfea3: "ITT Industries",
0xfea2: "Intrepid Control Systems, Inc.",
0xfea1: "Intrepid Control Systems, Inc.",
0xfea0: "Google",
0xfe9f: "Google",
0xfe9e: "Dialog Semiconductor B.V.",
0xfe9d: "Mobiquity Networks Inc",
0xfe9c: "GSI Laboratories, Inc.",
0xfe9b: "Samsara Networks, Inc",
0xfe9a: "Estimote",
0xfe99: "Currant, Inc.",
0xfe98: "Currant, Inc.",
0xfe97: "Tesla Motor Inc.",
0xfe96: "Tesla Motor Inc.",
0xfe95: "Xiaomi Inc.",
0xfe94: "OttoQ Inc.",
0xfe93: "OttoQ Inc.",
0xfe92: "Jarden Safety & Security",
0xfe91: "Shanghai Imilab Technology Co.,Ltd",
0xfe90: "JUMA",
0xfe8f: "CSR",
0xfe8e: "ARM Ltd",
0xfe8d: "Interaxon Inc.",
0xfe8c: "TRON Forum",
0xfe8b: "Apple, Inc.",
0xfe8a: "Apple, Inc.",
0xfe89: "B&O Play A/S",
0xfe88: "SALTO SYSTEMS S.L.",
0xfe87: "Qingdao Yeelink Information Technology Co., Ltd. ( 青岛亿联客信息技术有限公司 )",
0xfe86: "HUAWEI Technologies Co., Ltd. ( 华为技术有限公司 )",
0xfe85: "RF Digital Corp",
0xfe84: "RF Digital Corp",
0xfe83: "Blue Bite",
0xfe82: "Medtronic Inc.",
0xfe81: "Medtronic Inc.",
0xfe80: "Doppler Lab",
0xfe7f: "Doppler Lab",
0xfe7e: "Awear Solutions Ltd",
0xfe7d: "Aterica Health Inc.",
0xfe7c: "Stollmann E+V GmbH",
0xfe7b: "Orion Labs, Inc.",
0xfe7a: "Bragi GmbH",
0xfe79: "Zebra Technologies",
0xfe78: "Hewlett-Packard Company",
0xfe77: "Hewlett-Packard Company",
0xfe76: "TangoMe",
0xfe75: "TangoMe",
0xfe74: "unwire",
0xfe73: "St. Jude Medical, Inc.",
0xfe72: "St. Jude Medical, Inc.",
0xfe71: "Plume Design Inc",
0xfe70: "Beijing Jingdong Century Trading Co., Ltd.",
0xfe6f: "LINE Corporation",
0xfe6e: "The University of Tokyo",
0xfe6d: "The University of Tokyo",
0xfe6c: "TASER International, Inc.",
0xfe6b: "TASER International, Inc.",
0xfe6a: "Kontakt Micro-Location Sp. z o.o.",
0xfe69: "Qualcomm Life Inc",
0xfe68: "Qualcomm Life Inc",
0xfe67: "Lab Sensor Solutions",
0xfe66: "Intel Corporation",
# SDO defined */
0xfffe: "Alliance for Wireless Power (A4WP)",
0xfffd: "Fast IDentity Online Alliance (FIDO)",
}
# Known 128-bit GATT UUIDs (lowercase hex strings) -> human-readable names.
# Covers the Eddystone configuration service, the BBC micro:bit profiles and
# the Nordic UART port emulation service. Consulted before falling back to
# the 16-bit table for UUIDs built on the Bluetooth Base UUID.
ALL_128BIT_UUIDS = {
    "a3c87500-8ed3-4bdf-8a39-a01bebede295": "Eddystone Configuration Service",
    "a3c87501-8ed3-4bdf-8a39-a01bebede295": "Capabilities",
    "a3c87502-8ed3-4bdf-8a39-a01bebede295": "Active Slot",
    "a3c87503-8ed3-4bdf-8a39-a01bebede295": "Advertising Interval",
    "a3c87504-8ed3-4bdf-8a39-a01bebede295": "Radio Tx Power",
    "a3c87505-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Advertised Tx Power",
    "a3c87506-8ed3-4bdf-8a39-a01bebede295": "Lock State",
    "a3c87507-8ed3-4bdf-8a39-a01bebede295": "Unlock",
    "a3c87508-8ed3-4bdf-8a39-a01bebede295": "Public ECDH Key",
    "a3c87509-8ed3-4bdf-8a39-a01bebede295": "EID Identity Key",
    "a3c8750a-8ed3-4bdf-8a39-a01bebede295": "ADV Slot Data",
    "a3c8750b-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Factory reset",
    "a3c8750c-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Remain Connectable",
    # BBC micro:bit Bluetooth Profiles */
    "e95d0753-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Service",
    "e95dca4b-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Data",
    "e95dfb24-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Period",
    "e95df2d8-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Service",
    "e95dfb11-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Data",
    "e95d386c-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Period",
    "e95d9715-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Bearing",
    "e95d9882-251d-470a-a062-fa1922dfa9a8": "MicroBit Button Service",
    "e95dda90-251d-470a-a062-fa1922dfa9a8": "MicroBit Button A State",
    "e95dda91-251d-470a-a062-fa1922dfa9a8": "MicroBit Button B State",
    "e95d127b-251d-470a-a062-fa1922dfa9a8": "MicroBit IO PIN Service",
    "e95d8d00-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN Data",
    "e95d5899-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN AD Configuration",
    "e95dd822-251d-470a-a062-fa1922dfa9a8": "MicroBit PWM Control",
    "e95dd91d-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Service",
    "e95d7b77-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Matrix state",
    "e95d93ee-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Text",
    "e95d0d2d-251d-470a-a062-fa1922dfa9a8": "MicroBit Scrolling Delay",
    "e95d93af-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Service",
    "e95db84c-251d-470a-a062-fa1922dfa9a8": "MicroBit Requirements",
    "e95d9775-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Data",
    "e95d23c4-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Requirements",
    "e95d5404-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Events",
    "e95d93b0-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control Service",
    "e95d93b1-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control",
    "e95d6100-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Service",
    "e95d1b25-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Period",
    # Nordic UART Port Emulation */
    "6e400001-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART Service",
    "6e400002-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART TX",
    "6e400003-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART RX",
}
def uuid_to_string(uuid):
    """
    For a given UUID string, try to determine the textual equivalent
    of the GATT service or characteristic.

    Args:
        uuid: a 36-character UUID string such as
            "0000180f-0000-1000-8000-00805f9b34fb". Matching is
            case-insensitive (lookup tables are keyed in lowercase).

    Returns:
        The human-readable name, or None if the UUID is not recognized.

    Raises:
        TypeError: if *uuid* is not a string.
        ValueError: if *uuid* is not exactly 36 characters long.
    """
    if not isinstance(uuid, str):
        raise TypeError("Expected a UUID string.")
    if len(uuid) != 36:
        raise ValueError("Expected the UUID string to be 36 characters long.")
    # The lookup tables use lowercase hex; normalize so uppercase input
    # (previously an unconditional miss) resolves too.
    uuid = uuid.lower()
    uuid_text = ALL_128BIT_UUIDS.get(uuid, None)
    if uuid_text is not None:
        return uuid_text
    # A UUID built on the Bluetooth Base UUID carries a registered 16-bit
    # UUID in its first 8 hex digits.
    if uuid.endswith("-0000-1000-8000-00805f9b34fb"):
        uuid_service = int(uuid[:8], 16)
        return ALL_16BIT_UUIDS.get(uuid_service, None)
    return None
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n#!/usr/bin/env python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright (c) 2010-2011 OpenStack, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file exce... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n#!/usr/bin/env python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright (c) 2010-2011 OpenStack, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not us... | ```python
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TOKEN-BASED AUTH MIDDLEWARE
This WSGI component performs multiple jobs:
* it verifies that incoming client requests have valid tokens by verifying
tokens with the auth service.
* it will reject unauthenticated requests UNLESS it is in 'delay_auth_decision'
mode, which means the final decision is delegated to the downstream WSGI
component (usually the OpenStack service)
* it will collect and forward identity information from a valid token
such as user name etc...
Refer to: http://wiki.openstack.org/openstack-authn
HEADERS
-------
* Headers starting with HTTP\_ is a standard http header
* Headers starting with HTTP_X is an extended http header
Coming in from initial call from client or customer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
HTTP_X_AUTH_TOKEN
the client token being passed in
HTTP_X_STORAGE_TOKEN
the client token being passed in (legacy Rackspace use) to support
cloud files
Used for communication between components
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
www-authenticate
only used if this component is being used remotely
HTTP_AUTHORIZATION
basic auth password used to validate the connection
What we add to the request for use by the OpenStack service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
HTTP_X_AUTHORIZATION
the client identity being passed in
"""
import eventlet
from eventlet import wsgi
import httplib
import json
import os

from paste.deploy import loadapp
from urlparse import urlparse
# BUG FIX: Request and Response are defined in the top-level webob package,
# not in webob.exc -- the previous "from webob.exc import Request, Response"
# raised ImportError as soon as this module was loaded.
from webob import Request, Response
from webob.exc import HTTPUnauthorized

import keystone.tools.tracer  # @UnusedImport # module runs on import
from keystone.common.bufferedhttp import http_connect_raw as http_connect

PROTOCOL_NAME = "Token Authentication"
class AuthProtocol(object):
    """Auth Middleware that handles authenticating client calls.

    Python 2 WSGI middleware: validates the client token against the auth
    (Keystone) service and either forwards the request to the downstream
    WSGI app (self.app) or proxies it to a remote OpenStack service.
    """

    def _init_protocol_common(self, app, conf):
        """ Common initialization code"""
        print "Starting the %s component" % PROTOCOL_NAME

        self.conf = conf
        self.app = app
        #if app is set, then we are in a WSGI pipeline and requests get passed
        # on to app. If it is not set, this component should forward requests

        # where to find the OpenStack service (if not in local WSGI chain)
        # these settings are only used if this component is acting as a proxy
        # and the OpenSTack service is running remotely
        self.service_protocol = conf.get('service_protocol', 'https')
        self.service_host = conf.get('service_host')
        self.service_port = int(conf.get('service_port'))
        self.service_url = '%s://%s:%s' % (self.service_protocol,
                                           self.service_host,
                                           self.service_port)
        # used to verify this component with the OpenStack service or PAPIAuth
        self.service_pass = conf.get('service_pass')
        # delay_auth_decision means we still allow unauthenticated requests
        # through and we let the downstream service make the final decision
        self.delay_auth_decision = int(conf.get('delay_auth_decision', 0))

    def _init_protocol(self, conf):
        """ Protocol specific initialization """
        # where to find the auth service (we use this to validate tokens)
        self.auth_host = conf.get('auth_host')
        self.auth_port = int(conf.get('auth_port'))
        self.auth_protocol = conf.get('auth_protocol', 'https')

        # where to tell clients to find the auth service (default to url
        # constructed based on endpoint we have for the service to use)
        self.auth_location = conf.get('auth_uri',
                                      "%s://%s:%s" % (self.auth_protocol,
                                                      self.auth_host,
                                                      self.auth_port))

        # Credentials used to verify this component with the Auth service since
        # validating tokens is a privileged call
        self.admin_token = conf.get('admin_token')

    def __init__(self, app, conf):
        """ Common initialization code """
        #TODO(ziad): maybe we refactor this into a superclass
        self._init_protocol_common(app, conf)  # Applies to all protocols
        self._init_protocol(conf)  # Specific to this protocol

    def __call__(self, env, start_response):
        """ Handle incoming request. Authenticate. And send downstream. """
        #Prep headers to forward request to local or remote downstream service
        proxy_headers = env.copy()
        # NOTE(review): inserting and deleting keys while iterating
        # iterkeys() raises RuntimeError on Python 2 dicts; this loop looks
        # like it should iterate over a snapshot (proxy_headers.keys()) --
        # confirm before relying on this path.
        for header in proxy_headers.iterkeys():
            if header[0:5] == 'HTTP_':
                proxy_headers[header[5:]] = proxy_headers[header]
                del proxy_headers[header]

        #Look for authentication claims
        claims = self._get_claims(env)
        if not claims:
            #No claim(s) provided
            if self.delay_auth_decision:
                #Configured to allow downstream service to make final decision.
                #So mark status as Invalid and forward the request downstream
                self._decorate_request("X_IDENTITY_STATUS",
                                       "Invalid", env, proxy_headers)
            else:
                #Respond to client as appropriate for this auth protocol
                return self._reject_request(env, start_response)
        else:
            # this request is presenting claims. Let's validate them
            valid = self._validate_claims(claims)
            if not valid:
                # Keystone rejected claim
                if self.delay_auth_decision:
                    # Downstream service will receive call still and decide
                    self._decorate_request("X_IDENTITY_STATUS",
                                           "Invalid", env, proxy_headers)
                else:
                    #Respond to client as appropriate for this auth protocol
                    return self._reject_claims(env, start_response)
            else:
                self._decorate_request("X_IDENTITY_STATUS",
                                       "Confirmed", env, proxy_headers)

            #Collect information about valid claims
            if valid:
                claims = self._expound_claims(claims)

                # Store authentication data
                if claims:
                    self._decorate_request('X_AUTHORIZATION', "Proxy %s" %
                        claims['user'], env, proxy_headers)

                    # For legacy compatibility before we had ID and Name
                    self._decorate_request('X_TENANT',
                        claims['tenant'], env, proxy_headers)

                    # Services should use these
                    self._decorate_request('X_TENANT_NAME',
                        claims.get('tenant_name', claims['tenant']),
                        env, proxy_headers)
                    self._decorate_request('X_TENANT_ID',
                        claims['tenant'], env, proxy_headers)

                    self._decorate_request('X_USER',
                        claims['user'], env, proxy_headers)
                    # Roles are forwarded as a single comma-separated header.
                    if 'roles' in claims and len(claims['roles']) > 0:
                        if claims['roles'] != None:
                            roles = ''
                            for role in claims['roles']:
                                if len(roles) > 0:
                                    roles += ','
                                roles += role
                            self._decorate_request('X_ROLE',
                                roles, env, proxy_headers)

                    # NOTE(todd): unused
                    self.expanded = True

        #Send request downstream
        return self._forward_request(env, start_response, proxy_headers)

    # NOTE(todd): unused
    def get_admin_auth_token(self, username, password):
        """
        This function gets an admin auth token to be used by this service to
        validate a user's token. Validate_token is a priviledged call so
        it needs to be authenticated by a service that is calling it
        """
        headers = {"Content-type": "application/json",
                   "Accept": "application/json"}
        params = {"passwordCredentials": {"username": username,
                                          "password": password,
                                          "tenantId": "1"}}
        conn = httplib.HTTPConnection("%s:%s" \
            % (self.auth_host, self.auth_port))
        conn.request("POST", "/v2.0/tokens", json.dumps(params), \
            headers=headers)
        response = conn.getresponse()
        data = response.read()
        return data

    def _get_claims(self, env):
        """Get claims from request"""
        # X-Auth-Token is preferred; X-Storage-Token is the legacy
        # Rackspace/cloud-files fallback.
        claims = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
        return claims

    def _reject_request(self, env, start_response):
        """Redirect client to auth server"""
        return HTTPUnauthorized("Authentication required",
                    [("WWW-Authenticate",
                      "Keystone uri='%s'" % self.auth_location)])(env,
                                                        start_response)

    def _reject_claims(self, env, start_response):
        """Client sent bad claims"""
        return HTTPUnauthorized()(env,
            start_response)

    def _validate_claims(self, claims):
        """Validate claims, and provide identity information isf applicable """

        # Step 1: We need to auth with the keystone service, so get an
        # admin token
        #TODO(ziad): Need to properly implement this, where to store creds
        # for now using token from ini
        #auth = self.get_admin_auth_token("admin", "secrete", "1")
        #admin_token = json.loads(auth)["auth"]["token"]["id"]

        # Step 2: validate the user's token with the auth service
        # since this is a priviledged op,m we need to auth ourselves
        # by using an admin token
        headers = {"Content-type": "application/json",
                   "Accept": "application/json",
                   "X-Auth-Token": self.admin_token}
        ##TODO(ziad):we need to figure out how to auth to keystone
        #since validate_token is a priviledged call
        #Khaled's version uses creds to get a token
        # "X-Auth-Token": admin_token}
        # we're using a test token from the ini file for now
        conn = http_connect(self.auth_host, self.auth_port, 'GET',
                            '/v2.0/tokens/%s' % claims, headers=headers)
        resp = conn.getresponse()
        # data = resp.read()
        conn.close()

        # Any 2xx status from Keystone means the token is valid.
        if not str(resp.status).startswith('20'):
            # Keystone rejected claim
            return False
        else:
            #TODO(Ziad): there is an optimization we can do here. We have just
            #received data from Keystone that we can use instead of making
            #another call in _expound_claims
            return True

    def _expound_claims(self, claims):
        # Valid token. Get user data and put it in to the call
        # so the downstream service can use it
        headers = {"Content-type": "application/json",
                   "Accept": "application/json",
                   "X-Auth-Token": self.admin_token}
        ##TODO(ziad):we need to figure out how to auth to keystone
        #since validate_token is a priviledged call
        #Khaled's version uses creds to get a token
        # "X-Auth-Token": admin_token}
        # we're using a test token from the ini file for now
        conn = http_connect(self.auth_host, self.auth_port, 'GET',
                            '/v2.0/tokens/%s' % claims, headers=headers)
        resp = conn.getresponse()
        data = resp.read()
        conn.close()

        if not str(resp.status).startswith('20'):
            raise LookupError('Unable to locate claims: %s' % resp.status)

        token_info = json.loads(data)
        roles = []
        role_refs = token_info["access"]["user"]["roles"]
        if role_refs != None:
            for role_ref in role_refs:
                # Nova looks for the non case-sensitive role 'Admin'
                # to determine admin-ness
                roles.append(role_ref["name"])

        # Tenant may live on the token (preferred) or on the user record.
        # NOTE(review): the bare except also swallows KeyboardInterrupt /
        # SystemExit; KeyError would be the targeted choice.
        try:
            tenant = token_info['access']['token']['tenant']['id']
            tenant_name = token_info['access']['token']['tenant']['name']
        except:
            tenant = None
            tenant_name = None
        if not tenant:
            tenant = token_info['access']['user'].get('tenantId')
            tenant_name = token_info['access']['user'].get('tenantName')
        verified_claims = {'user': token_info['access']['user']['username'],
                           'tenant': tenant,
                           'roles': roles}
        if tenant_name:
            verified_claims['tenantName'] = tenant_name
        return verified_claims

    def _decorate_request(self, index, value, env, proxy_headers):
        """Add headers to request"""
        proxy_headers[index] = value
        env["HTTP_%s" % index] = value

    def _forward_request(self, env, start_response, proxy_headers):
        """Token/Auth processed & claims added to headers"""
        self._decorate_request('AUTHORIZATION',
            "Basic %s" % self.service_pass, env, proxy_headers)
        #now decide how to pass on the call
        if self.app:
            # Pass to downstream WSGI component
            return self.app(env, start_response)
            #.custom_start_response)
        else:
            # We are forwarding to a remote service (no downstream WSGI app)
            req = Request(proxy_headers)
            parsed = urlparse(req.url)
            conn = http_connect(self.service_host,
                                self.service_port,
                                req.method,
                                parsed.path,
                                proxy_headers,
                                ssl=(self.service_protocol == 'https'))
            resp = conn.getresponse()
            data = resp.read()

            #TODO(ziad): use a more sophisticated proxy
            # we are rewriting the headers now

            # 401/305 responses get a pointer back to the auth service.
            if resp.status == 401 or resp.status == 305:
                # Add our own headers to the list
                headers = [("WWW_AUTHENTICATE",
                    "Keystone uri='%s'" % self.auth_location)]
                return Response(status=resp.status, body=data,
                                headerlist=headers)(env,
                                                    start_response)
            else:
                return Response(status=resp.status, body=data)(env,
                                                    start_response)
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    # Merge paste.deploy's global settings with the filter-local overrides.
    settings = dict(global_conf)
    settings.update(local_conf)

    def auth_filter(app):
        # Wrap the downstream WSGI app in the token-auth middleware.
        return AuthProtocol(app, settings)

    return auth_filter
def app_factory(global_conf, **local_conf):
    """Returns the middleware as a standalone WSGI app (no downstream app)."""
    merged = dict(global_conf)
    merged.update(local_conf)
    return AuthProtocol(None, merged)
if __name__ == "__main__":
    # Standalone mode: load the middleware from the example paste config
    # (two directories above this file) and serve it on port 8090.
    app = loadapp("config:" + \
        os.path.join(os.path.abspath(os.path.dirname(__file__)),
                     os.pardir,
                     os.pardir,
                     "examples/paste/auth_token.ini"),
                  global_conf={"log_name": "auth_token.log"})
    wsgi.server(eventlet.listen(('', 8090)), app)
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom scipy.stats import binned_statistic as bin_stat\nfrom lif import *\nfrom syn import *\n\nprefs.codegen.target = 'numpy'\ndefaultclock.dt = 1*ms\n\nparams = LifParams(constant_input=3)\nparams.update(SynParams())\nneurons = LifNeurons(... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom scipy.stats import binned_statistic as bin_stat\nfrom lif import *\nfrom syn import *\n\nprefs.codegen.target = 'numpy'\ndefaultclock.dt = 1*ms\n\nparams = LifParams(constant_input=3)\nparams.update(SynParams())\nneuro... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Simulate a 1000-neuron LIF network -- excitatory sources are indices
# 0..799, inhibitory sources 800..999 (per the connect() conditions below) --
# then plot the binned population firing rate and a spike raster.
from scipy.stats import binned_statistic as bin_stat
from lif import *
from syn import *

# Brian2 runtime configuration: plain-NumPy code generation, 1 ms timestep.
prefs.codegen.target = 'numpy'
defaultclock.dt = 1*ms

# Neuron parameters with a constant input drive, merged with synapse params.
params = LifParams(constant_input=3)
params.update(SynParams())
neurons = LifNeurons(1000, params)

# Excitatory connections: no self-connections, 10% probability, weight +1.
excitatory_synapses = ExcitatorySynapses(neurons, params)
excitatory_synapses.connect('i != j and i < 800', p=0.1)
excitatory_synapses.w = 1.0

# Inhibitory connections: sources i >= 800, weight -1.
inhibitory_synapses = InhibitorySynapses(neurons, params)
inhibitory_synapses.connect('i != j and i >= 800', p=0.1)
inhibitory_synapses.w = -1.0

rate_monitor = PopulationRateMonitor(neurons)
spike_monitor = SpikeMonitor(neurons)

# Run for 10 s of simulated time, reporting progress every second.
network = Network()
network.add(neurons, excitatory_synapses, inhibitory_synapses, rate_monitor, spike_monitor)
network.run(10*second, report='stdout', report_period=1.0*second, namespace={})

# Top panel: population rate averaged into 100 time bins.
figure()
subplot(211)
suptitle('Network Activity')
binned_rate = bin_stat(rate_monitor.t/second, rate_monitor.rate, bins=100)
plot(binned_rate[1][:-1], binned_rate[0])
ylabel('Firing Rate (Hz)')
# Bottom panel: spike raster (neuron index vs. spike time).
subplot(212)
plot(spike_monitor.t/second, spike_monitor.i, '.k')
ylabel('Neuron #')
xlabel('Time (s)')
show()
[
{
"content": "```python\nimport logging\nimport os\nimport pickle\nfrom collections import namedtuple\n\nimport gym\nimport numpy as np\n\nfrom catastrophe_wrapper import *\nfrom catastrophe_wrapper import CatastropheWrapper\nfrom classifier_tf import (SavedCatastropheBlockerTensorflow,\n ... | [
{
"content": "<|memory_start|>```python\nimport logging\nimport os\nimport pickle\nfrom collections import namedtuple\n\nimport gym\nimport numpy as np\n\nfrom catastrophe_wrapper import *\nfrom catastrophe_wrapper import CatastropheWrapper\nfrom classifier_tf import (SavedCatastropheBlockerTensorflow,\n ... | ```python
import logging
import os
import pickle
from collections import namedtuple

import gym
import numpy as np

from catastrophe_wrapper import *
from catastrophe_wrapper import CatastropheWrapper
from classifier_tf import (SavedCatastropheBlockerTensorflow,
                           SavedCatastropheClassifierTensorflow)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Color-match threshold on the summed absolute RGB difference: effectively
# requires an exact color match for integer pixel values.
TOLERANCE = 0.01
# Screen column scanned for each paddle, and each paddle's RGB color
# (presumably Atari Pong frame coordinates -- TODO confirm against env).
PADDLE_COLUMN = {"right": 143, "left": 16}
PADDLE_COLOR = {"right": np.array([92, 186, 92]), "left": np.array([213, 130, 74])}
# Row range [top, bottom) of the playable area.
PLAY_AREA = [34, 34 + 160]
# Default depth (rows) of the catastrophe zone and of the extra warning
# band used for blocking, and the default zone edge.
DEFAULT_CLEARANCE = 16
DEFAULT_BLOCK_CLEARANCE = 16
DEFAULT_LOCATION = "bottom"
def paddle_bottom(observation, paddle="right"):
    """Return the row index of the bottom of *paddle*, or None if absent.

    Args:
        observation: RGB screen array of shape (rows, cols, 3).
        paddle: "right" or "left"; selects the column and color to scan.
    """
    # Per-row color difference in the paddle's fixed screen column.
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # 1 where the pixel matches the paddle color. BUG FIX: np.int is a
    # deprecated alias removed in NumPy 1.24; the builtin int is equivalent.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax on the flipped mask finds the lowest matching row.
    r = np.argmax(np.flipud(found))
    r = (len(found) - r - 1)
    if not found[r]:
        return None
    else:
        return r
def paddle_top(observation, paddle="right"):
    """Return the row index of the top of *paddle*, or None if absent.

    Args:
        observation: RGB screen array of shape (rows, cols, 3).
        paddle: "right" or "left"; selects the column and color to scan.
    """
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # BUG FIX: np.int is a deprecated alias removed in NumPy 1.24; the
    # builtin int is equivalent here.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax finds the first (highest) matching row.
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r
# def ball_center(observation):
# w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
# if len(w[0]) == 0 or len(w[0]) > 4:
# return None
# w = np.mean(w, axis=1)
# return w[0], w[1] + 6
#
# def ball_on_left(observation):
# w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
# return(len(w[0]) > 0)
def print_observation(observation):
    """Print the observation as a grid of palette indices, then the palette.

    Each distinct pixel value is assigned an index in order of first
    appearance; the grid of indices is printed with no separators, followed
    by an index -> value legend, one entry per line.
    """
    palette = []
    for row in observation:
        for pixel in row:
            match_index = None
            # Linear scan: pixels are array-like, so equality is elementwise
            # and .all() tests a full match.
            for idx, known in enumerate(palette):
                if (known == pixel).all():
                    match_index = idx
                    break
            if match_index is None:
                # New color: its index is the current palette size.
                print(len(palette), end="")
                palette.append(pixel)
            else:
                print(match_index, end="")
    print()
    for idx, known in enumerate(palette):
        print(idx, known)
def is_catastrophe(obs, location=DEFAULT_LOCATION, clearance=DEFAULT_CLEARANCE):
    """Return True when the paddle is inside the catastrophe zone.

    The zone is the *clearance*-row band at the chosen edge of the play
    area. A paddle that cannot be located never counts as a catastrophe.
    """
    if location == "bottom":
        edge = paddle_bottom(obs)
        return edge is not None and edge > PLAY_AREA[1] - clearance
    if location == "top":
        edge = paddle_top(obs)
        return edge is not None and edge < PLAY_AREA[0] + clearance
    raise ValueError("Unrecognized location: {}".format(location))
##catastrophe zone on either side
# def is_catastrophe(obs, location=DEFAULT_LOCATION, clearance=DEFAULT_CLEARANCE):
# clearance = 5
# y = paddle_bottom(obs)
# y2 = paddle_top(obs)
# if y is None:
# return False
# return (y > PLAY_AREA[1] - clearance) or (y < PLAY_AREA[0] + clearance)
def should_block(obs,
                 action,
                 location=DEFAULT_LOCATION,
                 clearance=DEFAULT_CLEARANCE,
                 block_clearance=DEFAULT_BLOCK_CLEARANCE):
    """Decide whether `action` should be blocked to keep the paddle safe.

    Returns True only when the paddle is inside the widened warning band
    (clearance + block_clearance) but not yet in the catastrophe zone
    itself, and `action` is not the escape move for that edge (DOWN=5 for
    "top", UP=2 for "bottom").

    Args:
        obs: frame array, or None (never block without an observation).
        action: the agent's chosen action id.
        location: "top" or "bottom".
        clearance: catastrophe-zone width in pixels.
        block_clearance: extra pixels added to form the warning band.
    """
    if obs is None:
        return False
    if is_catastrophe(obs, location, clearance):
        # Already inside the catastrophe zone: blocking no longer helps.
        return False
    # is_catastrophe above already rejected any other location value,
    # so non-"top" here means "bottom".
    escape_action = 5 if location == "top" else 2
    in_warning_band = is_catastrophe(obs, location, clearance + block_clearance)
    return bool(in_warning_band and action != escape_action)
def allowed_actions_heuristic(obs, location):
    """Return the list of escape actions allowed near the given edge.

    Args:
        obs: observation (unused; kept for interface compatibility with
            learned variants).
        location: "top" or "bottom".

    Returns:
        [5] (DOWN) for "top", [2] (UP) for "bottom" -- the action ids match
        the escape actions used by should_block().

    Raises:
        ValueError: for an unrecognized location.  Previously this fell
            through and returned None silently; raising matches
            is_catastrophe()'s handling of the same input.
    """
    if location == "top":
        return [5]
    elif location == "bottom":
        return [2]
    raise ValueError("Unrecognized location: {}".format(location))
class CatastropheClassifierHeuristic(object):
    """Heuristic catastrophe classifier.

    Thin wrapper binding the module-level is_catastrophe() to a stored
    location and clearance, exposing the classifier interface.
    """

    def __init__(self, location=DEFAULT_LOCATION, clearance=DEFAULT_CLEARANCE, **_):
        # Extra keyword arguments are accepted and ignored so one shared
        # config dict can be splatted into any classifier constructor.
        self.location = location
        self.clearance = clearance

    def is_catastrophe(self, obs):
        return is_catastrophe(obs, self.location, self.clearance)

    def is_catastrophe_with_score(self, obs):
        # The heuristic has no confidence estimate; the score is always 0.
        return self.is_catastrophe(obs), 0
# class CatastropheClassifierHeuristic2(object):
# def __init__(self):
# pass
#
# def is_catastrophe(self, obs):
# # if True:
# if ball_on_left(obs):
# ltop, lbottom = paddle_top(obs, "left"), paddle_bottom(obs, "left")
# rtop, rbottom = paddle_top(obs, "right"), paddle_bottom(obs, "right")
# print([ltop, lbottom, rtop, rbottom])
# if None not in [ltop, lbottom, rtop, rbottom]:
#
# if rbottom >= ltop and rbottom <= lbottom:
# return True
# if rtop >= ltop and rtop <= lbottom:
# return True
# if rtop < ltop and rbottom > lbottom:
# return True
# return False
class CatastropheBlockerHeuristic(object):
    """Heuristic action blocker.

    Thin wrapper binding the module-level should_block() and
    allowed_actions_heuristic() to stored zone parameters.
    """

    def __init__(self,
                 location=DEFAULT_LOCATION,
                 clearance=DEFAULT_CLEARANCE,
                 block_clearance=DEFAULT_BLOCK_CLEARANCE,
                 **_):
        # Extra keyword arguments are accepted and ignored (config splatting).
        self.location = location
        self.clearance = clearance
        self.block_clearance = block_clearance

    def should_block(self, obs, action):
        return should_block(obs, action, self.location, self.clearance,
                            self.block_clearance)

    def should_block_with_score(self, obs, action):
        # The heuristic has no confidence estimate; the score is always 0.
        return self.should_block(obs, action), 0

    def allowed_actions(self, obs):
        return allowed_actions_heuristic(obs, self.location)
class PongClassifierLabeller(object):
    """Labels every acted frame of an episode with the catastrophe flag."""

    def __init__(self):
        pass

    def label(self, features, episode):
        """Return (features, labels): one bool per frame with an action.

        Frames whose action is None (e.g. terminal frames) are skipped, so
        the label array lines up with the acted frames only.
        """
        labels = np.array([
            is_catastrophe(frame.image, location="bottom")
            for frame in episode.frames
            if frame.action is not None
        ])
        return features, labels
class PongBlockerClearanceHeuristicLabeller(object):
    """Labels frames with a clearance-based "should have blocked" flag.

    A frame is labelled True when the paddle is inside the widened warning
    band (clearance + block_clearance) and the taken action is not the
    escape move for the configured edge.  Unlike should_block(), frames
    already inside the catastrophe zone are NOT excluded here.
    """

    def __init__(self,
                 location=DEFAULT_LOCATION,
                 clearance=DEFAULT_CLEARANCE,
                 block_clearance=DEFAULT_BLOCK_CLEARANCE,
                 **_):
        self.location = location
        self.clearance = clearance
        self.block_clearance = block_clearance
        # NOTE(review): never used by label(); kept for interface
        # compatibility in case callers reach into .blocker -- confirm.
        self.blocker = CatastropheBlockerHeuristic(location, clearance, block_clearance)

    def __block_with_clearance(self, obs, action, location, clearance, block_clearance):
        # Escape action is UP (2) when defending the bottom edge and
        # DOWN (5) for the top edge, matching should_block().  The previous
        # implementation hardcoded UP, which mislabelled location="top"
        # episodes; behavior for "bottom" is unchanged.
        escape_action = 5 if location == "top" else 2
        return bool(is_catastrophe(obs, location, clearance + block_clearance)
                    and action != escape_action)

    def label(self, features, episode):
        """Return (features, labels): one bool per frame with an action."""
        labels = np.array(
            [self.__block_with_clearance(frame.image,
                                         frame.action,
                                         self.location,
                                         self.clearance,
                                         self.block_clearance)
             for frame in episode.frames if frame.action is not None])
        return features, labels
class PongBlockerLabeller(object):
    """Labels frames that immediately precede a catastrophe.

    Frame i is labelled True when any of the next `block_radius + 1` frames
    is a catastrophe.  Frames that are already catastrophes, or are too
    close to the end of the episode to look ahead, are masked out.
    """

    def __init__(self, block_radius=0):
        # How many extra future frames (beyond the next one) to scan.
        self.block_radius = block_radius

    def label_and_build_mask(self, episode):
        """Return (labels, mask) boolean arrays over episode.frames.

        labels[i]: a catastrophe occurs within the lookahead window after i.
        mask[i]:   frame i should be kept (not a catastrophe itself, and has
                   a full lookahead window before the episode ends).
        """
        # NOTE(review): this array only covers frames with a non-None action,
        # yet is indexed by positions over ALL frames below; if a trailing
        # frame has action None the lengths diverge -- confirm frame layout.
        is_catastrophe_array = np.array(
            [is_catastrophe(frame.image) for frame in episode.frames
             if frame.action is not None])
        # np.bool was an alias of the builtin bool and was removed in
        # NumPy >= 1.24; the builtin preserves the original dtype.
        labels = np.full(len(episode.frames), fill_value=False, dtype=bool)
        mask = np.full(len(episode.frames), fill_value=True, dtype=bool)
        for i in range(len(episode.frames)):
            if i + self.block_radius + 1 >= len(episode.frames):
                # Not enough lookahead left at the episode tail.
                mask[i] = False
                continue
            if is_catastrophe_array[i]:
                # Already in a catastrophe: blocking labels do not apply.
                mask[i] = False
                continue
            for j in range(self.block_radius + 1):
                if is_catastrophe_array[i + j + 1]:
                    labels[i] = True
                    break
        return labels, mask

    def label(self, features, episode):
        """Return (features, labels) restricted to unmasked frames.

        Mutates `features` in place, filtering every feature array by the
        mask, and asserts the filtered lengths stay consistent.
        """
        labels, mask = self.label_and_build_mask(episode)
        labels = labels[mask]
        for key, value in features.items():
            features[key] = features[key][mask]
            assert (len(labels) == len(features[key])), "{} {}".format(
                len(labels), len(features[key]))
        return features, labels
``` |
[
{
"content": "Here is a code file:\n```python\nfrom CommonClasses import *\r\nfrom solution import Solution\r\n\r\nclass TestSuite:\r\n \r\n def run(self):\r\n self.test000()\r\n self.test001()\r\n self.test002()\r\n self.test003()\r\n# self.test004()\r\n\r\n def tes... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom CommonClasses import *\r\nfrom solution import Solution\r\n\r\nclass TestSuite:\r\n \r\n def run(self):\r\n self.test000()\r\n self.test001()\r\n self.test002()\r\n self.test003()\r\n# self.test004()\r\... | ```python
from CommonClasses import *
from solution import Solution
class TestSuite:
def run(self):
self.test000()
self.test001()
self.test002()
self.test003()
# self.test004()
def test000(self):
print 'test 000\n'
board = ['ABCE',
'SFCS',
'ADEE']
word = 'ABCCED'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def test001(self):
print 'test 001\n'
board = ['ABCE',
'SFCS',
'ADEE']
word = 'SEE'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def test002(self):
print 'test 002\n'
board = ['ABCE',
'SFCS',
'ADEE']
word = 'ABCB'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def test003(self):
print 'test 003\n'
board = ['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaab']
word = 'baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
startTime = time.clock()
r = Solution().exist(board, word)
timeUsed = time.clock() - startTime
print ' input:\t{0}, {1}'.format(board, word)
# print ' expect:\t', ?
print ' output:\t{0}'.format(r)
print ' time used:\t{0:.6f}'.format(timeUsed)
print
def main(argv):
TestSuite().run()
if __name__ == '__main__':
main(sys.argv)
``` |
[
{
"content": "Replicate the source code:\n```python\nimport collections\n\nimport mpi4py\nimport numpy\n\nimport chainer.cuda\nimport chainer.utils\nfrom chainermn.communicators import _communication_utility\nfrom chainermn.communicators._communication_utility import chunked_bcast_obj\nfrom chainermn.communicat... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport collections\n\nimport mpi4py\nimport numpy\n\nimport chainer.cuda\nimport chainer.utils\nfrom chainermn.communicators import _communication_utility\nfrom chainermn.communicators._communication_utility import chunked_bcast_obj\nfrom chai... | ```python
import collections
import mpi4py
import numpy
import chainer.cuda
import chainer.utils
from chainermn.communicators import _communication_utility
from chainermn.communicators._communication_utility import chunked_bcast_obj
from chainermn.communicators import _memory_utility
from chainermn.communicators import communicator_base
_dtype_mpi_type = {
# see the definition of mpi4py.MPI._typedict (in mpi4py/MPI/typemap.pxi)
numpy.dtype(numpy.int32): mpi4py.MPI._typedict['i'],
numpy.dtype(numpy.int64): mpi4py.MPI._typedict['l'],
numpy.dtype(numpy.float32): mpi4py.MPI._typedict['f'],
numpy.dtype(numpy.float64): mpi4py.MPI._typedict['d'],
}
def _check_dtype(caller, msgtype):
dtype = msgtype.dtype
if dtype not in _dtype_mpi_type.keys():
raise TypeError(
'{} does not support dtype {}'.format(caller, dtype))
def _check_dtypes_are_same(msgtypes):
dtypes = [msgtype.dtype for msgtype in msgtypes]
if any(dtypes[0] != dtype for dtype in dtypes):
raise TypeError('all dtypes must be the same')
def _is_numpy_array(array):
return isinstance(array, numpy.ndarray)
def _is_cupy_array(array):
return chainer.cuda.get_array_module(array) is not numpy
def _cnt_to_dsp(cnt):
"""Utility to convert length array to cumulative array."""
return [0] + numpy.cumsum(cnt)[:-1].tolist()
def _get_mpi_type(msgtype):
dtype = msgtype.dtype
if dtype not in _dtype_mpi_type.keys():
raise TypeError(
'dtype {} is not supported by MpiCommunicator'.format(dtype))
return _dtype_mpi_type[dtype]
class _MessageType(object):
def __init__(self, obj):
if _is_numpy_array(obj) or _is_cupy_array(obj):
self.is_host = _is_numpy_array(obj)
self.is_tuple = False
self.narr = 1
self.ndims = [obj.ndim]
self.shapes = [obj.shape]
self.dtype = obj.dtype
elif isinstance(obj, collections.Iterable):
if all(map(_is_numpy_array, obj)):
self.is_host = True
elif all(map(_is_cupy_array, obj)):
self.is_host = False
else:
raise ValueError(
'All message objects must be either numpy or cupy arrays.')
self.is_tuple = True
self.narr = len(obj)
self.ndims = [x.ndim for x in obj]
self.shapes = [x.shape for x in obj]
dtypes = [x.dtype for x in obj]
if not all(dtype == dtypes[0] for dtype in dtypes):
raise TypeError(
'Message objects must be the same dtype')
self.dtype = dtypes[0]
else:
raise TypeError(
'Message object must be numpy/cupy array or its tuple.')
def get_array_module(self):
if self.is_host:
return numpy
else:
import cupy
return cupy
class MpiCommunicatorBase(communicator_base.CommunicatorBase):
'''MpiCommunicatorBase
Implementation of communicator interface defined by
:class:`CommunicatorBase`. This communicator assumes MPI4py and
all ChainerMN processes are invoked by ``mpirun`` (``mpiexec``)
command. Although this lacks several important methods such as
``allreduce_grad`` to be impelmented with speficic algorithm. See
hierarcical communicator or pure_nccl communicator for example.
'''
def __init__(self, mpi_comm):
self.mpi_comm = mpi_comm
self._init_ranks()
@property
def rank(self):
return self.mpi_comm.rank
@property
def size(self):
return self.mpi_comm.size
@property
def intra_rank(self):
return self._intra_rank
@property
def intra_size(self):
return self._intra_size
@property
def inter_rank(self):
return self._inter_rank
@property
def inter_size(self):
return self._inter_size
def split(self, color, key):
return self.__class__(mpi_comm=self.mpi_comm.Split(color, key))
def alltoall(self, xs):
"""A primitive of inter-process all-to-all function.
This method tries to invoke all-to-all communication within the
communicator. All processes in the communicator are expected to
invoke ``alltoall()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``xs`` is numpy array, the returned array will also be allocated
as numpy array. Additionally, when ``xs`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
xs (tuple of numpy/cupy array)
Returns:
ys (tuple of numpy/cupy array):
Received arrays. The length of tuple equals to
the communicator size.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.alltoall')
if len(xs) != self.size:
raise ValueError(
'The length of data must be same as communicator size.')
# Type check.
msgtypes = [_MessageType(x) for x in xs]
for msgtype in msgtypes:
_check_dtype('alltoall', msgtype)
_check_dtypes_are_same(msgtypes)
send_msgtype = msgtypes[0]
msgtypes = self.mpi_comm.alltoall(msgtypes)
_check_dtypes_are_same(msgtypes)
recv_msgtype = msgtypes[0]
# Collective communication.
slens = [numpy.prod(x.shape) for x in xs]
xp = chainer.cuda.get_array_module(*xs)
sbuf = xp.hstack([x.reshape(-1) for x in xs])
shapes = [msgtype.shapes[0] for msgtype in msgtypes]
rlens = [numpy.prod(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
sbuf = _memory_utility.get_device_memory_pointer(sbuf)
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Alltoallv(
[sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(send_msgtype)],
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(recv_msgtype)])
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
def send(self, data, dest, tag):
"""A primitive for inter-process transmitter.
This method sends numpy-array to target process.
The target process is expected to invoke ``recv()``.
This method relies on mpi4py fast communication optimized for
numpy arrays, which discards any information attached to
chainer.Variable objects. Please be sure.
Args:
data: data to be sent (tuple, list or raw numpy/cupy array)
dest (int): Target process specifier.
tag (int): Message ID (MPI feature).
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.send')
msgtype = _MessageType(data)
_check_dtype('send', msgtype)
"""We use ssend() instead of send() to pass unittests.
If we don't use it, an error occurs in
test_point_to_point_communication.py
when using MVAPICH2-2.2 and GPUs.
"""
self.mpi_comm.ssend(msgtype, dest=dest, tag=tag)
# Type check.
if not msgtype.is_tuple:
data = [data]
for array in data:
if chainer.cuda.get_array_module(array) is not numpy:
chainer.cuda.Stream.null.synchronize()
array = (_memory_utility.get_device_memory_pointer(array),
_get_mpi_type(msgtype))
else:
array = numpy.ascontiguousarray(array)
"""We use Ssend() for the same reason as using ssend()."""
self.mpi_comm.Ssend(array, dest=dest, tag=tag)
def recv(self, source, tag):
"""A primitive of inter-process receiver.
This method tries to receive numpy-array from target process.
The target process is expected to invoke ``send()``.
This method relies on mpi4py fast communication optimized for
numpy arrays, which discards any information attached to
chainer.Variable objects. Please be sure.
If the corresponding ``send()`` is invoked with cupy array,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
source (int): Target process specifier.
tag (int): Message ID (MPI feature).
Returns:
data (tuple of numpy/cupy array or numpy/cupy array):
Received data. If ``send()`` is invoked with tuple data,
it is also tuple. Otherwise, it is a vanilla numpy/cupy array.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.recv')
msgtype = self.mpi_comm.recv(source=source, tag=tag)
xp = msgtype.get_array_module()
if msgtype.is_tuple:
msg = []
for shape in msgtype.shapes:
buf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
self.mpi_comm.Recv(
_memory_utility.array_to_buffer_object(buf, rtype),
source=source, tag=tag)
msg.append(buf.reshape(shape))
return tuple(msg)
else:
assert len(msgtype.shapes) == 1
shape = msgtype.shapes[0]
buf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
self.mpi_comm.Recv(
_memory_utility.array_to_buffer_object(buf, rtype),
source=source, tag=tag)
return buf.reshape(shape)
def bcast(self, x, root=0):
"""A primitive of inter-process broadcast communication.
This method tries to invoke broadcast communication within the
communicator. All processes in the communicator are expected to
invoke ``broadcast()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``bcast()`` is invoked with cupy array in the root process,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): Array to be broadcasted.
root (int): Rank of root process.
Returns:
ys (tuple of numpy/cupy array): Received arrays.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.bcast')
is_master = self.mpi_comm.rank == root
if is_master:
msgtype = _MessageType(x)
_check_dtype('bcast', msgtype)
if msgtype.is_tuple:
raise TypeError('Tuple data cannot be broadcasted')
msgtype = self.mpi_comm.bcast(msgtype, root)
shape = msgtype.shapes[0]
buf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
self.mpi_comm.Bcast(buf, root)
return x
else:
msgtype = self.mpi_comm.bcast(None, root)
xp = msgtype.get_array_module()
shape = msgtype.shapes[0]
buf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
buftype = _get_mpi_type(msgtype)
self.mpi_comm.Bcast(
_memory_utility.array_to_buffer_object(buf, buftype),
root)
return buf.reshape(shape)
def gather(self, x, root=0):
"""A primitive of inter-process gather communication.
This method tries to invoke gather communication within the
communicator. All processes in the communicator are expected to
invoke ``gather()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``x`` is numpy array, the received data will also be allocated
as numpy array. Additionally, when ``x`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): Array to be gathered.
root (int): Rank of root process.
Returns:
ys (tuple of numpy/cupy array):
Received arrays. ``None`` for non-root processes.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.gather')
is_master = self.mpi_comm.rank == root
msgtype = _MessageType(x)
_check_dtype('gather', msgtype)
msgtypes = self.mpi_comm.gather(msgtype, root)
if is_master:
_check_dtypes_are_same(msgtypes)
for msgtype in msgtypes:
if msgtype.is_tuple:
raise TypeError('gather cannot handle tuple data')
assert len(msgtype.shapes) == 1
xp = chainer.cuda.get_array_module(x)
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
shapes = [mty.shapes[0] for mty in msgtypes]
rlens = [numpy.prod(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Gatherv(
sbuf,
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(msgtype)],
root)
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
else:
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
self.mpi_comm.Gatherv(sbuf, None, root)
return None
def allgather(self, x):
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.allgather')
msgtype = _MessageType(x)
_check_dtype('allgather', msgtype)
msgtypes = self.mpi_comm.allgather(msgtype)
_check_dtypes_are_same(msgtypes)
# Type check.
for msgtype in msgtypes:
if msgtype.is_tuple:
raise TypeError('allgather cannot handle tuple data')
assert len(msgtype.shapes) == 1
# Collective communication.
xp = chainer.cuda.get_array_module(x)
shapes = [msgtype.shapes[0] for msgtype in msgtypes]
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
rlens = [numpy.prod(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Allgatherv(
sbuf,
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(msgtype)])
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
def allreduce(self, x):
"""A primitive of inter-process allreduce communication.
This method tries to invoke allreduce communication within the
communicator. All processes in the communicator are expected to
invoke ``allreduce()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
Note that this method can only handle the same shapes of data
over all processes, and cannot handle tuple data.
If ``x`` is numpy array, the received data will also be allocated
as numpy array. Additionally, when ``x`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): An array to apply allreduce operation.
Returns:
ys (numpy/cupy array): An array that allreduce (currently SUM only)
has been applied.
"""
chainer.utils.experimental(
'chainermn.communicators.CommunicatorBase.allreduce')
msgtype = _MessageType(x)
_check_dtype('allreduce', msgtype)
if msgtype.is_tuple:
raise TypeError('allreduce cannot handle tuple data')
if msgtype.is_tuple:
raise TypeError('allreduce cannot handle tuple data')
xp = chainer.cuda.get_array_module(x)
# TODO(kuenishi): do we check all messages have same shape and dims?
# Source buffer
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
# Destination buffer
dbuf = xp.empty([numpy.prod(msgtype.shapes[0])], dtype=msgtype.dtype)
dbuf = _memory_utility.array_to_buffer_object(
dbuf, _get_mpi_type(msgtype))
self.mpi_comm.Allreduce(sbuf, dbuf)
return dbuf.reshape(msgtype.shapes[0])
# Objects
def send_obj(self, obj, dest):
self.mpi_comm.send(obj, dest=dest)
def recv_obj(self, source):
return self.mpi_comm.recv(source=source)
def bcast_obj(self, obj, max_buf_len=256 * 1024 * 1024, root=0):
return chunked_bcast_obj(obj, self.mpi_comm,
max_buf_len=max_buf_len,
root=root)
def gather_obj(self, obj, root=0):
return self.mpi_comm.gather(obj, root=root)
def scatter(self, xs, root=0):
"""A primitive of inter-process scatter communication.
This method tries to invoke scatter communication within the
communicator. All processes in the communicator are expected to
invoke ``scatter()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``xs`` is tuple, each element is send to different processes.
The length of the tuple must be the same as the communicator size.
If ``xs`` is ``numpy.ndarrray``, it is splitted with the first
axis and sent to different processes. For slave processes, ``xs``
is allowed to be any value (will be ignored).
If ``scatter()`` is invoked with cupy array in the root process,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
xs (tuple of numpy/cupy array): Arrays to be scattered.
root (int): Rank of root process.
Returns:
ys (numpy/cupy array): Received arrays.
"""
chainer.utils.experimental(
'chainermn.communicators.CommunicatorBase.scatter')
is_master = self.mpi_comm.rank == root
if is_master:
# Type check.
msgtype = _MessageType(xs)
_check_dtype('scatter', msgtype)
if msgtype.is_tuple:
if len(msgtype.shapes) != self.size:
raise ValueError(
'the length of xs must be consistent '
'with communicator size')
xp = chainer.cuda.get_array_module(*xs)
msgtype = tuple([_MessageType(x) for x in xs])
shapes = [mty.shapes[0] for mty in msgtype]
# concatenate([x.reshape(-1) ... ], axis=0) will fail
xs = xp.concatenate([x.reshape(1, -1) for x in xs], axis=1)
else:
assert len(msgtype.shapes) == 1
if msgtype.shapes[0][0] != self.mpi_comm.size:
raise ValueError(
'scatter received inconsistent number of inputs '
'with communicator size')
xp = chainer.cuda.get_array_module(xs)
msgtype = tuple([_MessageType(xs[0])
for _ in range(self.size)])
shapes = [xs.shape[1:] for _ in range(self.size)]
msgtype = self.mpi_comm.scatter(msgtype, root)
shape = msgtype.shapes[0]
# Collective communication.
slens = [numpy.prod(s) for s in shapes]
sbuf = _memory_utility.get_device_memory_pointer(xs)
rbuf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Scatterv(
[sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(msgtype)],
_memory_utility.array_to_buffer_object(rbuf, rtype), root)
return rbuf.reshape(shape)
else: # slave processes
msgtypes = self.mpi_comm.scatter(None, root)
xp = msgtypes.get_array_module()
shape = msgtypes.shapes[0]
rbuf = xp.empty([numpy.prod(shape)], dtype=msgtypes.dtype)
rtype = _get_mpi_type(msgtypes)
self.mpi_comm.Scatterv(
None,
_memory_utility.array_to_buffer_object(rbuf, rtype),
root)
return rbuf.reshape(shape)
def allreduce_obj(self, obj):
# Summation by default
return self.mpi_comm.allreduce(obj)
def bcast_data(self, model):
for _, param in sorted(model.namedparams()):
buf = _memory_utility.array_to_buffer_object(param.data)
self.mpi_comm.Bcast(buf)
# Private methods
def _init_ranks(self):
my_ranks = _communication_utility.init_ranks(self.mpi_comm)
assert my_ranks[0] == self.mpi_comm.rank
self._intra_rank = my_ranks[1]
self._intra_size = my_ranks[2]
self._inter_rank = my_ranks[3]
self._inter_size = my_ranks[4]
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# TODO: Temporarily disabled due to importing old code into openshift-ansible\n# repo. We will work on these over time.\n# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter\n\nimport... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# TODO: Temporarily disabled due to importing old code into openshift-ansible\n# repo. We will work on these over time.\n# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-par... | ```python
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
import click
import os
import re
import sys
from ooinstall import openshift_ansible
from ooinstall import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
def validate_ansible_dir(path):
if not path:
raise click.BadParameter('An ansible path must be provided')
return path
# if not os.path.exists(path)):
# raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
def is_valid_hostname(hostname):
if not hostname or len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
def get_ansible_ssh_user():
click.clear()
message = """
This installation process will involve connecting to remote hosts via ssh. Any
account may be used however if a non-root account is used it must have
passwordless sudo access.
"""
click.echo(message)
return click.prompt('User for ssh access', default='root')
def list_hosts(hosts):
hosts_idx = range(len(hosts))
for idx in hosts_idx:
click.echo(' {}: {}'.format(idx, hosts[idx]))
def delete_hosts(hosts):
while True:
list_hosts(hosts)
del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
'or n/N to add more hosts', default='n')
try:
del_idx = int(del_idx)
hosts.remove(hosts[del_idx])
except IndexError:
click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
except ValueError:
try:
response = del_idx.lower()
if response in ['y', 'n']:
return hosts, response
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
except AttributeError:
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
return hosts, None
def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
    """
    Collect host information from user. This will later be filled in using
    ansible.

    Returns: a list of host information collected from the user

    :param oo_cfg: installer configuration; settings['variant_version'] drives
        which prompts are shown (3.0 has no containerized or multi-master support).
    :param existing_env: True when adding hosts to an already-installed cluster;
        marks every collected host as new_host.
    :param masters_set: True when no more masters may be added (skips the
        master prompt entirely).
    :param print_summary: echo a running installation summary after each host.
    """
    click.clear()
    click.echo('*** Host Configuration ***')
    message = """
You must now specify the hosts that will compose your OpenShift cluster.
Please enter an IP or hostname to connect to for each system in the cluster.
You will then be prompted to identify what role you would like this system to
serve in the cluster.
OpenShift Masters serve the API and web console and coordinate the jobs to run
across the environment. If desired you can specify multiple Master systems for
an HA deployment, in which case you will be prompted to identify a *separate*
system to act as the load balancer for your cluster after all Masters and Nodes
are defined.
If only one Master is specified, an etcd instance embedded within the OpenShift
Master service will be used as the datastore. This can be later replaced with a
separate etcd instance if desired. If multiple Masters are specified, a
separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.
OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
    click.echo(message)

    hosts = []
    more_hosts = True
    num_masters = 0
    while more_hosts:
        host_props = {}
        host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                                value_proc=validate_prompt_hostname)

        if not masters_set:
            if click.confirm('Will this host be an OpenShift Master?'):
                host_props['master'] = True
                num_masters += 1

                # 3.0 supports only a single (embedded-etcd) master, so stop
                # offering the master role after the first one.
                if oo_cfg.settings['variant_version'] == '3.0':
                    masters_set = True
        # Every host (masters included) is also configured as a Node.
        host_props['node'] = True

        host_props['containerized'] = False
        if oo_cfg.settings['variant_version'] != '3.0':
            rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
                                            type=click.Choice(['rpm', 'container']),
                                            default='rpm')
            if rpm_or_container == 'container':
                host_props['containerized'] = True

        if existing_env:
            host_props['new_host'] = True
        else:
            host_props['new_host'] = False

        host = Host(**host_props)
        hosts.append(host)

        if print_summary:
            print_installation_summary(hosts, oo_cfg.settings['variant_version'])

        # If we have one master, this is enough for an all-in-one deployment,
        # thus we can start asking if you wish to proceed. Otherwise we assume
        # you must.
        if masters_set or num_masters != 2:
            more_hosts = click.confirm('Do you want to add additional hosts?')

    # Three or more masters means an HA deployment, which needs a load
    # balancer in front of the API.
    if num_masters >= 3:
        collect_master_lb(hosts)

    return hosts
def print_installation_summary(hosts, version=None):
    """
    Displays a summary of all hosts configured thus far, and what role each
    will play.

    Shows total nodes/masters, hints for performing/modifying the deployment
    with additional setup, warnings for invalid or sub-optimal configurations.

    :param hosts: list of Host objects collected so far.
    :param version: variant version string (e.g. '3.0'); suppresses the HA
        hint for 3.0, which does not support multiple masters.
    """
    click.clear()
    click.echo('*** Installation Summary ***\n')
    click.echo('Hosts:')
    for host in hosts:
        print_host_summary(hosts, host)

    masters = [host for host in hosts if host.master]
    nodes = [host for host in hosts if host.node]
    dedicated_nodes = [host for host in hosts if host.node and not host.master]
    click.echo('')
    click.echo('Total OpenShift Masters: %s' % len(masters))
    click.echo('Total OpenShift Nodes: %s' % len(nodes))

    if len(masters) == 1 and version != '3.0':
        ha_hint_message = """
NOTE: Add a total of 3 or more Masters to perform an HA installation."""
        click.echo(ha_hint_message)
    elif len(masters) == 2:
        # Exactly 2 masters is an invalid HA configuration; the install will
        # be rejected later by check_hosts_config().
        min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
        click.echo(min_masters_message)
    elif len(masters) >= 3:
        ha_message = """
NOTE: Multiple Masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer once
finished entering hosts."""
        click.echo(ha_message)

        # NOTE(review): the dedicated-node warnings below are emitted only in
        # the HA (>= 3 masters) case, matching upstream structure — the
        # source's indentation was mangled, so confirm against upstream.
        dedicated_nodes_message = """
WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
Nodes are specified, each configured Master will be marked as a schedulable
Node."""

        min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
deployment."""
        if len(dedicated_nodes) == 0:
            click.echo(dedicated_nodes_message)
        elif len(dedicated_nodes) < 3:
            click.echo(min_ha_nodes_message)

    click.echo('')
def print_host_summary(all_hosts, host):
    """Echo the roles a single host will play in the deployment."""
    summary = ["- %s" % host.connect_to]

    if host.master:
        summary.append(" - OpenShift Master")

    if host.node:
        if host.is_dedicated_node():
            summary.append(" - OpenShift Node (Dedicated)")
        elif host.is_schedulable_node(all_hosts):
            summary.append(" - OpenShift Node")
        else:
            summary.append(" - OpenShift Node (Unscheduled)")

    if host.master_lb:
        lb_role = " - Load Balancer (Preconfigured)" if host.preconfigured \
            else " - Load Balancer (HAProxy)"
        summary.append(lb_role)

    if host.master:
        etcd_role = " - Etcd Member" if host.is_etcd_member(all_hosts) \
            else " - Etcd (Embedded)"
        summary.append(etcd_role)

    for line in summary:
        click.echo(line)
def collect_master_lb(hosts):
    """
    Get a valid load balancer from the user and append it to the list of
    hosts.

    Ensure user does not specify a system already used as a master/node as
    this is an invalid configuration.

    Mutates *hosts* in place by appending the new load-balancer Host.
    """
    message = """
Setting up High Availability Masters requires a load balancing solution.
Please provide a the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.
If the host provided does is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
    click.echo(message)
    host_props = {}

    # Using an embedded function here so we have access to the hosts list:
    def validate_prompt_lb(hostname):
        # Run the standard hostname check first:
        hostname = validate_prompt_hostname(hostname)

        # Make sure this host wasn't already specified:
        for host in hosts:
            if host.connect_to == hostname and (host.master or host.node):
                raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
                                         'please specify a separate host' % hostname)
        return hostname

    host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                            value_proc=validate_prompt_lb)
    install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
    # 'preconfigured' means we must NOT install haproxy on it ourselves.
    host_props['preconfigured'] = not install_haproxy
    host_props['master'] = False
    host_props['node'] = False
    host_props['master_lb'] = True
    master_lb = Host(**host_props)
    hosts.append(master_lb)
def confirm_hosts_facts(oo_cfg, callback_facts):
    """
    Show the user the facts ansible gathered for each host and ask for
    confirmation.

    Copies ip/public_ip/hostname/public_hostname from *callback_facts* onto
    each (non-preconfigured) Host object as a side effect.

    :param oo_cfg: installer configuration holding the list of hosts.
    :param callback_facts: dict keyed by connect_to address, each value
        containing a "common" dict of gathered facts.
    :returns: dict keyed by connect_to address (values are currently left as
        empty dicts, matching the original behavior).

    Exits the process (after saving the config to disk) if the user rejects
    the displayed facts.
    """
    hosts = oo_cfg.hosts
    click.clear()
    message = """
A list of the facts gathered from the provided hosts follows. Because it is
often the case that the hostname for a system inside the cluster is different
from the hostname that is resolveable from command line or web clients
these settings cannot be validated automatically.
For some cloud providers the installer is able to gather metadata exposed in
the instance so reasonable defaults will be provided.
Please confirm that they are correct before moving forward.
"""
    notes = """
Format:
connect_to,IP,public IP,hostname,public hostname
Notes:
 * The installation host is the hostname from the installer's perspective.
 * The IP of the host should be the internal IP of the instance.
 * The public IP should be the externally accessible IP associated with the instance
 * The hostname should resolve to the internal IP from the instances
   themselves.
 * The public hostname should resolve to the external ip from hosts outside of
   the cloud.
"""

    # For testing purposes we need to click.echo only once, so build up
    # the message:
    output = message

    default_facts_lines = []
    default_facts = {}
    for h in hosts:
        if h.preconfigured:
            # Preconfigured hosts (e.g. an existing load balancer) are not
            # managed by ansible, so no facts were gathered for them.
            continue
        default_facts[h.connect_to] = {}
        common = callback_facts[h.connect_to]["common"]
        h.ip = common["ip"]
        h.public_ip = common["public_ip"]
        h.hostname = common["hostname"]
        h.public_hostname = common["public_hostname"]

        # Build the CSV line once instead of joining the same fields twice.
        facts_line = ",".join([h.connect_to,
                               h.ip,
                               h.public_ip,
                               h.hostname,
                               h.public_hostname])
        default_facts_lines.append(facts_line)
        output = "%s\n%s" % (output, facts_line)

    output = "%s\n%s" % (output, notes)
    click.echo(output)
    facts_confirmed = click.confirm("Do the above facts look correct?")
    if not facts_confirmed:
        message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
        click.echo(message)
        # Make sure we actually write out the config file.
        oo_cfg.save_to_disk()
        sys.exit(0)
    return default_facts
def check_hosts_config(oo_cfg, unattended):
    """
    Validate the collected host configuration before installing.

    Exits with status 1 on invalid configurations (exactly 2 masters, more
    than one load balancer, a load balancer doubling as master/node, or an
    HA setup with no load balancer at all). Warns (or prompts, when
    interactive) if no dedicated nodes were specified.

    :param oo_cfg: installer configuration holding the hosts list.
    :param unattended: True when running non-interactively; warnings are
        echoed instead of prompting for confirmation.
    """
    click.clear()
    masters = [host for host in oo_cfg.hosts if host.master]

    # Exactly two masters is never valid: HA needs 3+, single-master needs 1.
    if len(masters) == 2:
        click.echo("A minimum of 3 Masters are required for HA deployments.")
        sys.exit(1)

    if len(masters) > 1:
        master_lb = [host for host in oo_cfg.hosts if host.master_lb]
        if len(master_lb) > 1:
            click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
            sys.exit(1)
        elif len(master_lb) == 1:
            if master_lb[0].master or master_lb[0].node:
                click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
                sys.exit(1)
        else:
            message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all Master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
            click.echo(message)
            sys.exit(1)

    dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
    if len(dedicated_nodes) == 0:
        message = """
WARNING: No dedicated Nodes specified. By default, colocated Masters have
their Nodes set to unschedulable.  If you proceed all nodes will be labelled
as schedulable.
"""
        if unattended:
            click.echo(message)
        else:
            confirm_continue(message)

    return
def get_variant_and_version(multi_master=False):
    """
    Prompt the user to choose a product variant/version combination.

    :param multi_master: when True, an extra note is echoed before the
        prompt (the message text appears truncated in this source —
        presumably "3.0 installations are not supported for multiple
        masters"; TODO confirm against upstream).
    :returns: the (variant, version) tuple the user selected.
    """
    message = "\nWhich variant would you like to install?\n\n"
    i = 1
    combos = get_variant_version_combos()
    # Build a numbered menu of every available variant/version combo.
    for (variant, version) in combos:
        message = "%s\n(%s) %s %s" % (message, i, variant.description,
                                      version.name)
        i = i + 1
    message = "%s\n" % message

    click.echo(message)
    if multi_master:
        click.echo('NOTE: 3.0 installations are not')
    response = click.prompt("Choose a variant from above: ", default=1)
    # Menu is 1-based; the combos list is 0-based.
    product, version = combos[response - 1]

    return product, version
def confirm_continue(message):
    """Optionally echo *message*, then abort unless the user confirms."""
    if message:
        click.echo(message)
    # abort=True raises click.Abort (exiting) on a negative answer.
    click.confirm("Are you ready to continue?", default=False, abort=True)
def error_if_missing_info(oo_cfg):
    """
    Validate that an unattended install's configuration is complete.

    Checks hosts, ansible_ssh_user, a resolvable variant/version, and
    per-host facts. Exits with status 1 on the first fatal omission; also
    normalizes settings['variant_version'] to the resolved version name as
    a side effect.
    """
    missing_info = False
    if not oo_cfg.hosts:
        missing_info = True
        click.echo('For unattended installs, hosts must be specified on the '
                   'command line or in the config file: %s' % oo_cfg.config_path)
        sys.exit(1)

    if 'ansible_ssh_user' not in oo_cfg.settings:
        click.echo("Must specify ansible_ssh_user in configuration file.")
        sys.exit(1)

    # Lookup a variant based on the key we were given:
    if not oo_cfg.settings['variant']:
        click.echo("No variant specified in configuration file.")
        sys.exit(1)
    ver = None
    if 'variant_version' in oo_cfg.settings:
        ver = oo_cfg.settings['variant_version']
    variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
    if variant is None or version is None:
        err_variant_name = oo_cfg.settings['variant']
        if ver:
            err_variant_name = "%s %s" % (err_variant_name, ver)
        click.echo("%s is not an installable variant." % err_variant_name)
        sys.exit(1)
    # Normalize to the canonical version name resolved by find_variant().
    oo_cfg.settings['variant_version'] = version.name

    missing_facts = oo_cfg.calc_missing_facts()
    if len(missing_facts) > 0:
        missing_info = True
        click.echo('For unattended installs, facts must be provided for all masters/nodes:')
        for host in missing_facts:
            click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))

    if missing_info:
        sys.exit(1)
def get_missing_info_from_user(oo_cfg):
    """ Prompts the user for any information missing from the given configuration. """
    click.clear()

    message = """
Welcome to the OpenShift Enterprise 3 installation.
Please confirm that following prerequisites have been met:
* All systems where OpenShift will be installed are running Red Hat Enterprise
Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
the installer but also from within the cluster.
When the process completes you will have a default configuration for Masters
and Nodes.  For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.
For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
    confirm_continue(message)
    click.clear()

    # Only prompt for values not already present in the loaded config.
    if oo_cfg.settings.get('ansible_ssh_user', '') == '':
        oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
        click.clear()

    if oo_cfg.settings.get('variant', '') == '':
        variant, version = get_variant_and_version()
        oo_cfg.settings['variant'] = variant.name
        oo_cfg.settings['variant_version'] = version.name
        click.clear()

    if not oo_cfg.hosts:
        oo_cfg.hosts = collect_hosts(oo_cfg)
        click.clear()

    return oo_cfg
def collect_new_nodes(oo_cfg):
    """Gather host entries for new nodes being added to an installed cluster."""
    click.clear()
    for text in ('*** New Node Configuration ***', '\nAdd new nodes here\n'):
        click.echo(text)
    return collect_hosts(oo_cfg, existing_env=True, masters_set=True,
                         print_summary=False)
def get_installed_hosts(hosts, callback_facts):
    """
    Return the subset of *hosts* that already report an installed OpenShift
    version in the gathered facts.

    :param hosts: iterable of Host-like objects with a connect_to attribute.
    :param callback_facts: dict keyed by connect_to address; each value may
        contain a "common" dict with a "version" entry.
    :returns: list of hosts whose reported version is non-empty and not the
        literal string 'None'.
    """
    installed_hosts = []
    for host in hosts:
        # Chain .get() lookups: missing host entry, missing "common" section,
        # or missing "version" all collapse to an empty string (not installed).
        version = callback_facts.get(host.connect_to, {}) \
                                .get('common', {}) \
                                .get('version', '')
        if version and version != 'None':
            installed_hosts.append(host)
    return installed_hosts
# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
    """
    Decide which hosts ansible should actually run against.

    Already-installed nodes are removed from the run list (unless *force*
    requests a full reinstall); masters always stay in the list. When an
    installed environment is detected interactively, the user may add new
    nodes, which mutates oo_cfg.hosts and re-gathers facts.

    :returns: a (hosts_to_run_on, callback_facts) tuple; callback_facts may
        have been refreshed if new nodes were collected.
    """
    # Copy the list of existing hosts so we can remove any already installed nodes.
    hosts_to_run_on = list(oo_cfg.hosts)

    # Check if master or nodes already have something installed
    installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
    if len(installed_hosts) > 0:
        click.echo('Installed environment detected.')
        # This check has to happen before we start removing hosts later in this method
        if not force:
            if not unattended:
                click.echo('By default the installer only adds new nodes ' \
                           'to an installed environment.')
                response = click.prompt('Do you want to (1) only add additional nodes or ' \
                                        '(2) reinstall the existing hosts ' \
                                        'potentially erasing any custom changes?',
                                        type=int)
                # TODO: this should be reworked with error handling.
                # Click can certainly do this for us.
                # This should be refactored as soon as we add a 3rd option.
                if response == 1:
                    force = False
                if response == 2:
                    force = True

        # present a message listing already installed hosts and remove hosts if needed
        for host in installed_hosts:
            if host.master:
                click.echo("{} is already an OpenShift Master".format(host))
                # Masters stay in the list, we need to run against them when adding
                # new nodes.
            elif host.node:
                click.echo("{} is already an OpenShift Node".format(host))
                # force is only used for reinstalls so we don't want to remove
                # anything.
                if not force:
                    hosts_to_run_on.remove(host)

        # Handle the cases where we know about uninstalled systems
        new_hosts = set(hosts_to_run_on) - set(installed_hosts)
        if len(new_hosts) > 0:
            for new_host in new_hosts:
                click.echo("{} is currently uninstalled".format(new_host))
            # Fall through
            click.echo('Adding additional nodes...')
        else:
            if unattended:
                if not force:
                    click.echo('Installed environment detected and no additional ' \
                               'nodes specified: aborting. If you want a fresh install, use ' \
                               '`atomic-openshift-installer install --force`')
                    sys.exit(1)
            else:
                if not force:
                    new_nodes = collect_new_nodes(oo_cfg)

                    hosts_to_run_on.extend(new_nodes)
                    oo_cfg.hosts.extend(new_nodes)

                    openshift_ansible.set_config(oo_cfg)
                    # Re-gather facts now that new hosts were added.
                    click.echo('Gathering information from hosts...')
                    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
                    if error:
                        click.echo("There was a problem fetching the required information. See " \
                                   "{} for details.".format(oo_cfg.settings['ansible_log_path']))
                        sys.exit(1)
    else:
        pass # proceeding as normal should do a clean install

    return hosts_to_run_on, callback_facts
@click.group()
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-playbook-directory',
              '-a',
              type=click.Path(exists=True,
                              file_okay=False,
                              dir_okay=True,
                              readable=True),
              # callback=validate_ansible_dir,
              default=DEFAULT_PLAYBOOK_DIR,
              envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-log-path',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default="/tmp/ansible.log")
@click.option('-v', '--verbose',
              is_flag=True, default=False)
#pylint: disable=too-many-arguments
#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
    """
    atomic-openshift-installer makes the process for installing OSE or AEP
    easier by interactively gathering the data needed to run on each host.
    It can also be run in unattended mode if provided with a configuration file.
    Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
    """
    # NOTE: the docstring above doubles as the click --help text; do not
    # edit it casually.
    # Stash the raw CLI flags on the click context so subcommands can read them.
    ctx.obj = {}
    ctx.obj['unattended'] = unattended
    ctx.obj['configuration'] = configuration
    ctx.obj['ansible_config'] = ansible_config
    ctx.obj['ansible_log_path'] = ansible_log_path
    ctx.obj['verbose'] = verbose

    try:
        oo_cfg = OOConfig(ctx.obj['configuration'])
    except OOConfigInvalidHostError as e:
        click.echo(e)
        sys.exit(1)

    # If no playbook dir on the CLI, check the config:
    if not ansible_playbook_directory:
        ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')

    # If still no playbook dir, check for the default location:
    if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
        ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
    validate_ansible_dir(ansible_playbook_directory)

    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
    ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory

    if ctx.obj['ansible_config']:
        oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
    elif 'ansible_config' not in oo_cfg.settings and \
        os.path.exists(DEFAULT_ANSIBLE_CONFIG):
        # If we're installed by RPM this file should exist and we can use it as our default:
        oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG

    oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']

    ctx.obj['oo_cfg'] = oo_cfg
    openshift_ansible.set_config(oo_cfg)
@click.command()
@click.pass_context
def uninstall(ctx):
    # No docstring on purpose: it would become this subcommand's --help text.
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    if not oo_cfg.hosts:
        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
        sys.exit(1)

    click.echo("OpenShift will be uninstalled from the following hosts:\n")
    if not ctx.obj['unattended']:
        # Interactive mode: list every host and require explicit confirmation.
        for host in oo_cfg.hosts:
            click.echo(" * %s" % host.connect_to)
        if not click.confirm("\nDo you wish to proceed?"):
            click.echo("Uninstall cancelled.")
            sys.exit(0)

    openshift_ansible.run_uninstall_playbook(verbose)
@click.command()
@click.pass_context
def upgrade(ctx):
    # No docstring on purpose: it would become this subcommand's --help text.
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    if len(oo_cfg.hosts) == 0:
        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
        sys.exit(1)

    # Update config to reflect the version we're targetting, we'll write
    # to disk once ansible completes successfully, not before.
    old_variant = oo_cfg.settings['variant']
    old_version = oo_cfg.settings['variant_version']
    # 'enterprise' was renamed to 'openshift-enterprise'; migrate old configs.
    if oo_cfg.settings['variant'] == 'enterprise':
        oo_cfg.settings['variant'] = 'openshift-enterprise'
    version = find_variant(oo_cfg.settings['variant'])[1]
    oo_cfg.settings['variant_version'] = version.name
    click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
        old_variant, old_version, oo_cfg.settings['variant'],
        oo_cfg.settings['variant_version']))
    for host in oo_cfg.hosts:
        click.echo(" * %s" % host.connect_to)

    if not ctx.obj['unattended']:
        # Prompt interactively to confirm:
        proceed = click.confirm("\nDo you wish to proceed?")
        if not proceed:
            click.echo("Upgrade cancelled.")
            sys.exit(0)

    retcode = openshift_ansible.run_upgrade_playbook(verbose)
    if retcode > 0:
        click.echo("Errors encountered during upgrade, please check %s." %
                   oo_cfg.settings['ansible_log_path'])
    else:
        # Persist the new variant/version only after a successful upgrade.
        oo_cfg.save_to_disk()
        click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.pass_context
def install(ctx, force):
    # No docstring on purpose: it would become this subcommand's --help text.
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    if ctx.obj['unattended']:
        error_if_missing_info(oo_cfg)
    else:
        oo_cfg = get_missing_info_from_user(oo_cfg)

    check_hosts_config(oo_cfg, ctx.obj['unattended'])

    print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
                                                            verbose)
    if error:
        click.echo("There was a problem fetching the required information. " \
                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    hosts_to_run_on, callback_facts = get_hosts_to_run_on(
        oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)

    click.echo('Writing config to: %s' % oo_cfg.config_path)

    # We already verified this is not the case for unattended installs, so this can
    # only trigger for live CLI users:
    # TODO: if there are *new* nodes and this is a live install, we may need the user
    # to confirm the settings for new nodes. Look into this once we're distinguishing
    # between new and pre-existing nodes.
    if len(oo_cfg.calc_missing_facts()) > 0:
        confirm_hosts_facts(oo_cfg, callback_facts)

    oo_cfg.save_to_disk()

    click.echo('Ready to run installation process.')
    message = """
If changes are needed please edit the config file above and re-run.
"""
    if not ctx.obj['unattended']:
        confirm_continue(message)

    error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
                                                hosts_to_run_on, verbose)
    if error:
        # The bootstrap script will print out the log location.
        message = """
An error was detected.  After resolving the problem please relaunch the
installation process.
"""
        click.echo(message)
        sys.exit(1)
    else:
        message = """
The installation was successful!
If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication and much
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
        click.echo(message)
        click.pause()
# Register the subcommands on the top-level click group.
cli.add_command(install)
cli.add_command(upgrade)
cli.add_command(uninstall)

if __name__ == '__main__':
    # This is expected behaviour for context passing with click library:
    # pylint: disable=unexpected-keyword-arg
    cli(obj={})
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/local/bin/python\n# VER=2.0.0.2\n# 09 FEB 2012\n\n\"\"\"\nfg UTILITIES\nrequires:\n + winscp for win32\n + pexpect 2.3 on linux\n\"\"\"\nimport re, sys, time, os, getpass, string, traceback\nfrom os import popen\nfrom optparse import OptionParser\nfrom subpr... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/local/bin/python\n# VER=2.0.0.2\n# 09 FEB 2012\n\n\"\"\"\nfg UTILITIES\nrequires:\n + winscp for win32\n + pexpect 2.3 on linux\n\"\"\"\nimport re, sys, time, os, getpass, string, traceback\nfrom os import popen\nfrom optparse import OptionPa... | ```python
#!/usr/local/bin/python
# VER=2.0.0.2
# 09 FEB 2012
"""
fg UTILITIES
requires:
+ winscp for win32
+ pexpect 2.3 on linux
"""
import re, sys, time, os, getpass, string, traceback
from os import popen
from optparse import OptionParser
from subprocess import *
try:
import pexpect
except:
pass
class fg:
    def __init__(self, userLogin, userID, userPassword, server, **kwargs):
        """
        Initializes class setup some variables.
        fg = fg(userLogin, userID, userPassword, server, kwargs[sharedDIRBool, userDIRBool, diskDIRBool, fileWildcard, debugBool, timeout, privKeyFile])

        The *DIRBool flags select the remote directory (/data/shared/,
        /data/users/<id>/ or /data/gc/); debugBool and timeout are required
        kwargs. Exits the process if userID is unset or the key file is
        missing.
        """
        # Defaults for the optional keyword settings.
        self.sharedDIRBool = self.userDIRBool = self.diskDIRBool = False
        self.fileWildcard = ""
        self.debugBool = False
        self.timeout = 120
        #determine OS
        self.platform = sys.platform
        if self.debugBool: print "Running on %s" % self.platform
        self.userLogin = userLogin
        self.userID = userID
        self.userPassword = userPassword
        self.server = server
        self.remoteDir = ""
        self.destDir = "."
        #self.privKeyFile = privKeyFile
        if kwargs.__contains__("sharedDIRBool"):
            self.sharedDIRBool = kwargs["sharedDIRBool"]
            if self.sharedDIRBool: self.remoteDir = "/data/shared/"
        if kwargs.__contains__("userDIRBool"):
            self.userDIRBool = kwargs["userDIRBool"]
            if self.userDIRBool: self.remoteDir = "/data/users/" + self.userID + "/"
        if kwargs.__contains__("diskDIRBool"):
            self.diskDIRBool = kwargs["diskDIRBool"]
            if self.diskDIRBool: self.remoteDir = "/data/gc/"
        # NOTE(review): self.privKeyFile is only assigned when the kwarg is
        # present, yet it is read unconditionally below -- presumably callers
        # always pass privKeyFile; verify, else AttributeError.
        if kwargs.__contains__("privKeyFile"):
            self.privKeyFile = kwargs["privKeyFile"]
        if kwargs.__contains__("fileWildcard"):
            self.fileWildcard = kwargs["fileWildcard"]
        # debugBool and timeout are mandatory kwargs (KeyError if absent).
        self.debugBool = kwargs["debugBool"]
        self.timeout = int(kwargs["timeout"])
        #ask for a password if the user didn't specify one or a privKeyFile
        if not self.userPassword and not self.privKeyFile:
            self.userPassword = self.setPass()
        if not self.userID:
            print "USER ID NOT SET!!"
            exit(0)
        if not os.path.isfile(self.privKeyFile):
            print bcolors.BOLD + bcolors.FAIL + "\n\t[!] Key file does not exist: " + self.privKeyFile + bcolors.ENDC + "\n\n"
            sys.stdout.flush()
            exit(0)
        #this is the host key for the server to SSH into, needed for winscp
        self.host_key = "ssh-rsa 2048 xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx"
        if(self.platform == "linux2"):
            # Patterns handed to pexpect.expect() when driving scp.
            self.sshKeys = [
                'authenticity',
                'assword:',
                'denied',
                'No such file or directory',
                '100%',
                'ETA',
                pexpect.EOF,
                'Permission denied',
                'total '
            ]
            # Patterns handed to pexpect.expect() when driving sftp.
            self.sftpKeys = [
                'authenticity',
                'assword:',
                'denied',
                pexpect.EOF,
                'sftp>',
                'Connecting to'
            ]
#--------------------------------
    def setPass(self):
        """
        Prompts the user for a password if this class was not passed the password by another script

        Returns the password string read via getpass (not echoed).
        """
        print "\n"
        userPassword = getpass.getpass()
        # NOTE(review): debug mode prints the password in clear text.
        if self.debugBool: print "Password set: %s" % (userPassword)
        print "\n\n"
        return(userPassword)
#--------------------------------
    def fgAutoGet(self):
        """
        Automatically gets the files. Does a dir, displays the file list, prompts user for all, #, or wildcard get

        On win32 the winscp command is only printed (transfer disabled); on
        linux2 an scp wildcard copy is executed via pexpect. Exits on
        CustomException from the scp run.
        """
        #if self.debugBool: print "Using options: %s --> %s" % (self.type, self.userLogin)
        if(self.platform == "win32"):
            # list the files then display them to the user
            print "AUTO GET FILES WIN32"
            print "===================================="
            #cmd = 'cmd.exe /c winscp ' + self.userLogin + ":" + self.userPassword + '@' + self.server + " -hostkey\=\"" + self.host_key + "\" /command \"option confirm off\" \"get " + self.remoteDir + self.fileWildcard + "* " + self.destDir + "\ \" exit \n"
            #cmdnopass = 'cmd.exe /c winscp ' + self.userLogin + ":" + "<PASSWORD>" + '@' + self.server + " -hostkey\=\"" + self.host_key + "\" /command \"option confirm off\" \"get " + self.remoteDir + self.fileWildcard + "* " + self.destDir + "\ \" exit \n"
            cmd = 'cmd.exe /c winscp ' + "/console /command \"open " + self.userLogin + ":" + self.userPassword + '@' + self.server + "\" \"option confirm off\" \"get " + self.remoteDir + self.fileWildcard + "* " + self.destDir + "\ \" exit" + " -hostkey\=\"" + self.host_key
            print cmd
            #print "SENDING COMMAND: %s" % cmdnopass
            #output = fg.winRunIt(cmd)
            #print "\t[+] " + output.strip()
        elif(self.platform == "linux2"):
            print "AUTO GET FILES LINUX"
            additionalArgs=""
            #If we need to pass some additional args, do so here
            if (self.privKeyFile):
                additionalArgs= '-i ' + self.privKeyFile + ' '
            # A leading '^' anchors the wildcard to the start of the filename
            # (no leading '*').
            # NOTE(review): self.fileWildcard[0] raises IndexError when the
            # wildcard is the empty-string default -- confirm callers always
            # pass fileWildcard.
            if (self.fileWildcard[0]=='^'):
                cmd = 'scp ' + str(additionalArgs) + self.userLogin + '@' + self.server + ':' + self.remoteDir + self.fileWildcard.lstrip('^') + "* " + self.destDir
            else:
                cmd = 'scp ' + str(additionalArgs) + self.userLogin + '@' + self.server + ':' + self.remoteDir + "*" + self.fileWildcard + "* " + self.destDir
            print "===================================="
            print "\t" + cmd
            try:
                # NOTE(review): called via the class with cmd bound to the
                # 'self' slot of nixRunIt (defined outside this view).
                outputChild = fg.nixRunIt(cmd, self.sshKeys)
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + "\n\t[!] " + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
#--------------------------------
    def fgManualGet(self):
        """
        Provides the user with a list of files then gets the user selected files.

        Flow (linux2 only; win32 is disabled): sftp 'ls -l' the remote dir,
        parse the listing, show a numbered menu, then copy via scp based on
        the user's answer: "all", a comma/space list of numbers, or a
        substring matched against filenames. "q"/"quit" exits.
        """
        # Matches ls -l permission strings (e.g. "-rw-r--r-- ") at line start.
        file_re = re.compile(r"^[drwx-]+\s", re.IGNORECASE | re.VERBOSE)
        if(self.platform == "win32"):
            #cd into directory then dir
            print "====================================\n"
            print " SORRY NOT WORKING YET! PUNT!"
            exit(0)
            #cmd = 'cmd.exe /c winscp ' + self.userLogin + ":" + self.userPassword + '@' + self.server + " -hostkey\=\"" + self.host_key + "\" /command \"cd " + self.remoteDir + "\" dir exit \n"
            #output = fg.winRunIt(cmd)
        elif(self.platform == "linux2"):
            additionalArgs=""
            #If we need to pass some additional args, do so here
            if (self.privKeyFile):
                additionalArgs= '-oIdentityFile=' + self.privKeyFile + ' '
            # TODO, implement this with sftp: sftp -oIdentityFile=/root/testKey op@server
            sftpCmd = 'sftp ' + str(additionalArgs) + self.userLogin + '@' + self.server
            sftpRunCmd='ls -l ' + self.remoteDir
            print sftpCmd + " THEN RUNNING " + sftpRunCmd
            print "===================================="
            try:
                #outputChild = fg.sftpRunCmd(sftpCmd,sftpRunCmd, self.sftpKeys)
                result = fg.sftpRunCmd(sftpCmd,sftpRunCmd, self.sftpKeys)
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + "\n\t[!] " + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
            #lines = string.split(str(outputChild.before), "\r\n")
            #outputChild.close()
            #result = string.split(str(outputChild.before), "\r\n")
            lines = string.split(str(result), "\r\n")
            fileList = {}
            print "\t[+] Getting list of files...\n"
            # Parse the ls -l output: last whitespace-separated field is the
            # filename, first field is the permission/type string.
            for line in lines:
                if file_re.match(line):
                    filename = re.split('\s+', line)
                    nf = string.strip(filename[len(filename)-1])
                    nftype = string.strip(filename[0])
                    if not (nf == "." or nf == ".."):
                        fileList[nf] = nftype
            # Build a 1-based numbered menu of the sorted filenames.
            cnt = 1
            keys = fileList.keys()
            keys.sort()
            fileList2 = {}
            for key in keys:
                print "\t[%3s] %10s %s" % (cnt, fileList[key], key)
                fileList2[cnt] = [key, fileList[key]]
                cnt = cnt + 1
            if cnt > 1:
                print "Please select file(s) to copy: (\"all\" | num,[num...] | part of the filename) q = quit"
                filesget = raw_input('-->')
                print "====================================\n"
            else:
                print "NO FILES WAITING! SKIPPING PROMPT!"
                filesget = "quit"
            if filesget == "q" or filesget == "quit":
                exit(0)
            elif filesget == "all":
                #get all files
                for key in keys:
                    cmd = "scp " + str(additionalArgs) + self.userLogin + "@" + self.server + ":" + self.remoteDir + key + " " + self.destDir
                    print "\t[+] " + cmd
                    try:
                        outputChild = fg.nixRunIt(cmd, self.sshKeys)
                    except CustomException, (instance):
                        print bcolors.BOLD + bcolors.FAIL + "\n\t[!] " + instance.parameter + bcolors.ENDC + "\n\n"
                        exit(0)
                    print "\t======="
            #get #,# | # #
            elif re.match("[0-9\,]+", filesget):
                filesget = filesget.replace(", ", ",")
                tmpF = re.split(",|\s", filesget)
                for i in tmpF:
                    #catch error when user put in number out of index, or not an INT
                    if str(i).isdigit() and int(i) <= int(len(keys)):
                        cmd = "scp " + str(additionalArgs) + self.userLogin + "@" + self.server + ":" + self.remoteDir + str(fileList2[int(i)][0]) + " " + self.destDir
                        print "\t[+] " + cmd
                        try:
                            outputChild = fg.nixRunIt(cmd, self.sshKeys)
                        except CustomException, (instance):
                            print bcolors.BOLD + bcolors.FAIL + "\n\t[!] " + instance.parameter + bcolors.ENDC + "\n\n"
                            exit(0)
                        print "\t======="
                    else:
                        #raise CustomException("\t[!] BAD USER INPUT FORMAT! - %s, MALFORMED CHARACTER OR INDEX OUT OF BOUNDS!!" % i)
                        if str(i).isdigit() and int(i) > int(len(keys)):
                            #try a wildcard get on the file even though it is an integer before bailing out
                            getFileStr = "*" +str(i) + "*"
                            cmd = "scp " + str(additionalArgs) + self.userLogin + "@" + self.server + ":" + self.remoteDir + getFileStr + " " + self.destDir
                            print "\t[+] " + cmd
                            try:
                                #TODO properly handle the output for when this matches multiple files (it works it just doesn't show all the files that got copied)
                                outputChild = fg.nixRunIt(cmd, self.sshKeys)
                            except CustomException, (instance):
                                print bcolors.BOLD + bcolors.FAIL + "You either entered a number that was invalid or a filename with digits only which apparently wasn't on the server"
                                print bcolors.BOLD + bcolors.FAIL + "\n\t[!] " + instance.parameter + bcolors.ENDC + "\n\n"
                                exit(0)
                            #print bcolors.BOLD + bcolors.FAIL + "\t[!] BAD USER INPUT! <" + str(i) + "> INDEX OUT OF BOUNDS, SKIPPING TO NEXT ONE..." + bcolors.ENDC
                            #print "\t======="
                        else:
                            print bcolors.BOLD + bcolors.FAIL + "\t[!] NO IDEA WHAT YOU DID! <" + str(i) + ">, SKIPPING TO NEXT ONE..." + bcolors.ENDC
                            print "\t======="
            #get filename match
            #TODO fixup case where string is given that doesn't match ( ie someone accidentally types filename,1,3 )
            elif re.match('\w+', filesget):
                for key in keys:
                    if re.search(filesget, key, re.IGNORECASE | re.VERBOSE):
                        cmd = "scp " + str(additionalArgs) + self.userLogin + "@" + self.server + ":" + self.remoteDir + key + " " + self.destDir
                        print "\t[+] " + cmd
                        try:
                            outputChild = fg.nixRunIt(cmd, self.sshKeys)
                        except CustomException, (instance):
                            print bcolors.BOLD + bcolors.FAIL + "\n\t[!] " + instance.parameter + bcolors.ENDC + "\n\n"
                            exit(0)
                        print "\t======="
            #This seems to not be needed
            #elif (keys=1): #if we get througnall keys and no match:
            #    print "DEBUGGING key " + key + " keys " + str(keys) + " filesget " + filesget
            #    raise CustomException("\t[!] FILE MATCH NOT FOUND! - THINK ABOUT WHAT YOU WANT THEN TRY AGAIN!!")
            else:
                raise CustomException("\t[!] BAD USER INPUT FORMAT! - THINK ABOUT WHAT YOU WANT THEN TRY AGAIN!!")
#--------------------------------
def winRunIt(self, cmd):
    """
    Run a command on Windows.

    Currently a stub: the subprocess-based implementation below is
    commented out, so calling this is a no-op that returns None.
    """
    pass
    #print "Running " + cmd
    #p1 = Popen(cmd, stdout=PIPE, stderr=PIPE)
    #output = p1.communicate()[0]
    #erroutput = p1.communicate()[1]
    #p1.wait()
    #return output
#--------------------------------
def sftpRunCmd(self, sftpConnectCmd, sftpCommand, expectKeys):
    """
    Run a single command through an interactive sftp session.

    Spawns ``sftpConnectCmd`` under pexpect, answers the host-key and
    password prompts, sends ``sftpCommand`` at the ``sftp>`` prompt and
    returns that command's captured output as a string.  If the session
    never reaches the prompt, the pexpect child is returned instead.
    ``expectKeys`` is the ordered pattern list whose match index drives
    the ``seen`` state machine below.
    """
    child = pexpect.spawn(sftpConnectCmd, timeout=self.timeout,)
    seen = child.expect(expectKeys)
    workedB = False          # did we ever see evidence of progress?
    printWorkedCNT = 0       # >0 once a progress pattern has matched
    cnt = 0                  # spinner phase counter
    cnt2 = 0                 # spinner phase for repeated password prompts
    #yup, this is a horrible duplication of code
    # NOTE(review): indentation below was reconstructed from a
    # whitespace-mangled copy; nesting mirrors nixRunIt() -- confirm.
    while seen != 3:
        #print "Debugging " + str(child)
        cnt = cnt + 1
        if printWorkedCNT == 1:
            sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
            sys.stdout.flush()
            sys.stdout.write("\t[+] RUNNING COMMAND [ " + sftpConnectCmd + " ]")
            sys.stdout.flush()
        #~~~~~~~~~~~~~~~
        #authenticty
        # index 0: ssh host-authenticity prompt -> accept the key
        if seen == 0:
            sys.stdout.write("\t[+] ACCEPTING RSA KEY...")
            sys.stdout.flush()
            child.sendline('yes')
            seen = child.expect(expectKeys)
            sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
            sys.stdout.flush()
        #assword:
        # index 1: password prompt -> authenticate (spinner on repeats)
        if seen == 1:
            child.sendline(self.userPassword)
            if cnt2 < 1:
                sys.stdout.write("\t[+] AUTHENTICATING WITH SSH SERVER...")
                sys.stdout.flush()
            else:
                if cnt2 == 1:
                    sys.stdout.write("\r|")
                    sys.stdout.flush()
                if cnt2 == 2:
                    sys.stdout.write("\r/")
                    sys.stdout.flush()
                if cnt2 == 3:
                    sys.stdout.write("\r-")
                    sys.stdout.flush()
                if cnt2 == 4:
                    sys.stdout.write("\r\\")
                    sys.stdout.flush()
                    cnt2 = 0
            cnt2 = cnt2 + 1
            seen = child.expect(expectKeys)
        #sftp>
        # index 4: sftp prompt -> send the command, capture its output,
        # then quit cleanly and return the captured text
        if seen == 4:
            sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
            sys.stdout.flush()
            print "Sending command " + sftpCommand
            sys.stdout.flush()
            child.sendline(sftpCommand)
            seen = child.expect(expectKeys)
            sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
            sys.stdout.flush()
            workedB = True
            #print "DEBUGGING case 4 " + str(child)
            result=str(child.before)
            #now quit and cleanup
            child.sendline("quit")
            seen = child.expect(expectKeys)
            child.close()
            return result
        #Connecting to ...
        # index 5: connection banner -> keep waiting
        if seen == 5:
            print "Connecting to server"
            seen = child.expect(expectKeys)
    if workedB:
        sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
        sys.stdout.flush()
        sys.stdout.write(bcolors.OKGREEN + "[OK]" + bcolors.ENDC + "\t[+] SESSION COMPLETE!\n")
        sys.stdout.flush()
    else:
        print bcolors.BOLD + bcolors.FAIL + "\n\t[!] CONNECTION ERROR - CHECK IP ADDRESS, USERNAME, OR PASSWORD\n\n"
        sys.stdout.flush()
    #seen = child.expect(expectKeys)
    return(child)
#--------------------------------
def nixRunIt(self, cmd, expectKeys):
"""
Controls Pexpect for
"""
child = pexpect.spawn(cmd, timeout=self.timeout,)
seen = child.expect(expectKeys)
workedB = False
printWorkedCNT = 0
cnt = 0
cnt2 = 0
while seen != 6:
cnt = cnt + 1
if printWorkedCNT == 1:
sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
sys.stdout.flush()
sys.stdout.write("\t[+] RUNNING COMMAND [ " + cmd + " ]")
sys.stdout.flush()
#~~~~~~~~~~~~~~~
#authenticty
if seen == 0:
sys.stdout.write("\t[+] ACCEPTING RSA KEY...")
sys.stdout.flush()
child.sendline('yes')
seen = child.expect(expectKeys)
sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
sys.stdout.flush()
#assword:
if seen == 1:
child.sendline(self.userPassword)
if cnt2 < 1:
sys.stdout.write("\t[+] AUTHENTICATING WITH SSH SERVER...")
sys.stdout.flush()
else:
if cnt2 == 1:
sys.stdout.write("\r|")
sys.stdout.flush()
if cnt2 == 2:
sys.stdout.write("\r/")
sys.stdout.flush()
if cnt2 == 3:
sys.stdout.write("\r-")
sys.stdout.flush()
if cnt2 == 4:
sys.stdout.write("\r\\")
sys.stdout.flush()
cnt2 = 0
cnt2 = cnt2 + 1
seen = child.expect(expectKeys)
#denied:
if seen == 2:
workedB = False
child.kill(0)
raise CustomException("ACCESS DENIED! - CHECK USERNAME OR PASSWORD\n\n\t!! IF YOU SEE A DIALOG BOX CLOSE PRESS CANCEL !!")
#'No such file or directory',
if seen == 3:
#workedB = False
child.kill(0)
sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
sys.stdout.flush()
raise CustomException("FILE MATCH NOT FOUND! - MAYBE THERE ARE NO FILES WAITING FOR YOU ON THE SERVER?")
#100%
if seen == 4:
printWorkedCNT = printWorkedCNT + 1
workedB = True
sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
sys.stdout.flush()
sys.stdout.write("\t")
sys.stdout.flush()
tmpStr = str(child.before)
tmpStr = tmpStr.replace("\r", "")
tmpStr = tmpStr.replace("\d", "")
tmpStr = tmpStr.replace("\n", "")
sys.stdout.write(tmpStr)
sys.stdout.flush()
seen = child.expect(expectKeys)
#ETA
if seen == 5:
printWorkedCNT = printWorkedCNT + 1
workedB = True
if cnt == 1:
sys.stdout.write("\r|")
sys.stdout.flush()
if cnt == 2:
sys.stdout.write("\r/")
sys.stdout.flush()
if cnt == 3:
sys.stdout.write("\r-")
sys.stdout.flush()
if cnt == 4:
sys.stdout.write("\r\\")
sys.stdout.flush()
cnt = 1
seen = child.expect(expectKeys)
#Permission denied
if seen == 7:
workedB = False
child.kill(0)
raise CustomException("ACCESS DENIED! - CHECK USERNAME OR PASSWORD\n\n\t!! IF YOU SEE A DIALOG BOX CLOSE PRESS CANCEL !!")
workedB = True
#total (result from an ls when a key is used versus password authentication)
if seen == 8:
wokedB = True
sys.stdout.write("\t[+] REMOTE LISTING COMPLETE.")
sys.stdout.flush()
seen = child.expect(expectKeys)
if workedB:
sys.stdout.write(bcolors.OKGREEN + "\r[OK]" + bcolors.ENDC + "\n")
sys.stdout.flush()
sys.stdout.write(bcolors.OKGREEN + "[OK]" + bcolors.ENDC + "\t[+] SESSION COMPLETE!\n")
sys.stdout.flush()
else:
print bcolors.BOLD + bcolors.FAIL + "\n\t[!] CONNECTION ERROR - CHECK IP ADDRESS, USERNAME, OR PASSWORD\n\n"
sys.stdout.flush()
#seen = child.expect(expectKeys)
return(child)
#--------------------------------
class CustomException(Exception):
    """
    Application-specific error carrying a human-readable message.

    The message is exposed as ``parameter`` (read by callers in this
    file) and, fixed here, also through the standard ``Exception``
    machinery (``args``), so the exception prints usefully on its own.
    """
    def __init__(self, value):
        # BUGFIX: also call Exception.__init__ so args/repr/traceback
        # output carry the message (the original left args empty).
        Exception.__init__(self, value)
        self.parameter = value

    def __str__(self):
        # Callers expect the repr()-style quoting of the message.
        return repr(self.parameter)
#--------------------------------
class bcolors:
    """
    ANSI escape sequences for colored console output.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'

    def disable(self):
        """Turn off coloring on this instance."""
        # Shadow every class-level escape code with an empty string so
        # subsequent writes through this instance emit no color codes.
        for attr in ("HEADER", "OKBLUE", "OKGREEN", "WARNING", "FAIL",
                     "BOLD", "ENDC"):
            setattr(self, attr, "")
#--------------------------------
if(__name__ == "__main__"):
    """
    Main
    """
    # Command-line entry point: parse options, build the fg() client and
    # dispatch to auto (wildcard) or manual (interactive) retrieval for
    # the chosen remote directory type.
    # setup args
    VER = '2.0.0.1'
    parser = OptionParser(usage='%prog -l <USERLOGIN> -u <USERID> -p <USERPASS> -s <SERVER> (--sharedDIR|--userDIR|--diskDIR) [-f PART_OF_FILENAME]', add_help_option = True)
    #connection info
    parser.add_option("-v", dest="versionB", action="store_true", default=False)
    parser.add_option("-l", "--LoginUser", dest="userLogin", help="Your server login username")
    parser.add_option("-u", "--userID", dest="userID", help="Your user ID number")
    parser.add_option("-p", "--pass", dest="userPassword", default=None, help="Your password")
    parser.add_option("-s", "--server", dest="server", help="The server to connect to")
    #types
    parser.add_option("--sharedDIR", dest="sharedDIRBool", action="store_true", default=False, help="Get files from shared directory")
    parser.add_option("--userDIR", dest="userDIRBool", action="store_true", default=False, help="Get files from user directory")
    parser.add_option("--diskDIR", dest="diskDIRBool", action="store_true", default=False, help="Get files from disk directory")
    parser.add_option("-f", "--file", dest="fileWildcard", default=None, help="Get files with this wildcard; REGEX used => .*YOURTEXT.*")
    parser.add_option("-i", "--privKeyFile", dest="privKeyFile", default=None, help="Keyfile to use for server authentication")
    parser.add_option("--debug", dest="debugBool", action="store_true", default=False, help="Prints more stuff to the screen")
    parser.add_option("--timeout", dest="timeout", default=120, help="Overrides the timeout for ssh sessions to server")
    (options, sys.argv) = parser.parse_args(sys.argv)
    #print "login:" + options.userLogin + "\nuser:" + options.userID + "\npass:" + options.userPassword + "\nserver:" + options.server + "\nshared:" + str(options.sharedDIRBool) + "\nuser:" + str(options.userDIRBool) + "\ndisk:" + str(options.diskDIRBool) + "\nwildcard:" + str(options.fileWildcard) + "\ndebug:" + str(options.debugBool) + "\ntimeout:" + str(options.timeout)
    # -v short-circuits everything else
    if options.versionB:
        print VER
        exit(0)
    #User must put in one of these options or fail!
    if not(options.sharedDIRBool or options.userDIRBool or options.diskDIRBool):
        print "\n\n!!! DID NOT SPECIFY TYPE !!!\n\t[--sharedDIR | --userDIR | --diskDIR]\n\n"
        exit(0)
    # Build the client; note the name "fg" rebinds the class to the
    # instance from here on.
    # NOTE(review): the bare "except:" below hides the real constructor
    # error and reports every failure as access-denied -- consider
    # narrowing it.
    try:
        fg = fg(options.userLogin, options.userID, options.userPassword, options.server, sharedDIRBool=options.sharedDIRBool, userDIRBool=options.userDIRBool, diskDIRBool=options.diskDIRBool, fileWildcard=options.fileWildcard, debugBool=options.debugBool, timeout=options.timeout, privKeyFile=options.privKeyFile)
    except:
        print "\n\n!!! FG EXCEPTION !!!\n!!! CHECK USAGE !!!"
        print "usage: fg.py -l <USERLOGIN> -u <USERID> -p <USERPASS> -s <SERVER> (--sharedDIR|--userDIR|--diskDIR) [-f PART_OF_FILENAME]\n\n"
        try:
            raise CustomException("ACCESS DENIED! - CHECK USERNAME OR PASSWORD\n\n\t!! IF YOU SEE A DIALOG BOX CLOSE PRESS CANCEL !!")
        except CustomException, (instance):
            print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
            if options.debugBool: print sys.exc_info()
            if options.debugBool: print str(traceback.tb_lineno(sys.exc_traceback))
        exit(0)
    # NOTE(review): the three branches below are identical except for the
    # debug banner -- candidates for deduplication.
    #shared
    if options.sharedDIRBool:
        if options.debugBool: print "SHARED!!"
        if options.fileWildcard:
            print "AUTO GET WITH WILDCARD %s" % options.fileWildcard
            try:
                fg.fgAutoGet()
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
        else:
            print "PROMPT USER FILENAMES TO GET"
            try:
                fg.fgManualGet()
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
    #user
    elif options.userDIRBool:
        if options.debugBool: print "USER_DIR!!"
        if options.fileWildcard:
            print "AUTO GET WITH WILDCARD %s" % options.fileWildcard
            try:
                fg.fgAutoGet()
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
        else:
            print "PROMPT USER FILENAMES TO GET"
            try:
                fg.fgManualGet()
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
    #disks
    elif options.diskDIRBool:
        if options.debugBool: print "DISK!!"
        if options.fileWildcard:
            print "AUTO GET WITH WILDCARD %s" % options.fileWildcard
            try:
                fg.fgAutoGet()
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
        else:
            print "PROMPT USER FILENAMES TO GET"
            try:
                fg.fgManualGet()
            except CustomException, (instance):
                print bcolors.BOLD + bcolors.FAIL + instance.parameter + bcolors.ENDC + "\n\n"
                exit(0)
    print "\n\n\n"
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\nfrom django.db import models\nfrom django.utils import timezone\nfrom django_extensions.db.models import TimeStampedModel\n\nfrom .managers import PartyQuerySet\n\n\nclass Party(TimeStampedModel):\n \"\"\"\n A UK... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\nfrom django.db import models\nfrom django.utils import timezone\nfrom django_extensions.db.models import TimeStampedModel\n\nfrom .managers import PartyQuerySet\n\n\nclass Party(TimeStampedModel):\n ... | ```python
from django.db import models
from django.utils import timezone
from django_extensions.db.models import TimeStampedModel
from .managers import PartyQuerySet
class Party(TimeStampedModel):
    """
    A UK political party

    The canonical source of these parties is The Electoral Commission, with
    some exceptions. In Law, a candidate can *technically* stand for 2 or
    fewer parties (that is, 0, 1 or 2 parties).

    To save representing this in the data model (and all the complexities that
    would arise) we make three types of "pseudo-party" objects:

    1. "Independent" (standing for 0 parties) is given the ID "ynmp-party:2"

    2. Joint parties (candidate standing for two parties) are each given a
    pseudo-party. These parties are guesses at by looking at the descriptions
    in the source data. For example, a description might look like:

    "Labour and Co-operative Party (Joint Description with Labour Party)"

    If we detect "Joint Description with" (or similar) we make a new party

    3. "Speaker seeking re-election". The speaker of the House of Commons doesn't
    stand for a party, rather they are elected directly in to that role
    (sort of). This is given the ID "ynmp-party:12522"
    """

    ec_id = models.CharField(
        db_index=True,
        max_length=20,
        unique=True,
        verbose_name="Electoral Commission Idenfitier",
        help_text="""
        An ID issued by The Electoral Commission in their party register,
        with the exception of Democracy Club psuedo IDs for special parties
        """,
    )
    name = models.CharField(max_length=255, verbose_name="Party name")
    register = models.CharField(
        max_length=2,
        db_index=True,
        null=True,
        verbose_name="Party register",
        help_text="""
        Normally either `GB` or `NI` depending on the
        country the party is registered in. Pseudo-parties don't have a
        register, so this field is nullable.
        """,
    )
    status = models.CharField(
        db_index=True,
        max_length=255,
        verbose_name="Party registration status",
        choices=[
            ("Registered", "Registered"),
            ("Deregistered", "Deregistered"),
        ],
    )
    date_registered = models.DateField()
    date_deregistered = models.DateField(null=True)
    legacy_slug = models.CharField(
        max_length=256,
        blank=True,
        unique=True,
        help_text="""
        DEPRECATED: A slug used in URLs that comes from a previous way of modelling parties.
        This field will be removed in the future in favour of the `ec_id`.
        """,
    )
    # Denormalised candidate counts -- presumably maintained by an
    # import/update task elsewhere; TODO confirm the updater.
    current_candidates = models.PositiveSmallIntegerField(default=0)
    total_candidates = models.PositiveIntegerField(default=0)

    objects = PartyQuerySet.as_manager()

    def __str__(self):
        return "{} ({})".format(self.name, self.ec_id)

    class Meta:
        ordering = ("name",)

    @property
    def default_emblem(self):
        """
        Parties can have `n` emblems, however there is typically one that is
        the "main" emblem. For example a party might have three emblems:
        "The Foo Party", "The Foo Party of Wales", "The Scottish Foo Party".

        When showing a single emblem without the context of a candidate, it's
        useful to have a shortcut to the most generic version of the party
        emblem.
        """
        # First by PartyEmblem.Meta.ordering, i.e. the default emblem.
        return self.emblems.first()

    @property
    def as_slack_attachment(self):
        """
        Format this Party in a way that can be sent to `utils.slack.SlackHelper`

        :return: an attachment dict with title/link, plus image and
            description fields when emblems or descriptions exist
        """
        attachment = {
            "title": self.name,
            "title_link": "http://search.electoralcommission.org.uk/English/Registrations/{}".format(
                self.ec_id
            ),
        }
        if self.default_emblem:
            attachment[
                "image_url"
            ] = "http://search.electoralcommission.org.uk/Api/Registrations/Emblems/{}".format(
                self.default_emblem.ec_emblem_id
            )
        if self.descriptions.exists():
            attachment["fields"] = [
                {
                    "title": "Descriptions",
                    "value": "\n".join(
                        [d.description for d in self.descriptions.all()]
                    ),
                    "short": False,
                }
            ]
        return attachment

    @property
    def is_deregistered(self):
        # NOTE(review): this returns True only while date_deregistered is
        # in the *future*; if the intent is "has been deregistered", the
        # comparison looks inverted -- confirm against callers.
        if not self.date_deregistered:
            return False
        return self.date_deregistered > timezone.now().date()

    @property
    def format_name(self):
        # Display name, annotated with the deregistration date when the
        # is_deregistered property above is True.
        name = self.name
        if self.is_deregistered:
            name = "{} (Deregistered {})".format(name, self.date_deregistered)
        return name
class PartyDescription(TimeStampedModel):
    """
    A party can register one or more descriptions with The Electoral Commission.

    Each description can be used by a candidate on a ballot paper, along side
    their name and chosen emblem.
    """

    party = models.ForeignKey(
        Party, on_delete=models.CASCADE, related_name="descriptions"
    )
    description = models.CharField(max_length=800)
    # Nullable: approval date is not known for every imported description.
    date_description_approved = models.DateField(null=True)
def emblem_upload_path(instance, filename):
    """
    Build the storage path for a PartyEmblem image:
    ``emblems/<party ec_id>/<emblem id>_<original filename>``.
    """
    return f"emblems/{instance.party.ec_id}/{instance.ec_emblem_id}_{filename}"
class PartyEmblem(TimeStampedModel):
    """
    A Party can register emblems with The Electoral Commission.

    Candidates can pick one of the registered emblems to appear against their
    name on ballot papers.

    As a useful shortcut, we set a [default_emblem](#/definitions/Party/default_emblem)
    to indicate that this is the most generic emblem.
    """

    party = models.ForeignKey(
        Party, on_delete=models.CASCADE, related_name="emblems"
    )
    # The Electoral Commission's own emblem ID is used as the primary key.
    ec_emblem_id = models.PositiveIntegerField(primary_key=True)
    image = models.ImageField(upload_to=emblem_upload_path)
    description = models.CharField(max_length=255)
    date_approved = models.DateField(null=True)
    default = models.BooleanField(default=False)

    class Meta:
        # Default emblem first, then by EC emblem ID.
        ordering = ("-default", "ec_emblem_id")

    def __str__(self):
        return '{} ("{}")'.format(self.pk, self.description)
``` |
[
{
"content": "```python\nimport webracer\nimport nose.plugins.attrib\nfrom . import utils\nfrom .apps import form_app\n\nutils.app_runner_setup(__name__, form_app.app, 8059)\n\n@nose.plugins.attrib.attr('client')\n@webracer.config(host='localhost', port=8059)\nclass RequestViaFormTest(webracer.WebTestCase):\n ... | [
{
"content": "<|memory_start|>```python\nimport webracer\nimport nose.plugins.attrib\nfrom . import utils\nfrom .apps import form_app\n\nutils.app_runner_setup(__name__, form_app.app, 8059)\n\n@nose.plugins.attrib.attr('client')\n@webracer.config(host='localhost', port=8059)\nclass RequestViaFormTest(webracer.W... | ```python
import webracer
import nose.plugins.attrib
from . import utils
from .apps import form_app
utils.app_runner_setup(__name__, form_app.app, 8059)
@nose.plugins.attrib.attr('client')
@webracer.config(host='localhost', port=8059)
class RequestViaFormTest(webracer.WebTestCase):
    """
    Exercise passing a parsed form object (and its elements) directly to
    get()/post() against the form_app test application.
    """

    def test_get_form_as_url(self):
        # A form whose action is a GET target can be passed to get().
        self.get('/method_check_form')
        self.assert_status(200)

        form = self.response.form()
        self.get(form)
        self.assertEqual('GET', self.response.body)

    def test_post_form_as_url(self):
        # Posting the form with no elements sends an empty payload.
        self.get('/textarea')
        self.assert_status(200)

        form = self.response.form()
        self.post(form)
        self.assertEqual('{}', self.response.body)

    def test_post_form_with_elements(self):
        # Default form element values are submitted as-is.
        self.get('/textarea')
        self.assert_status(200)

        form = self.response.form()
        elements = form.elements
        self.post(form, elements)
        json = self.response.json
        self.assertEqual(dict(field='hello world'), json)

    def test_post_form_with_mutated_elements(self):
        # A mutable copy of the elements can override field values.
        self.get('/textarea')
        self.assert_status(200)

        form = self.response.form()
        elements = form.elements.mutable
        elements.set_value('field', 'changed')
        self.post(form, elements)
        json = self.response.json
        self.assertEqual(dict(field='changed'), json)
``` |
[
{
"content": "Repeat the following code:\n```python\n\"\"\"test_import.py: Functional tests for code that imports raw data from\nscientists and writes data files formatted to be included in the package\ndistributions\n\"\"\"\n\nimport re\n\n__author__ = \"Joel Dubowy\"\n\nfrom eflookup.fccs2ef.importer im... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"test_import.py: Functional tests for code that imports raw data from\nscientists and writes data files formatted to be included in the package\ndistributions\n\"\"\"\n\nimport re\n\n__author__ = \"Joel Dubowy\"\n\nfrom eflookup.fcc... | ```python
"""test_import.py: Functional tests for code that imports raw data from
scientists and writes data files formatted to be included in the package
distributions
"""
import re
__author__ = "Joel Dubowy"
from eflookup.fccs2ef.importer import (
Fccs2CoverTypeImporter, CoverType2EfGroupImporter,
EfGroup2EfImporter, CatPhase2EFGroupImporter
)
# TODO: put this in base class, add base class 'test_import' method, remove
# each class' test_import method, and add IMPORTER_CLASS class variable to
# each child class. This will only work if we can somehow tell py.test
# not to run test_import if the current class is the base class
def run_test(tmpdir, input_content, importer_class, expected_output):
    """
    Drive one importer end to end.

    Writes ``input_content`` to a CSV file in ``tmpdir`` (a py.path.local
    fixture), runs ``importer_class`` over it, then loads the generated
    python module text and asserts its single top-level assignment equals
    ``expected_output``.

    Raises AssertionError on any mismatch.
    """
    input_file = tmpdir.join("input.csv")
    input_file.write(input_content)
    input_filename = str(input_file)
    output_filename = input_filename.replace('input', 'output')
    importer_class(input_filename).write(output_file_name=output_filename)
    # Exactly the input and the generated output should exist.
    assert len(tmpdir.listdir()) == 2
    with open(output_filename, 'r') as output_file:
        output_content = output_file.read()
    # The generated file is "<VAR> = <literal>"; recover the variable name.
    var_name = re.compile('([^=]+)=').search(output_content).group(1).strip()
    # BUGFIX: execute in an explicit namespace. Relying on exec() mutating
    # locals() is undefined inside a function in Python 3 (and breaks
    # outright under PEP 667 / Python 3.13).
    namespace = {}
    exec(output_content, namespace)
    output = namespace[var_name]
    assert expected_output == output
class TestFccs2CoverTypeImporter(object):
    """Top level functional test for importing fccs id to cover type mappings
    """

    # Raw CSV as delivered (trailing empty columns included).
    INPUT_CONTENT = """fccs_id,cover_type_id,,,
0,404,,,
1,13,,,
2,131,,,
3,136,,,
4,118,,,
"""
    # fccs_id -> cover_type_id, both kept as strings by the importer.
    EXPECTED_OUTPUT = {
        "0":"404",
        "1":"13",
        "2":"131",
        "3":"136",
        "4":"118"
    }

    def test_import(self, tmpdir):
        run_test(tmpdir, self.INPUT_CONTENT, Fccs2CoverTypeImporter,
            self.EXPECTED_OUTPUT)
class TestCoverType2EfGroupImporter(object):
    """Top level functional test for importing cover type to ef group mappings
    """

    # Raw CSV; the last row deliberately has an empty RegionalWF column.
    INPUT_CONTENT = """MapID,Cover type,WF,Rx,RegionalRx,RegionalWF
1,SRM 101: Bluebunch Wheatgrass,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
2,SRM 102: Idaho Fescue,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
3,SRM 103: Green Fescue,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
4,SRM 104: Antelope Bitterbrush-Bluebunch Wheatgrass,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
5,SRM 105: Antelope Bitterbrush-Idaho Fescue,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
6,SRM 106: Bluegrass Scabland,6: Grass,6: Grass,24-26: W Grass,24-26: W Grass
7,SRM 107: Western Juniper-Big Sagebrush-Bluebunch Wheatgrass,5: Shrub,5: Shrub,30-32: W Shrub,30-32: W Shrub
13,SRM 203: Riparian Woodland,4: WF NW Conifer,3: Rx NW Conifer,27-29: W Hdwd,
"""
    # Note: the output is ordered by FCCS Id
    # Each cover type maps to the EF-group id portion of every column;
    # the missing RegionalWF in row 13 becomes None.
    EXPECTED_OUTPUT = {
        "1": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
        "2": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
        "3": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
        "4": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
        "5": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
        "6": {"wf": "6", "rx": "6", "regrx": "24-26", "regwf": "24-26"},
        "7": {"wf": "5", "rx": "5", "regrx": "30-32", "regwf": "30-32"},
        "13": {"wf": "4", "rx": "3", "regrx": "27-29", "regwf": None},
    }

    def test_import(self, tmpdir):
        run_test(tmpdir, self.INPUT_CONTENT, CoverType2EfGroupImporter,
            self.EXPECTED_OUTPUT)
class TestCatPhase2EFGroupImporter(object):
    """Top level functional test for importing category/phase to EF-group
    assignments (raw spreadsheet export, header noise included).
    """

    # Raw CSV export, including the free-text preamble rows the importer
    # must skip over; do not reformat.
    INPUT_CONTENT = """,,,,Note: This mapping should be used along with EF Group by FB to assign EFs.,,,,"CO2, CH4","CO, NOx, NH3, SO2, PM25","CO2, CO, CH4","NOx, NH3, SO2, PM25","CO2, CO, CH4, NH3, PM2.5","NOx, SO2","CO2, CO, CH4","NOx, NH3, SO2, PM25","CO2, CO, CH4, PM2.5","NOx, NH3, SO2","CO2, CO, CH4","NOx, NH3, SO2, PM25","CO2, CO, CH4, PM25","NOx, NH3, SO2","CO2, CO, CH4, NH3, PM25","NOx, SO2","CO2, CO, CH4, NH3, PM25","NOx, SO2","CO2, CO, CH4",,"Most of the time, the emissions module will use these rules (but see exceptions)",,,These are just for reference purposes.,,,,,,,,,,EF Group,CO2,CO,CH4,NOx,NH3,SO2,PM2.5,
"Duff = Ground fuels: upper duff, lower duff, basal accumulations (BA), squirrel middens (SM)",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
"CWD = Class 2 and 3 snags, coarse wood under woody",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
Follow this table for where residual emissions are expected (only N/A are not),,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,Duff/CWD,Consume output variable,Category,CombustionPhase,Generic Assignment,9-11: SE Grass,9-11: SE Grass,12-14: SE Hdwd,12-14: SE Hdwd,15-17: SE Pine,15-17: SE Pine,18-20: SE Shrub,18-20: SE Shrub,21-23: W MC,21-23: W MC,24-26: W Grass,24-26: W Grass,27-29: W Hdwd,27-29: W Hdwd,30-32: W Shrub,30-32: W Shrub,30-32: W Shrub,30-32: W Shrub,33-35: Boreal,,Simplified Rules,EF Group,,Group #,# Cover Type,Note,,,,,,,SE grass F/S,9,1700,70.2,2.67,3.26,1.2,0.97,12.08,
C_over_crown,canopy,overstory,,C_over_crown_F,Overstory tree crowns,Flaming,General (1-6),10,9,13,12,16,15,19,18,22,21,25,24,28,27,31,30,31,30,33,,All outputs except for below:,Flaming/Short-term smoldering EF Groups 1-6,,1,Southeastern Forest,Assigned by fuelbed,,,,,,,SE Grass F,10,1710,,2.42,,,,,
,,,,C_over_crown_S,Overstory tree crowns,Smoldering,General (1-6),11,9,14,12,17,15,20,18,23,21,26,24,29,27,32,30,32,30,34,,,,,2,Boreal Forest,Assigned by fuelbed,,,,,,,SE Grass S,11,1538,,5.4,,,,,
,,,,C_over_crown_R,Overstory tree crowns,Residual,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,,C_wood_S1000hr_R,Woody RSC (7),,3,Western Forest - Rx,Assigned by fuelbed and burn type (prescribed or wildfire),,,,,,,SE Hdwd F/S,12,1688,78.9,2.42,2.43,1.79,0.63,14.32,
C_mid_crown,canopy,midstory,,C_mid_crown_F,Midstory tree crowns,Flaming,General (1-6),10,9,13,12,16,15,19,18,22,21,25,24,28,27,31,30,31,30,33,,C_wood_R1000hr_R,Woody RSC (7),,4,Western Forest - WF,Assigned by fuelbed and burn type (prescribed or wildfire),,,,,,,SE Hdwd F,13,1702,68.6,1.92,,,,,
,,,CWD,C_snagc3_R,Class 3 snag wood,Residual,Woody RSC (7),7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,,"SE Hdwd (F, S)","CO2, CO, CH4",,Shrub,,,,,,,,,,,,,,,,,,
,,,,C_herb_1live_R,Herbs - live primary layer,Residual,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,,,,,LLM,,,,,,,,,,,,,,,,,,
"""
    # EF-group-range -> category -> sub-category -> phase -> pollutant
    # override; None means no EF is assigned (the spreadsheet's N/A).
    EXPECTED_OUTPUT = {
        '9-11': {
            'canopy': {
                'overstory': {
                    'smoldering': {'CO': '9', 'NH3': '9', 'NOx': '9', 'PM2.5': '9', 'SO2': '9'},
                    'flaming': {'CO': '9', 'NH3': '9', 'NOx': '9', 'PM2.5': '9', 'SO2': '9'},
                    'residual': {'CO': None, 'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'CO': '9', 'NH3': '9', 'NOx': '9', 'PM2.5': '9', 'SO2': '9'}
                },
                'snags class 3': {
                    'residual': {'CO': '7', 'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'CO': None, 'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
                }
            }
        },
        '12-14': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NH3': '12', 'NOx': '12', 'PM2.5': '12', 'SO2': '12'},
                    'flaming': {'NH3': '12', 'NOx': '12', 'PM2.5': '12', 'SO2': '12'},
                    'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NH3': '12', 'NOx': '12', 'PM2.5': '12', 'SO2': '12'}
                },
                'snags class 3': {
                    'residual': {'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
                }
            }
        },
        '15-17': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NOx': '15', 'SO2': '15'},
                    'flaming': {'NOx': '15', 'SO2': '15'},
                    'residual': {'NOx': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NOx': '15', 'SO2': '15'}
                },
                'snags class 3': {
                    'residual': {'NOx': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NOx': None, 'SO2': None}
                }
            }
        },
        '18-20': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NH3': '18', 'NOx': '18', 'PM2.5': '18', 'SO2': '18'},
                    'flaming': {'NH3': '18', 'NOx': '18', 'PM2.5': '18', 'SO2': '18'},
                    'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NH3': '18', 'NOx': '18', 'PM2.5': '18', 'SO2': '18'}
                },
                'snags class 3': {
                    'residual': {'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
                }
            }
        },
        '21-23': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NH3': '21', 'NOx': '21', 'SO2': '21'},
                    'flaming': {'NH3': '21', 'NOx': '21', 'SO2': '21'},
                    'residual': {'NH3': None, 'NOx': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NH3': '21', 'NOx': '21', 'SO2': '21'}
                },
                'snags class 3': {
                    'residual': {'NH3': '7', 'NOx': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NH3': None, 'NOx': None, 'SO2': None}
                }
            }
        },
        '24-26': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NH3': '24', 'NOx': '24', 'PM2.5': '24', 'SO2': '24'},
                    'flaming': {'NH3': '24', 'NOx': '24', 'PM2.5': '24', 'SO2': '24'},
                    'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NH3': '24', 'NOx': '24', 'PM2.5': '24', 'SO2': '24'}
                },
                'snags class 3': {
                    'residual': {'NH3': '7', 'NOx': '7', 'PM2.5': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NH3': None, 'NOx': None, 'PM2.5': None, 'SO2': None}
                }
            }
        },
        '27-29': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NH3': '27', 'NOx': '27', 'SO2': '27'},
                    'flaming': {'NH3': '27', 'NOx': '27', 'SO2': '27'},
                    'residual': {'NH3': None, 'NOx': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NH3': '27', 'NOx': '27', 'SO2': '27'}
                },
                'snags class 3': {
                    'residual': {'NH3': '7', 'NOx': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NH3': None, 'NOx': None, 'SO2': None}
                }
            }
        },
        '30-32': {
            'canopy': {
                'overstory': {
                    'smoldering': {'NOx': '30', 'SO2': '30'},
                    'flaming': {'NOx': '30', 'SO2': '30'},
                    'residual': {'NOx': None, 'SO2': None},
                },
                'midstory': {
                    'flaming': {'NOx': '30', 'SO2': '30'}
                },
                'snags class 3': {
                    'residual': {'NOx': '7', 'SO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'NOx': None, 'SO2': None}
                }
            }
        },
        '33-35': {
            'canopy': {
                'overstory': {
                    'smoldering': {'CO': '34', 'CH4': '34', 'CO2': '34'},
                    'flaming': {'CO': '33', 'CH4': '33', 'CO2': '33'},
                    'residual': {'CO': None, 'CH4': None, 'CO2': None},
                },
                'midstory': {
                    'flaming': {'CO': '33', 'CH4': '33', 'CO2': '33'}
                },
                'snags class 3': {
                    'residual': {'CO': '7', 'CH4': '7', 'CO2': '7'}
                }
            },
            'nonwoody': {
                'primary live': {
                    'residual': {'CO': None, 'CH4': None, 'CO2': None}
                }
            }
        }
    }

    def test_import(self, tmpdir):
        run_test(tmpdir, self.INPUT_CONTENT, CatPhase2EFGroupImporter,
            self.EXPECTED_OUTPUT)
class TestEfGroup2EfImporter(object):
"""Top level functional test for importing ef group to emission factors mappings.
"""
INPUT_CONTENT = """g/kg,,Urbanski + Liu (1-8),,,,,,,,Revised (9-32),,,,,,,,,,,,,,,,,,,,,,,,,,
,,SE pine,Boreal,Rx NW Conifer,WF NW Conifer,W Shrub,Grass,Residual CWD,Residual Duff,SE grass F/S,SE Grass F,SE Grass S,SE Hdwd F/S,SE Hdwd F,SE Hdwd S,SE Pine F/S,SE Pine F,SE Pine S,SE Shrub F/S,SE Shrub F,SE Shrub S,W MC F/S,W MC F,W MC S,W Grass F/S,W Grass F,W Grass S,W Hdwd F/S,W Hdwd F,W Hdwd S,W Shrub F/S,W Shrub F,W Shrub S,Boreal F/S,Boreal F,Boreal S
Pollutant,Formula,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35
Carbon Dioxide,CO2,1703,1641,1598,1454,1674,1705,1408,1371,1700,1710,1538,1688,1702,1580,1606,1677,1530,1703,1743,1461,1603.16,1665.93,1592.10,1531,1638,1102,1577,1711,1489,1570,1696,1549,1606,1690,1570
Carbon Monoxide,CO,76,95,105,89.3,74,61,229,257,70.2,,,78.9,68.6,129.5,94.6,72.4,156.2,74.3,72.4,93.8,108.47,83.61,139.83,55.8,45,115.3,109.3,55.3,150.6,107.2,66.4,101.6,117,73,154
Methane,CH4,2.32,3.38,4.86,4.9,3.69,1.95,13.94,7.945,2.67,2.42,5.4,2.42,1.92,5.9,3.74,2.38,8.72,2.44,2.24,3.3,5.63,3.88,7.58,1.98,1.67,4.2,5.79,1.89,6.85,2.51,2.02,4.44,5.25,2.19,7.9
Nitrogen Oxides,NOx,1.7,1,2.06,0.49,2.18,2.18,0,0.67,3.26,,,2.43,,,1.96,,,4.23,,,3.22,,,3.26,,,3.25,,,3.57,,,2.33,,
Ammonia,NH3,0.14,0.79,1.53,1.5,1.5,1.5,0.48,2.67,1.2,,,1.79,,,0.7,0.48,1.15,2.21,,,1.07,,,0.3,,,0.58,,,1.48,1.45,2.12,2.07,,
Sulfur Dioxide,SO2,1.06,1.06,1.06,0.32,0.68,0.68,0,1.76,0.97,,,0.63,,,0.79,,,0.87,,,0.88,,,0.97,,,0.52,,,0.53,,,0.15,,
PM2.5,PM2.5,12.58,21.5,17.57,26,7.06,8.51,33,35.3,12.08,,,14.32,,,29.43,17.56,49.72,12.03,,,15.30,13.73,25.38,9.89,,,10.77,6.36,11.54,7.99,6.97,9.39,21.5,,
"""
EXPECTED_OUTPUT = {
"1": {"CH4": "2.32", "CO": "76", "CO2": "1703", "NH3": "0.14", "NOx": "1.7", "PM2.5": "12.58", "SO2": "1.06"},
"10": {"CH4": "2.42", "CO2": "1710"},
"11": {"CH4": "5.4", "CO2": "1538"},
"12": {"CH4": "2.42", "CO": "78.9", "CO2": "1688", "NH3": "1.79", "NOx": "2.43", "PM2.5": "14.32", "SO2": "0.63"},
"13": {"CH4": "1.92", "CO": "68.6", "CO2": "1702"},
"14": {"CH4": "5.9", "CO": "129.5", "CO2": "1580"},
"15": {"CH4": "3.74", "CO": "94.6", "CO2": "1606", "NH3": "0.7", "NOx": "1.96", "PM2.5": "29.43", "SO2": "0.79"},
"16": {"CH4": "2.38", "CO": "72.4", "CO2": "1677", "NH3": "0.48", "PM2.5": "17.56"},
"17": {"CH4": "8.72", "CO": "156.2", "CO2": "1530", "NH3": "1.15", "PM2.5": "49.72"},
"18": {"CH4": "2.44", "CO": "74.3", "CO2": "1703", "NH3": "2.21", "NOx": "4.23", "PM2.5": "12.03", "SO2": "0.87"},
"19": {"CH4": "2.24", "CO": "72.4", "CO2": "1743"},
"2": {"CH4": "3.38", "CO": "95", "CO2": "1641", "NH3": "0.79", "NOx": "1", "PM2.5": "21.5", "SO2": "1.06"},
"20": {"CH4": "3.3", "CO": "93.8", "CO2": "1461"},
"21": {"CH4": "5.63", "CO": "108.47", "CO2": "1603.16", "NH3": "1.07", "NOx": "3.22", "PM2.5": "15.30", "SO2": "0.88"},
"22": {"CH4": "3.88", "CO": "83.61", "CO2": "1665.93", "PM2.5": "13.73"},
"23": {"CH4": "7.58", "CO": "139.83", "CO2": "1592.10", "PM2.5": "25.38"},
"24": {"CH4": "1.98", "CO": "55.8", "CO2": "1531", "NH3": "0.3", "NOx": "3.26", "PM2.5": "9.89", "SO2": "0.97"},
"25": {"CH4": "1.67", "CO": "45", "CO2": "1638"},
"26": {"CH4": "4.2", "CO": "115.3", "CO2": "1102"},
"27": {"CH4": "5.79", "CO": "109.3", "CO2": "1577", "NH3": "0.58", "NOx": "3.25", "PM2.5": "10.77", "SO2": "0.52"},
"28": {"CH4": "1.89", "CO": "55.3", "CO2": "1711", "PM2.5": "6.36"},
"29": {"CH4": "6.85", "CO": "150.6", "CO2": "1489", "PM2.5": "11.54"},
"3": {"CH4": "4.86", "CO": "105", "CO2": "1598", "NH3": "1.53", "NOx": "2.06", "PM2.5": "17.57", "SO2": "1.06"},
"30": {"CH4": "2.51", "CO": "107.2", "CO2": "1570", "NH3": "1.48", "NOx": "3.57", "PM2.5": "7.99", "SO2": "0.53"},
"31": {"CH4": "2.02", "CO": "66.4", "CO2": "1696", "NH3": "1.45", "PM2.5": "6.97"},
"32": {"CH4": "4.44", "CO": "101.6", "CO2": "1549", "NH3": "2.12", "PM2.5": "9.39"},
"33": {"CH4": "5.25", "CO": "117", "CO2": "1606", "NH3": "2.07", "NOx": "2.33", "PM2.5": "21.5", "SO2": "0.15"},
"34": {"CH4": "2.19", "CO": "73", "CO2": "1690"},
"35": {"CH4": "7.9", "CO": "154", "CO2": "1570"},
"4": {"CH4": "4.9", "CO": "89.3", "CO2": "1454", "NH3": "1.5", "NOx": "0.49", "PM2.5": "26", "SO2": "0.32"},
"5": {"CH4": "3.69", "CO": "74", "CO2": "1674", "NH3": "1.5", "NOx": "2.18", "PM2.5": "7.06", "SO2": "0.68"},
"6": {"CH4": "1.95", "CO": "61", "CO2": "1705", "NH3": "1.5", "NOx": "2.18", "PM2.5": "8.51", "SO2": "0.68"},
"7": {"CH4": "13.94", "CO": "229", "CO2": "1408", "NH3": "0.48", "NOx": "0", "PM2.5": "33", "SO2": "0"},
"8": {"CH4": "7.945", "CO": "257", "CO2": "1371", "NH3": "2.67", "NOx": "0.67", "PM2.5": "35.3", "SO2": "1.76"},
"9": {"CH4": "2.67", "CO": "70.2", "CO2": "1700", "NH3": "1.2", "NOx": "3.26", "PM2.5": "12.08", "SO2": "0.97"}
}
def test_import(self, tmpdir):
run_test(tmpdir, self.INPUT_CONTENT, EfGroup2EfImporter,
self.EXPECTED_OUTPUT)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nimport codecs\nimport csv\nimport json\nimport re\nimport warnings\nimport codecs\nfrom openpyxl import load_workbook, Workbook\nfrom time import strftime\n\nfrom django.contrib.auth.models import User\nfrom django.forms.models import model_to_... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nimport codecs\nimport csv\nimport json\nimport re\nimport warnings\nimport codecs\nfrom openpyxl import load_workbook, Workbook\nfrom time import strftime\n\nfrom django.contrib.auth.models import User\nfrom django.forms.models ... | ```python
import codecs
import csv
import json
import re
import warnings
import codecs
from openpyxl import load_workbook, Workbook
from time import strftime
from django.contrib.auth.models import User
from django.forms.models import model_to_dict
from django.http import JsonResponse, Http404, HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
from django.views import View
from django.views.generic import DeleteView, DetailView, FormView, ListView, \
TemplateView
from django.views.generic.edit import ModelFormMixin
from django.views.generic.detail import SingleObjectMixin
from crm.mixins import ChangeRecord
from crm.models import Person, Changes
from crm.constants import GEO_CHOICES, CAT_CHOICES, DIV_CHOICES
from marketing.constants import *
from marketing.forms import *
from marketing.mixins import *
from marketing.models import *
######################
# Main page views
######################
class Index(IndexCSVResponseMixin, MarketingPermissionMixin,
GeneratePaginationList, ListView):
template_name = 'marketing/index.html'
context_object_name = 'records'
queryset = Person.objects.all()
paginate_by=250
query_param_terms = {
'main_category': 'main_category',
'main_category2': 'main_category2',
'geo': 'geo',
'division1': 'division1',
'division2': 'division2',
'company': 'company__icontains',
'name': 'name__icontains',
'title': 'title__icontains',
'dept': 'dept__icontains',
'phone': 'phone__icontains',
'do_not_call': 'do_not_call',
'email': 'email__icontains',
'do_not_email': 'do_not_email',
'industry': 'industry__icontains',
'email_alternate': 'email_alternate__icontains'}
def get_ordering(self):
"""
Return the field or fields to use for ordering the queryset.
Overrides default method
"""
self.order = self.request.GET.get('order', 'asc')
sort_by = self.request.GET.get('sort_by', None)
if sort_by and self.order == 'desc':
sort_by = '-' + sort_by
return sort_by
def get_paginate_by(self, queryset):
return super(Index, self).get_paginate_by(queryset)
def _filter_queryset(self, queryset):
query_string = ''
query_params = {}
query_prefill = {}
for param in self.request.GET:
if param in self.query_param_terms:
query_string += param + '=' + self.request.GET[param] + '&'
query_prefill[param] = self.request.GET[param]
if self.request.GET[param] in ('true', 'false'):
tf_bool = self.request.GET[param] == 'true'
query_params[self.query_param_terms[param]] = tf_bool
else:
query_params[self.query_param_terms[param]] = \
self.request.GET[param]
query_string = re.sub(r'\s', '%20', query_string)
query_string = query_string[:-1]
self.filter_string = query_string if len(query_string) > 0 else None
queryset = queryset.filter(**query_params)
self.query_prefill = query_prefill
return queryset
def get_queryset(self):
queryset = super(Index, self).get_queryset()
queryset = self.filtered_queryset = self._filter_queryset(queryset)
self.queryset_length = queryset.count()
return queryset
def paginate_queryset(self, queryset, page_size):
"""
Paginate the queryset, if needed.
Override default method to go to first or last page if out of bounds
"""
paginator = self.get_paginator(
queryset, page_size, orphans=self.get_paginate_orphans(),
allow_empty_first_page=self.get_allow_empty())
page_kwarg = self.page_kwarg
page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_("Page is not 'last', nor can it be converted to an int."))
if page_number < 1:
page_number = 1
if page_number > paginator.num_pages:
page_number = paginator.num_pages
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage as e:
raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
'page_number': page_number,
'message': str(e)
})
def get_context_data(self, **kwargs):
context = super(Index, self).get_context_data(**kwargs)
context['geo_choices'] = GEO_CHOICES
context['cat_choices'] = CAT_CHOICES
context['div_choices'] = DIV_CHOICES
if context['is_paginated']:
context['pagination_list'] = self.generate_pagination_list(context)
context['order'] = self.order
sort_by = self.get_ordering()
if sort_by and sort_by[0] == '-':
sort_by = sort_by[1:]
context['sort_by'] = sort_by
context['filter_string'] = self.filter_string
context['query_prefill'] = self.query_prefill
context['upload_file_form'] = UploadFileForm()
context['queryset_length'] = self.queryset_length
return context
class ProcessChanges(MarketingPermissionMixin, GeneratePaginationList,
ListView):
template_name = 'marketing/changes.html'
context_object_name = 'all_changes'
queryset = Changes.objects.all()
paginate_by=250
def get_context_data(self, **kwargs):
context = super(ProcessChanges, self).get_context_data(**kwargs)
if context['is_paginated']:
context['pagination_list'] = self.generate_pagination_list(context)
context['total_qs_length'] = self.object_list.count()
return context
class UploadFile(MarketingPermissionMixin, TemplateView):
template_name = 'marketing/upload.html'
error_message = None
uploaded_file = None
def _csv_row_is_not_blank(self, row):
for cell_num in range(self.num_cols):
if row[cell_num] not in ('', None):
return True
return False
def _add_csv_row_to_db(self, row, is_first, row_number):
new_row = UploadedRow(
parent_file = self.uploaded_file,
row_is_first=is_first,
row_number=row_number,
)
new_row.save()
for cell_num in range(self.num_cols):
new_cell = UploadedCell(
parent_row = new_row,
cell_order = cell_num,
content=row[cell_num],
)
new_cell.save()
def _add_csv_file_to_db(self, decoder):
f = codecs.iterdecode(
self.upload_file_form.cleaned_data['marketing_file'],
decoder
)
reader = csv.reader(f)
if not self.uploaded_file:
new_file = UploadedFile(
filename=self.upload_file_form.cleaned_data['marketing_file'].name,
uploaded_by=self.request.user,
num_columns=0,
)
new_file.save()
self.uploaded_file = new_file
is_first_row = True
self.num_cols = None
row_number = 0
for row in reader:
if not self.num_cols:
self.num_cols = len(row)
if self._csv_row_is_not_blank(row):
self._add_csv_row_to_db(row, is_first_row, row_number)
is_first_row = False
row_number += 1
if self.num_cols:
self.uploaded_file.num_columns = self.num_cols
self.uploaded_file.save()
def _process_csv(self):
decoder_list = ['utf-8', 'windows-1252']
if self.request.encoding and self.request_encoding not in decoder_list:
decoder.insert(0, self.request.encoding)
successful_transcription = False
for decode_attempt in range(len(decoder_list)):
if not successful_transcription:
try:
self._add_csv_file_to_db(decoder_list[decode_attempt])
successful_transcription = True
except UnicodeDecodeError:
if self.uploaded_file:
UploadedRow.objects.filter(
parent_file=self.uploaded_file
).delete()
def _xlsx_row_is_not_blank(self, ws, row_num, num_cols):
for col_num in range(1, num_cols+1):
if ws.cell(row=row_num, column=col_num).value not in ('', None):
return True
return False
def _add_xlsx_row_to_db(self, ws, row_num, num_cols):
new_row = UploadedRow(
parent_file = self.uploaded_file,
row_is_first = row_num == 1,
row_number = row_num,
)
new_row.save()
for col_num in range(1, num_cols+1):
new_cell = UploadedCell(
parent_row=new_row,
cell_order=col_num,
content=ws.cell(row=row_num, column=col_num).value
)
new_cell.save()
def _process_xlsx(self, datafile):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
wb = load_workbook(datafile, read_only=True, data_only=True)
ws = wb.active
num_rows = ws.max_row
num_cols = ws.max_column
new_file = UploadedFile(
filename=self.upload_file_form.cleaned_data['marketing_file'].name,
uploaded_by=self.request.user,
num_columns=num_cols
)
new_file.save()
self.uploaded_file = new_file
for row_num in range(1, num_rows+1):
if self._xlsx_row_is_not_blank(ws, row_num, num_cols):
self._add_xlsx_row_to_db(ws, row_num, num_cols)
def post(self, request, *args, **kwargs):
self.upload_file_form = UploadFileForm(request.POST, request.FILES)
if self.upload_file_form.is_valid():
uploaded_file = request.FILES['marketing_file']
try:
self._process_xlsx(uploaded_file)
except Exception as e:
if self.uploaded_file:
self.uploaded_file.delete()
try:
self._process_csv()
except Exception as e:
self.error_message = 'File was not readable.\n' + \
'It should be either xlsx or csv format, with ' + \
'either utf-8 or windows-1252 encoding.\n' + \
'If you are not able to fix this, please talk to ' + \
'Chris.\n\n' + \
'The specific error message was: \n\n' + str(e)
else:
self.error_message = 'Invalid File Submitted'
datafile = None
datafile_type_is = None
context = self.get_context_data(**kwargs)
return super(UploadFile, self).render_to_response(context)
def get_context_data(self, **kwargs):
context = super(UploadFile, self).get_context_data(**kwargs)
context['error_message'] = self.error_message
context['unprocessed_files'] = \
UploadedFile.objects.all().order_by('-uploaded_at')
context['upload_file_form'] = UploadFileForm()
return context
#####################
# Ajax views
#####################
class Add(MarketingPermissionMixin, TemplateView):
template_name = 'marketing/index_addins/table_row.html'
def _duplicate_person(self, request):
self._person = Person.objects.get(pk=request.POST['person_id'])
self._person.pk = None
self._person.created_by = request.user
self._person.modified_by = request.user
self._person.date_created = timezone.now()
self._person.date_modified = timezone.now()
self._person.do_not_call = False
self._person.do_not_email = False
self._person.email_alternate = ''
self._person.save()
def _new_person(self, request):
self._person = Person(
date_created=timezone.now(),
created_by=request.user,
date_modified=timezone.now(),
modified_by=request.user,
)
self._person.save()
def get(self, request, *args, **kwargs):
raise Http404()
def post(self, request, *args, **kwargs):
if 'person_id' in request.POST:
try:
self._duplicate_person(request)
except Person.DoesNotExist:
self._new_person(request)
else:
self._new_person(request)
context = self.get_context_data(**kwargs)
return super(Add, self).render_to_response(context)
def get_context_data(self, **kwargs):
context = super(Add, self).get_context_data(**kwargs)
context['record'] = self._person
context['geo_choices'] = GEO_CHOICES
context['cat_choices'] = CAT_CHOICES
context['div_choices'] = DIV_CHOICES
return context
class BulkUpdate(MarketingPermissionMixin, View):
def get(self, request, *args, **kwargs):
raise Http404()
def post(self, request, *arts, **kwargs):
data = json.loads(request.POST['json'])
person_ids = data['record_list']
field_data = data['field_dict']
successful_updates = {}
Person.objects.filter(id__in=person_ids).update(**field_data)
return HttpResponse(status=204)
class ChangeDetails(MarketingPermissionMixin, ModelFormMixin, DetailView):
template_name = 'marketing/changes_addins/compare_panel.html'
form_class = PersonDetailForm
model = Changes
person_exists = False
def _get_changes_form(self):
if hasattr(self, 'changes_obj') and self.changes_obj is not None:
return ChangesDetailForm(instance=self.changes_obj)
return ChangesDetailForm()
def delete(self):
self.changes_obj.delete()
def form_valid(self, form):
"""
Overriding because Ajax and don't want Redirect
"""
self.object = form.save()
self.delete()
return JsonResponse({})
def get_context_data(self, **kwargs):
context = super(ChangeDetails, self).get_context_data(**kwargs)
context['person_exists'] = self.person_exists
context['changes_form'] = self._get_changes_form()
context['changes_record'] = self.changes_obj
return context
def get_object(self, queryset=None):
"""
Extends default method because we need to return a Person object
and the method is called with the pk for a Changes object
(Default method will return a Changes record)
"""
self.changes_obj = super(ChangeDetails, self).get_object()
try:
obj = Person.objects.get(pk=self.changes_obj.orig_id)
self.person_exists = True
except Person.DoesNotExist:
obj = None
return obj
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.form = self.get_form()
if self.form.is_valid():
return self.form_valid(self.form)
else:
return self.form_invalid(self.form)
class DeleteChange(MarketingPermissionMixin, DeleteView):
model = Changes
def delete(self, request, *args, **kwargs):
"""
Override default method b/c AJAX and don't want a redirect
"""
self.object = self.get_object()
self.object.delete()
return HttpResponse(status=200)
class DeletePerson(ChangeRecord, MarketingPermissionMixin, View):
def get(self, request, *args, **kwargs):
raise Http404()
def post(self, request, *args, **kwargs):
person = get_object_or_404(Person, pk=request.POST['record_id'])
pk = person.pk
Changes.objects.filter(orig_id=pk).delete()
if person.has_registration_history():
self.add_change_record(person, 'delete')
person.delete()
return HttpResponse(status=204)
class DeleteUpload(MarketingPermissionMixin, DeleteView):
queryset = UploadedFile.objects.all()
def get(self, request, *args, **kwargs):
raise Http404()
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponse(status=200)
class DownloadErrors(MarketingPermissionMixin, View):
def _build_row_list(self, row):
row_text = []
for i in range(self.upload.num_columns + 1):
try:
row_text.append(
UploadedCell.objects.get(parent_row=row,
cell_order=i).content
)
except UploadedCell.DoesNotExist:
if i == self.upload.num_columns:
row_text.append(row.error_message)
else:
row_text.append('')
return row_text
def _csv_response(self, filename):
filename += 'csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = \
('attachment; filename="%s.csv"' % filename)
writer = csv.writer(response)
filerows = UploadedRow.objects.filter(parent_file=self.upload)
for row in filerows:
row_text = self._build_row_list(row)
writer.writerow(row_text)
self.upload.delete()
return response
def _xlsx_response(self, filename):
filename += 'xlsx'
response = HttpResponse(content_type='application/vnd.mx-excel')
response['Content-Disposition'] = \
('attachment; filename="%s.xlsx"' % filename)
wb = Workbook()
ws = wb.active
filerows = UploadedRow.objects.filter(parent_file=self.upload)
for row in filerows:
row_text = self._build_row_list(row)
ws.append(row)
wb.save(response)
self.upload.delete()
return response
def get(self, request, *args, **kwargs):
ok_to_render = False
self.filetype = request.GET.get('fileformat', None)
self.upload = get_object_or_404(UploadedFile,
pk=request.GET['file_id'])
if self.filetype:
return self.render_to_response()
else:
raise Http404('invalid format specified')
def render_to_response(self):
if self.upload.filename[-3:] == 'csv':
filename = self.upload.filename[:-4] + '_errors.'
else:
filename = self.upload.filename[:-5] + '_errors.'
if self.filetype == 'csv':
return self._csv_response(filename)
else:
return self._xlsx_response(filename)
class FieldMatcher(MarketingPermissionMixin, DetailView):
template_name = 'marketing/upload_addins/field_matcher.html'
queryset = UploadedFile.objects.all()
context_object_name = 'uploaded_file'
def _first_ten_rows(self):
rows = UploadedRow.objects.filter(
parent_file=self.object,
row_is_first=False
).order_by('row_number')[:10]
return []
def get_context_data(self, **kwargs):
context = super(FieldMatcher, self).get_context_data(**kwargs)
try:
first_row = UploadedRow.objects.get(
parent_file=self.object, row_is_first=True
)
except UploadedRow.DoesNotExist:
first_row = None
if first_row:
header_cells = UploadedCell.objects.filter(
parent_row=first_row
).order_by('cell_order')
else:
header_cells = None
context['header_cells'] = header_cells
context['first_ten_rows'] = UploadedRow.objects.filter(
parent_file=self.object,
row_is_first=False,
).order_by('row_number')[:10]
context['selector_form'] = FieldSelectorForm()
return context
class ProcessUpload(MarketingPermissionMixin, View):
def get(self, request, *args, **kwargs):
raise Http404()
def _delete_file(self):
num_rows = UploadedRow.objects.filter(parent_file=self.upload).count()
if num_rows == 0:
self.upload.delete()
return True
elif num_rows == 1:
row_is_do_not_import = UploadedRow.objects.all()[0].row_is_first
if row_is_do_not_import:
self.upload.delete()
return True
else:
return False
def _import_row(self, row, cell_map):
person_attrs = {}
for cell in UploadedCell.objects.filter(parent_row=row):
if cell.cell_order in cell_map:
person_attrs[cell_map[cell.cell_order]] = cell.content
person_id = person_attrs.pop('id', None)
email = person_attrs['email'] if 'email' in person_attrs else None
try:
person = Person.objects.get(pk=person_id)
except Person.DoesNotExist:
if email:
try:
person = Person.objects.filter(email=email)[0]
except IndexError:
person = Person(
date_created = timezone.now(),
created_by = self.request.user,
)
except ValueError:
row.has_error = True
row.error_message = str(person_id) + ' is not a valid record id. ' + \
'It should be an integer.'
row.save()
self.rows_imported['total'] += 1
return
person_attrs['date_modified'] = timezone.now()
person_attrs['modified_by'] = self.request.user
if 'f1-category-split' in person_attrs:
f1 = person_attrs.pop('f1-category-split')
try:
person_attrs['main_category'], \
person_attrs['main_category2'] = F1_SPLIT[f1].lower()
except KeyError:
person_attrs['main_category'] = \
person_attrs['main_category2'] = 'NA'
if 'geo' in person_attrs:
person_attrs['geo'] = GEO_DICT[person_attrs['geo'].lower()] \
if person_attrs['geo'].lower() in GEO_DICT else 'Unknown'
try:
for key, value in person_attrs.items():
setattr(person, key, value)
person.save()
self.rows_imported['success'] += 1
self.rows_imported['total'] += 1
if not row.row_is_first:
row.delete()
except Exception as e:
row.has_error = True
row.error_message = str(e)
row.save()
self.rows_imported['total'] += 1
def post(self, request, *arts, **kwargs):
self.rows_imported = {'success': 0, 'total': 0}
data = json.loads(request.POST['json'])
include_first_row = request.POST['ignore_first_row'] == 'false'
self.upload = UploadedFile.objects.get(pk = data['file_id'])
cell_map = {int(y[0]):x for x,y in data['field_matches'].items()}
for row in UploadedRow.objects.filter(parent_file=self.upload):
if include_first_row or (not row.row_is_first):
self._import_row(row, cell_map)
file_fully_processed = self._delete_file()
response_json = {
'processComplete': file_fully_processed,
'rowsImported': self.rows_imported
}
return JsonResponse(response_json)
class RestoreDeletedRecord(MarketingPermissionMixin, JsonResponseMixin,
SingleObjectMixin, View):
model = Changes
http_method_names = ['post',]
def _restore_person(self):
restore_vals = model_to_dict(self.changes_obj)
# need to restore original id value
restore_vals['id'] = restore_vals.pop('orig_id')
# ForeignKeys are returned as keys -> need to convert to objects
for key in ('created_by', 'modified_by'):
restore_vals[key] = User.objects.get(pk=restore_vals.pop(key))
# action is not in Person model so remove kwarg
restore_vals.pop('action')
self.object = Person(**restore_vals)
self.object.save()
def delete(self, request, *args, **kwargs):
self.deleted_pk = self.changes_obj.pk
self.changes_obj.delete()
return self.render_to_response(**kwargs)
def get_json_data(self, **kwargs):
return {
'change_id': kwargs['pk'],
'person_id': self.object.pk,
}
def post(self, request, *args, **kwargs):
self.changes_obj = self.get_object()
self._restore_person()
return self.delete(request, *args, **kwargs)
class UpdatePerson(MarketingPermissionMixin, View):
def get(self, request, *args, **kwargs):
raise Http404()
def post(self, request, *args, **kwargs):
person = get_object_or_404(Person, pk=request.POST['record_id'])
update_field = request.POST['field']
new_value = request.POST['new_value']
old_value = getattr(person, update_field)
if new_value in ('true', 'false') and old_value in (True, False):
new_value = new_value == 'true'
if new_value != old_value:
setattr(person, update_field, new_value)
person.date_modified = timezone.now()
person.modified_by = request.user
person.save()
person_vals = {
'date_modified': person.date_modified.strftime('%m/%d/%Y'),
'state_prov': person.state_prov(),
}
return JsonResponse(person_vals)
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement\n# with Numenta, Inc., for a separate license fo... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement\n# with Numenta, Inc., for a sep... | ```python
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Add Context Manager (with ...) support for Jython/Python 2.5.x (
# ClientJobManager used to use Jython); it's a noop in newer Python versions.
from __future__ import with_statement
import collections
import logging
from optparse import OptionParser
import sys
import traceback
import uuid
from nupic.support.decorators import logExceptions #, logEntryExit
from nupic.database.Connection import ConnectionFactory
from nupic.support.configuration import Configuration
from nupic.support import pymysqlhelpers
_MODULE_NAME = "nupic.database.ClientJobsDAO"
_LOGGER = logging.getLogger(__name__)
class InvalidConnectionException(Exception):
""" This exception is raised when a worker tries to update a model record that
belongs to another worker. Ownership of a model is determined by the database
connection id
"""
pass
# Create a decorator for retrying idempotent SQL operations upon transient MySQL
# failures.
# WARNING: do NOT indiscriminately decorate non-idempotent operations with this
# decorator as it # may case undesirable side-effects, such as multiple row
# insertions, etc.
# NOTE: having this as a global permits us to switch parameters wholesale (e.g.,
# timeout)
g_retrySQL = pymysqlhelpers.retrySQL(logger=_LOGGER)
def _abbreviate(text, threshold):
""" Abbreviate the given text to threshold chars and append an ellipsis if its
length exceeds threshold; used for logging;
NOTE: the resulting text could be longer than threshold due to the ellipsis
"""
if text is not None and len(text) > threshold:
text = text[:threshold] + "..."
return text
class ClientJobsDAO(object):
""" This Data Access Object (DAO) is used for creating, managing, and updating
the ClientJobs database. The ClientJobs database is a MySQL database shared by
the UI, Stream Manager (StreamMgr), and the engine. The clients (UI and
StreamMgr) make calls to this DAO to request new jobs (Hypersearch, stream
jobs, model evaluations, etc.) and the engine queries and updates it to manage
and keep track of the jobs and report progress and results back to the
clients.
This class is primarily a collection of static methods that work with the
client jobs database. But, rather than taking the approach of declaring each
method as static, we provide just one static class method that returns a
reference to the (one) ClientJobsDAO instance allocated for the current
process (and perhaps in the future, for the current thread). This approach
gives us the flexibility in the future of perhaps allocating one instance per
thread and makes the internal design a bit more compartmentalized (by enabling
the use of instance variables). Note: This is generally referred to as
the singleton pattern.
A typical call is made in the following manner:
ClientJobsDAO.get().jobInfo()
If the caller desires, they have the option of caching the instance returned
from ClientJobsDAO.get(), i.e.:
cjDAO = ClientJobsDAO.get()
cjDAO.jobInfo()
cjDAO.jobSetStatus(...)
There are two tables in this database, the jobs table and the models table, as
described below. The jobs table keeps track of all jobs. The models table is
filled in by hypersearch jobs with the results of each model that it
evaluates.
Jobs table. The field names are given as:
internal mysql field name (public API field name)
field description
---------------------------------------------------------------------------
job_id (jobId): Generated by the database when a new job is inserted by a
client. This is an auto-incrementing ID that is unique among all
jobs.
client (client): The name of the client (i.e. 'UI', 'StreamMgr', etc.).
client_info (clientInfo): Arbitrary data specified by client.
client_key (clientKey): Foreign key as defined by the client.
cmd_line (cmdLine): Command line to be used to launch each worker process for
the job.
params (params): JSON encoded dict of job specific parameters that are
useful to the worker processes for this job. This field is provided
by the client when it inserts the job and can be fetched out of the
database by worker processes (based on job_id) if needed.
job_hash (jobHash): hash of the job, provided by the client, used for
detecting identical jobs when they use the jobInsertUnique() call.
Clients that don't care about whether jobs are unique or not do not
have to generate or care about this field.
status (status): The engine will periodically update the status field as the
job runs.
This is an enum. Possible values are:
STATUS_NOTSTARTED client has just added this job to the table
STATUS_STARTING: a CJM is in the process of launching this job in the
engine
STATUS_RUNNING: the engine is currently running this job
STATUS_TESTMODE: the job is being run by the test framework
outside the context of hadoop, should be
ignored
STATUS_COMPLETED: the job has completed. The completion_reason
field describes the manner in which it
completed
completion_reason (completionReason): Why this job completed. Possible values
are:
CMPL_REASON_SUCCESS: job completed successfully
CMPL_REASON_KILLED: job was killed by ClientJobManager
CMPL_REASON_CANCELLED: job was cancelled by user
CMPL_REASON_ERROR: job encountered an error. The completion_msg
field contains a text description of the error
completion_msg (completionMsg): Text description of error that occurred if job
terminated with completion_reason of CMPL_REASON_ERROR or
CMPL_REASON_KILLED
worker_completion_msg (workerCompletionMsg): Why this job completed, according
to the worker(s).
  cancel (cancel): Set by the client if/when it wants to cancel a job.
Periodically polled by the CJM and used as a signal to kill the job.
TODO: the above claim doesn't match current reality: presently,
Hypersearch and Production workers poll the cancel field.
start_time (startTime): date and time of when this job started.
end_time (endTime): date and time of when this job completed.
results (results): A JSON encoded dict of the results of a hypersearch job.
The dict contains the following fields. Note that this dict
                  is NULL before any model has reported its results:
bestModel: The modelID of the best performing model so far
bestValue: The value of the optimized metric for the best model
_eng_last_update_time (engLastUpdateTime): Time stamp of last update. Used
for detecting stalled jobs.
_eng_cjm_conn_id (engCjmConnId): The database client connection ID of the CJM
(Client Job Manager) starting up this job. Set and checked while the
job is in the 'starting' phase. Used for detecting and dealing with
stalled CJM's
_eng_worker_state (engWorkerState): JSON encoded data structure
for private use by the workers.
_eng_status (engStatus): String used to send status messages from the engine
to the UI. For informative purposes only.
_eng_model_milestones (engModelMilestones): JSON encoded object with
information about global model milestone results.
minimum_workers (minimumWorkers): min number of desired workers at a time.
If 0, no workers will be allocated in a crunch
maximum_workers (maximumWorkers): max number of desired workers at a time. If
0, then use as many as practical given load on the cluster.
priority (priority): job scheduling priority; 0 is the default priority (
ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative values
are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
_eng_allocate_new_workers (engAllocateNewWorkers): Should the scheduling
algorithm allocate new workers to this job? If a specialized worker
willingly gives up control, we set this field to FALSE to avoid
allocating new workers.
_eng_untended_dead_workers (engUntendedDeadWorkers): If a specialized worker
        fails or is killed by the scheduler, we set this field to TRUE to
indicate that the worker is dead.
num_failed_workers (numFailedWorkers): The number of failed specialized workers
for this job. if the number of failures is greater than
max.failed.attempts, we mark the job as failed
last_failed_worker_error_msg (lastFailedWorkerErrorMsg): Error message of the
most recent failed specialized worker
Models table: field description
---------------------------------------------------------------------------
model_id (modelId): Generated by the database when the engine inserts a new
model. This is an auto-incrementing ID that is globally unique
among all models of all jobs.
job_id (jobId) : The job_id of the job in the Jobs Table that this model
belongs to.
params (params): JSON encoded dict of all the parameters used to generate
this particular model. The dict contains the following properties:
paramValues = modelParamValuesDict,
paramLabels = modelParamValueLabelsDict,
experimentName = expName
status (status): Enumeration of the model's status. Possible values are:
STATUS_NOTSTARTED: This model's parameters have been chosen, but
no worker is evaluating it yet.
STATUS_RUNNING: This model is currently being evaluated by a
worker
STATUS_COMPLETED: This model has finished running. The
completion_reason field describes why it
completed.
completion_reason (completionReason) : Why this model completed. Possible
values are:
CMPL_REASON_EOF: model reached the end of the dataset
CMPL_REASON_STOPPED: model stopped because it reached maturity
and was not deemed the best model.
CMPL_REASON_KILLED: model was killed by the terminator logic
before maturing and before reaching EOF
because it was doing so poorly
CMPL_REASON_ERROR: model encountered an error. The completion_msg
field contains a text description of the
error
completion_msg (completionMsg): Text description of error that occurred if
model terminated with completion_reason of CMPL_REASON_ERROR or
CMPL_REASON_KILLED
results (results): JSON encoded structure containing the latest online
metrics produced by the model. The engine periodically updates this
as the model runs.
optimized_metric(optimizedMetric): The value of the metric over which
        this model is being optimized. Storing this separately in the database
allows us to search through to find the best metric faster
update_counter (updateCounter): Incremented by the UI whenever the engine
updates the results field. This makes it easier and faster for the
UI to determine which models have changed results.
num_records (numRecords): Number of records (from the original dataset,
before aggregation) that have been processed so far by this model.
Periodically updated by the engine as the model is evaluated.
start_time (startTime): Date and time of when this model started being
evaluated.
end_time (endTime): Date and time of when this model completed.
cpu_time (cpuTime): How much actual CPU time was spent evaluating this
model (in seconds). This excludes any time the process spent
sleeping, or otherwise not executing code.
model_checkpoint_id (modelCheckpointId): Checkpoint identifier for this model
(after it has been saved)
_eng_params_hash (engParamsHash): MD5 hash of the params. Used for detecting
duplicate models.
_eng_particle_hash (engParticleHash): MD5 hash of the model's particle (for
particle swarm optimization algorithm).
_eng_last_update_time (engLastUpdateTime): Time stamp of last update. Used
for detecting stalled workers.
_eng_task_tracker_id (engTaskTrackerId): ID of the Hadoop Task Tracker
managing the worker
_eng_worker_id (engWorkerId): ID of the Hadoop Map Task (worker) for this task
_eng_attempt_id (engAttemptId): Hadoop attempt ID of this task attempt
_eng_worker_conn_id (engWorkerConnId): database client connection ID of the
hypersearch worker that is running this model
_eng_milestones (engMilestones): JSON encoded list of metric values for the
model at each milestone point.
_eng_stop (engStop): One of the STOP_REASON_XXX enumerated value strings
(or None). This gets set to STOP_REASON_KILLED if the terminator
decides that the performance of this model is so poor that it
should be terminated immediately. This gets set to STOP_REASON_STOPPED
if Hypersearch decides that the search is over and this model
doesn't have to run anymore.
_eng_matured (engMatured): Set by the model maturity checker when it decides
that this model has "matured".
"""
# Job priority range values.
#
# Higher-priority jobs will be scheduled to run at the expense of the
# lower-priority jobs, and higher-priority job tasks will preempt those with
# lower priority if there is inadequate supply of scheduling slots. Excess
# lower priority job tasks will starve as long as slot demand exceeds supply.
MIN_JOB_PRIORITY = -100 # Minimum job scheduling priority
DEFAULT_JOB_PRIORITY = 0 # Default job scheduling priority
MAX_JOB_PRIORITY = 100 # Maximum job scheduling priority
# Equates for job and model status
STATUS_NOTSTARTED = "notStarted"
STATUS_STARTING = "starting"
STATUS_RUNNING = "running"
STATUS_TESTMODE = "testMode"
STATUS_COMPLETED = "completed"
# Equates for job and model completion_reason field
CMPL_REASON_SUCCESS = "success" # jobs only - job completed successfully
CMPL_REASON_CANCELLED = "cancel" # jobs only - canceled by user;
# TODO: presently, no one seems to set the
# CANCELLED reason
CMPL_REASON_KILLED = "killed" # jobs or models - model killed by
# terminator for poor results or job
# killed by ClientJobManager
CMPL_REASON_ERROR = "error" # jobs or models - Encountered an error
# while running
CMPL_REASON_EOF = "eof" # models only - model reached end of
# data set
CMPL_REASON_STOPPED = "stopped" # models only - model stopped running
# because it matured and was not deemed
# the best model.
CMPL_REASON_ORPHAN = "orphan" # models only - model was detected as an
# orphan because the worker running it
# failed to update the last_update_time.
# This model is considered dead and a new
# one may be created to take its place.
# Equates for the model _eng_stop field
STOP_REASON_KILLED = "killed" # killed by model terminator for poor
# results before it matured.
STOP_REASON_STOPPED = "stopped" # stopped because it had matured and was
# not deemed the best model
# Equates for the cleaned field
CLEAN_NOT_DONE = "notdone" # Cleaning for job is not done
CLEAN_DONE = "done" # Cleaning for job is done
# Equates for standard job classes
JOB_TYPE_HS = "hypersearch"
JOB_TYPE_PM = "production-model"
JOB_TYPE_SM = "stream-manager"
JOB_TYPE_TEST = "test"
HASH_MAX_LEN = 16
""" max size, in bytes, of the hash used for model and job identification """
CLIENT_MAX_LEN = 8
""" max size, in bytes of the 'client' field's value """
  class _TableInfoBase(object):
    """ Common table info fields; base class for the per-table info holders
    (_JobsTableInfo and _ModelsTableInfo). All attributes start out as None
    and are populated during connect() via _initTables().
    """

    __slots__ = ("tableName", "dbFieldNames", "publicFieldNames",
                 "pubToDBNameDict", "dbToPubNameDict",)

    def __init__(self):
      self.tableName = None
      """ Database-qualified table name (databasename.tablename) """

      self.dbFieldNames = None
      """ Names of fields in schema """

      self.publicFieldNames = None
      """ Public names of fields generated programmatically: e.g.,
      word1_word2_word3 => word1Word2Word3 """

      self.pubToDBNameDict = None
      self.dbToPubNameDict = None
      """ These dicts convert public field names to DB names and vice versa """
  class _JobsTableInfo(_TableInfoBase):
    """ Table info specific to the jobs table """

    __slots__ = ("jobInfoNamedTuple",)

    # The namedtuple classes that we use to return information from various
    # functions
    jobDemandNamedTuple = collections.namedtuple(
      '_jobDemandNamedTuple',
      ['jobId', 'minimumWorkers', 'maximumWorkers', 'priority',
       'engAllocateNewWorkers', 'engUntendedDeadWorkers', 'numFailedWorkers',
       'engJobType'])

    def __init__(self):
      super(ClientJobsDAO._JobsTableInfo, self).__init__()

      # Generated dynamically after introspecting jobs table columns. Attributes
      # of this namedtuple are the public names of the jobs table columns.
      self.jobInfoNamedTuple = None
  class _ModelsTableInfo(_TableInfoBase):
    """ Table info specific to the models table """

    __slots__ = ("modelInfoNamedTuple",)

    # The namedtuple classes that we use to return information from various
    # functions
    getParamsNamedTuple = collections.namedtuple(
      '_modelsGetParamsNamedTuple', ['modelId', 'params', 'engParamsHash'])

    getResultAndStatusNamedTuple = collections.namedtuple(
      '_modelsGetResultAndStatusNamedTuple',
      ['modelId', 'results', 'status', 'updateCounter', 'numRecords',
       'completionReason', 'completionMsg', 'engParamsHash', 'engMatured'])

    getUpdateCountersNamedTuple = collections.namedtuple(
      '_modelsGetUpdateCountersNamedTuple', ['modelId', 'updateCounter'])

    def __init__(self):
      super(ClientJobsDAO._ModelsTableInfo, self).__init__()

      # Generated dynamically after introspecting models columns. Attributes
      # of this namedtuple are the public names of the models table columns.
      self.modelInfoNamedTuple = None
_SEQUENCE_TYPES = (list, set, tuple)
""" Sequence types that we accept in args """
# There is one instance of the ClientJobsDAO per process. This class static
# variable gets filled in the first time the process calls
# ClientJobsDAO.get()
_instance = None
# The root name and version of the database. The actual database name is
# something of the form "client_jobs_v2_suffix".
_DB_ROOT_NAME = 'client_jobs'
_DB_VERSION = 29
@classmethod
def dbNamePrefix(cls):
""" Get the beginning part of the database name for the current version
of the database. This, concatenated with
'_' + Configuration.get('nupic.cluster.database.nameSuffix') will
produce the actual database name used.
"""
return cls.__getDBNamePrefixForVersion(cls._DB_VERSION)
@classmethod
def __getDBNamePrefixForVersion(cls, dbVersion):
""" Get the beginning part of the database name for the given database
version. This, concatenated with
'_' + Configuration.get('nupic.cluster.database.nameSuffix') will
produce the actual database name used.
Parameters:
----------------------------------------------------------------
dbVersion: ClientJobs database version number
retval: the ClientJobs database name prefix for the given DB version
"""
return '{0!s}_v{1:d}'.format(cls._DB_ROOT_NAME, dbVersion)
@classmethod
def _getDBName(cls):
""" Generates the ClientJobs database name for the current version of the
database; "semi-private" class method for use by friends of the class.
Parameters:
----------------------------------------------------------------
retval: the ClientJobs database name
"""
return cls.__getDBNameForVersion(cls._DB_VERSION)
@classmethod
def __getDBNameForVersion(cls, dbVersion):
""" Generates the ClientJobs database name for the given version of the
database
Parameters:
----------------------------------------------------------------
dbVersion: ClientJobs database version number
retval: the ClientJobs database name for the given DB version
"""
# DB Name prefix for the given version
prefix = cls.__getDBNamePrefixForVersion(dbVersion)
# DB Name suffix
suffix = Configuration.get('nupic.cluster.database.nameSuffix')
# Replace dash with underscore (dash will break SQL e.g. 'ec2-user')
suffix = suffix.replace("-", "_")
# Create the name of the database for the given DB version
dbName = '{0!s}_{1!s}'.format(prefix, suffix)
return dbName
@staticmethod
@logExceptions(_LOGGER)
def get():
""" Get the instance of the ClientJobsDAO created for this process (or
perhaps at some point in the future, for this thread).
Parameters:
----------------------------------------------------------------
retval: instance of ClientJobsDAO
"""
# Instantiate if needed
if ClientJobsDAO._instance is None:
cjDAO = ClientJobsDAO()
cjDAO.connect()
ClientJobsDAO._instance = cjDAO
# Return the instance to the caller
return ClientJobsDAO._instance
@logExceptions(_LOGGER)
def __init__(self):
""" Instantiate a ClientJobsDAO instance.
Parameters:
----------------------------------------------------------------
"""
self._logger = _LOGGER
# Usage error to instantiate more than 1 instance per process
assert (ClientJobsDAO._instance is None)
# Create the name of the current version database
self.dbName = self._getDBName()
# NOTE: we set the table names here; the rest of the table info is set when
# the tables are initialized during connect()
self._jobs = self._JobsTableInfo()
self._jobs.tableName = '{0!s}.jobs'.format((self.dbName))
self._models = self._ModelsTableInfo()
self._models.tableName = '{0!s}.models'.format((self.dbName))
# Our connection ID, filled in during connect()
self._connectionID = None
  @property
  def jobsTableName(self):
    """ Database-qualified name of the jobs table: '<dbName>.jobs' """
    return self._jobs.tableName
  @property
  def modelsTableName(self):
    """ Database-qualified name of the models table: '<dbName>.models' """
    return self._models.tableName
def _columnNameDBToPublic(self, dbName):
""" Convert a database internal column name to a public name. This
takes something of the form word1_word2_word3 and converts it to:
word1Word2Word3. If the db field name starts with '_', it is stripped out
so that the name is compatible with collections.namedtuple.
for example: _word1_word2_word3 => word1Word2Word3
Parameters:
--------------------------------------------------------------
dbName: database internal field name
retval: public name
"""
words = dbName.split('_')
if dbName.startswith('_'):
words = words[1:]
pubWords = [words[0]]
for word in words[1:]:
pubWords.append(word[0].upper() + word[1:])
return ''.join(pubWords)
@logExceptions(_LOGGER)
@g_retrySQL
def connect(self, deleteOldVersions=False, recreate=False):
""" Locate the current version of the jobs DB or create a new one, and
optionally delete old versions laying around. If desired, this method
can be called at any time to re-create the tables from scratch, delete
old versions of the database, etc.
Parameters:
----------------------------------------------------------------
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
"""
# Initialize tables, if needed
with ConnectionFactory.get() as conn:
# Initialize tables
self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,
recreate=recreate)
# Save our connection id
conn.cursor.execute('SELECT CONNECTION_ID()')
self._connectionID = conn.cursor.fetchall()[0][0]
self._logger.info("clientJobsConnectionID=%r", self._connectionID)
return
  @logExceptions(_LOGGER)
  def _initTables(self, cursor, deleteOldVersions, recreate):
    """ Initialize tables, if needed: creates the jobs and models tables when
    absent, then introspects their schemas to populate the field-name lists,
    the public<->DB name-mapping dicts, and the dynamic namedtuple classes
    used by the rest of this DAO.

    Parameters:
    ----------------------------------------------------------------
    cursor:            SQL cursor
    deleteOldVersions: if true, delete any old versions of the DB left
                       on the server
    recreate:          if true, recreate the database from scratch even
                       if it already exists.
    """
    # Delete old versions if they exist
    if deleteOldVersions:
      self._logger.info(
        "Dropping old versions of client_jobs DB; called from: %r",
        traceback.format_stack())
      for i in range(self._DB_VERSION):
        cursor.execute('DROP DATABASE IF EXISTS {0!s}'.format(self.__getDBNameForVersion(i)))

    # Create the database if necessary
    if recreate:
      self._logger.info(
        "Dropping client_jobs DB %r; called from: %r",
        self.dbName, traceback.format_stack())
      cursor.execute('DROP DATABASE IF EXISTS {0!s}'.format((self.dbName)))

    cursor.execute('CREATE DATABASE IF NOT EXISTS {0!s}'.format((self.dbName)))

    # Get the list of tables
    cursor.execute('SHOW TABLES IN {0!s}'.format((self.dbName)))
    output = cursor.fetchall()
    tableNames = [x[0] for x in output]

    # ------------------------------------------------------------------------
    # Create the jobs table if it doesn't exist
    # Fields that start with '_eng' are intended for private use by the engine
    # and should not be used by the UI
    if 'jobs' not in tableNames:
      self._logger.info("Creating table %r", self.jobsTableName)
      fields = [
        'job_id INT UNSIGNED NOT NULL AUTO_INCREMENT',
        # unique jobID
        'client CHAR({0:d})'.format((self.CLIENT_MAX_LEN)),
        # name of client (UI, StrmMgr, etc.)
        'client_info LONGTEXT',
        # Arbitrary data defined by the client
        'client_key varchar(255)',
        # Foreign key as defined by the client.
        'cmd_line LONGTEXT',
        # command line to use to launch each worker process
        'params LONGTEXT',
        # JSON encoded params for the job, for use by the worker processes
        'job_hash BINARY({0:d}) DEFAULT NULL'.format((self.HASH_MAX_LEN)),
        # unique hash of the job, provided by the client. Used for detecting
        # identical job requests from the same client when they use the
        # jobInsertUnique() method.
        'status VARCHAR(16) DEFAULT "notStarted"',
        # One of the STATUS_XXX enumerated value strings
        'completion_reason VARCHAR(16)',
        # One of the CMPL_REASON_XXX enumerated value strings.
        # NOTE: This is the job completion reason according to the hadoop
        #   job-tracker. A success here does not necessarily mean the
        #   workers were "happy" with the job. To see if the workers
        #   failed, check the worker_completion_reason
        'completion_msg LONGTEXT',
        # Why this job completed, according to job-tracker
        'worker_completion_reason VARCHAR(16) DEFAULT "{0!s}"'.format( \
          self.CMPL_REASON_SUCCESS),
        # One of the CMPL_REASON_XXX enumerated value strings. This may
        # be changed to CMPL_REASON_ERROR if any workers encounter
        # an error while running the job.
        'worker_completion_msg LONGTEXT',
        # Why this job completed, according to workers. If
        # worker_completion_reason is set to CMPL_REASON_ERROR, this will
        # contain the error information.
        'cancel BOOLEAN DEFAULT FALSE',
        # set by UI, polled by engine
        'start_time DATETIME DEFAULT 0',
        # When job started
        'end_time DATETIME DEFAULT 0',
        # When job ended
        'results LONGTEXT',
        # JSON dict with general information about the results of the job,
        # including the ID and value of the best model
        # TODO: different semantics for results field of ProductionJob
        '_eng_job_type VARCHAR(32)',
        # String used to specify the type of job that this is. Current
        # choices are hypersearch, production worker, or stream worker
        'minimum_workers INT UNSIGNED DEFAULT 0',
        # min number of desired workers at a time. If 0, no workers will be
        # allocated in a crunch
        'maximum_workers INT UNSIGNED DEFAULT 0',
        # max number of desired workers at a time. If 0, then use as many
        # as practical given load on the cluster.
        'priority INT DEFAULT {0:d}'.format(self.DEFAULT_JOB_PRIORITY),
        # job scheduling priority; 0 is the default priority (
        # ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
        # priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative
        # values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
        '_eng_allocate_new_workers BOOLEAN DEFAULT TRUE',
        # Should the scheduling algorithm allocate new workers to this job?
        # If a specialized worker willingly gives up control, we set this
        # field to FALSE to avoid allocating new workers.
        '_eng_untended_dead_workers BOOLEAN DEFAULT FALSE',
        # If a specialized worker fails or is killed by the scheduler, we
        # set this field to TRUE to indicate that the worker is dead
        'num_failed_workers INT UNSIGNED DEFAULT 0',
        # The number of failed specialized workers for this job. If the
        # number of failures is >= max.failed.attempts, we mark the job
        # as failed
        'last_failed_worker_error_msg LONGTEXT',
        # Error message of the most recent specialized failed worker
        '_eng_cleaning_status VARCHAR(16) DEFAULT "{0!s}"'.format( \
          self.CLEAN_NOT_DONE),
        # Has the job been garbage-collected; this includes removing
        # unneeded model output caches, s3 checkpoints.
        'gen_base_description LONGTEXT',
        # The contents of the generated description.py file from hypersearch
        # requests. This is generated by the Hypersearch workers and stored
        # here for reference, debugging, and development purposes.
        'gen_permutations LONGTEXT',
        # The contents of the generated permutations.py file from
        # hypersearch requests. This is generated by the Hypersearch workers
        # and stored here for reference, debugging, and development
        # purposes.
        '_eng_last_update_time DATETIME DEFAULT 0',
        # time stamp of last update, used for detecting stalled jobs
        '_eng_cjm_conn_id INT UNSIGNED',
        # ID of the CJM starting up this job
        '_eng_worker_state LONGTEXT',
        # JSON encoded state of the hypersearch in progress, for private
        # use by the Hypersearch workers
        '_eng_status LONGTEXT',
        # String used for status messages sent from the engine for
        # informative purposes only. Usually printed periodically by
        # clients watching a job progress.
        '_eng_model_milestones LONGTEXT',
        # JSON encoded object with information about global model milestone
        # results
        'PRIMARY KEY (job_id)',
        'UNIQUE INDEX (client, job_hash)',
        'INDEX (status)',
        'INDEX (client_key)'
        ]
      options = [
        'AUTO_INCREMENT=1000',
        ]
      query = 'CREATE TABLE IF NOT EXISTS {0!s} ({1!s}) {2!s}'.format(self.jobsTableName, ','.join(fields), ','.join(options))
      cursor.execute(query)

    # ------------------------------------------------------------------------
    # Create the models table if it doesn't exist
    # Fields that start with '_eng' are intended for private use by the engine
    # and should not be used by the UI
    if 'models' not in tableNames:
      self._logger.info("Creating table %r", self.modelsTableName)
      fields = [
        'model_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT',
        # globally unique model ID
        'job_id INT UNSIGNED NOT NULL',
        # jobID
        'params LONGTEXT NOT NULL',
        # JSON encoded params for the model
        'status VARCHAR(16) DEFAULT "notStarted"',
        # One of the STATUS_XXX enumerated value strings
        'completion_reason VARCHAR(16)',
        # One of the CMPL_REASON_XXX enumerated value strings
        'completion_msg LONGTEXT',
        # Why this job completed
        'results LONGTEXT DEFAULT NULL',
        # JSON encoded structure containing metrics produced by the model
        'optimized_metric FLOAT ',
        # Value of the particular metric we are optimizing in hypersearch
        'update_counter INT UNSIGNED DEFAULT 0',
        # incremented by engine every time the results is updated
        'num_records INT UNSIGNED DEFAULT 0',
        # number of records processed so far
        'start_time DATETIME DEFAULT 0',
        # When this model started being evaluated
        'end_time DATETIME DEFAULT 0',
        # When this model completed
        'cpu_time FLOAT DEFAULT 0',
        # How much actual CPU time was spent on this model, in seconds. This
        # excludes time the process spent sleeping, or otherwise not
        # actually executing code.
        'model_checkpoint_id LONGTEXT',
        # Checkpoint identifier for this model (after it has been saved)
        'gen_description LONGTEXT',
        # The contents of the generated description.py file from hypersearch
        # requests. This is generated by the Hypersearch workers and stored
        # here for reference, debugging, and development purposes.
        '_eng_params_hash BINARY({0:d}) DEFAULT NULL'.format((self.HASH_MAX_LEN)),
        # MD5 hash of the params
        '_eng_particle_hash BINARY({0:d}) DEFAULT NULL'.format((self.HASH_MAX_LEN)),
        # MD5 hash of the particle info for PSO algorithm
        '_eng_last_update_time DATETIME DEFAULT 0',
        # time stamp of last update, used for detecting stalled workers
        '_eng_task_tracker_id TINYBLOB',
        # Hadoop Task Tracker ID
        '_eng_worker_id TINYBLOB',
        # Hadoop Map Task ID
        '_eng_attempt_id TINYBLOB',
        # Hadoop Map task attempt ID
        '_eng_worker_conn_id INT DEFAULT 0',
        # database client connection ID of the worker that is running this
        # model
        '_eng_milestones LONGTEXT',
        # A JSON encoded list of metric values for the model at each
        # milestone point
        '_eng_stop VARCHAR(16) DEFAULT NULL',
        # One of the STOP_REASON_XXX enumerated value strings. Set either by
        # the swarm terminator of either the current, or another
        # Hypersearch worker.
        '_eng_matured BOOLEAN DEFAULT FALSE',
        # Set by the model maturity-checker when it decides that this model
        # has "matured". This means that it has reached the point of
        # not getting better results with more data.
        'PRIMARY KEY (model_id)',
        'UNIQUE INDEX (job_id, _eng_params_hash)',
        'UNIQUE INDEX (job_id, _eng_particle_hash)',
        ]
      options = [
        'AUTO_INCREMENT=1000',
        ]
      query = 'CREATE TABLE IF NOT EXISTS {0!s} ({1!s}) {2!s}'.format(self.modelsTableName, ','.join(fields), ','.join(options))
      cursor.execute(query)

    # ---------------------------------------------------------------------
    # Get the field names for each table (via schema introspection)
    cursor.execute('DESCRIBE {0!s}'.format((self.jobsTableName)))
    fields = cursor.fetchall()
    self._jobs.dbFieldNames = [str(field[0]) for field in fields]

    cursor.execute('DESCRIBE {0!s}'.format((self.modelsTableName)))
    fields = cursor.fetchall()
    self._models.dbFieldNames = [str(field[0]) for field in fields]

    # ---------------------------------------------------------------------
    # Generate the public names (camelCase) from the DB column names
    self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)
                                   for x in self._jobs.dbFieldNames]
    self._models.publicFieldNames = [self._columnNameDBToPublic(x)
                                     for x in self._models.dbFieldNames]

    # ---------------------------------------------------------------------
    # Generate the name conversion dicts
    self._jobs.pubToDBNameDict = dict(
      zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames))
    self._jobs.dbToPubNameDict = dict(
      zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames))
    self._models.pubToDBNameDict = dict(
      zip(self._models.publicFieldNames, self._models.dbFieldNames))
    self._models.dbToPubNameDict = dict(
      zip(self._models.dbFieldNames, self._models.publicFieldNames))

    # ---------------------------------------------------------------------
    # Generate the dynamic namedtuple classes we use
    self._models.modelInfoNamedTuple = collections.namedtuple(
      '_modelInfoNamedTuple', self._models.publicFieldNames)

    self._jobs.jobInfoNamedTuple = collections.namedtuple(
      '_jobInfoNamedTuple', self._jobs.publicFieldNames)

    return
  def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch,
                                selectFieldNames, maxRows=None):
    """ Return a sequence of matching rows with the requested field values from
    a table or empty sequence if nothing matched.

    tableInfo:     Table information: a ClientJobsDAO._TableInfoBase instance
    conn:          Owned connection acquired from ConnectionFactory.get()
    fieldsToMatch: Dictionary of internal fieldName/value mappings that
                   identify the desired rows. If a value is an instance of
                   ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
                   operator 'IN' will be used in the corresponding SQL
                   predicate; if the value is bool: "IS TRUE/FALSE"; if the
                   value is None: "IS NULL"; '=' will be used for all other
                   cases.
    selectFieldNames:
                   list of fields to return, using internal field names
    maxRows:       maximum number of rows to return; unlimited if maxRows
                   is None
    retval:        A sequence of matching rows, each row consisting of field
                   values in the order of the requested field names. Empty
                   sequence is returned when no match exists.
    """
    # Sanity-check args against the introspected schema
    assert fieldsToMatch, repr(fieldsToMatch)
    assert all(k in tableInfo.dbFieldNames
               for k in fieldsToMatch.iterkeys()), repr(fieldsToMatch)

    assert selectFieldNames, repr(selectFieldNames)
    assert all(f in tableInfo.dbFieldNames for f in selectFieldNames), repr(
      selectFieldNames)

    # NOTE: make sure match expressions and values are in the same order;
    # matchFieldValues below must line up with the '%s' placeholders produced
    # by this generator (bool/None match by literal SQL, not placeholders)
    matchPairs = fieldsToMatch.items()
    matchExpressionGen = (
      p[0] +
      (' IS ' + {True:'TRUE', False:'FALSE'}[p[1]] if isinstance(p[1], bool)
       else ' IS NULL' if p[1] is None
       else ' IN %s' if isinstance(p[1], self._SEQUENCE_TYPES)
       else '=%s')
      for p in matchPairs)
    matchFieldValues = [p[1] for p in matchPairs
                        if (not isinstance(p[1], (bool)) and p[1] is not None)]

    query = 'SELECT {0!s} FROM {1!s} WHERE ({2!s})'.format(
      ','.join(selectFieldNames), tableInfo.tableName,
      ' AND '.join(matchExpressionGen))
    sqlParams = matchFieldValues
    if maxRows is not None:
      query += ' LIMIT %s'
      sqlParams.append(maxRows)

    conn.cursor.execute(query, sqlParams)
    rows = conn.cursor.fetchall()

    if rows:
      assert maxRows is None or len(rows) <= maxRows, "{0:d} !<= {1:d}".format(
        len(rows), maxRows)
      assert len(rows[0]) == len(selectFieldNames), "{0:d} != {1:d}".format(
        len(rows[0]), len(selectFieldNames))
    else:
      # Normalize "no match" to an empty tuple
      rows = tuple()

    return rows
@g_retrySQL
def _getMatchingRowsWithRetries(self, tableInfo, fieldsToMatch,
selectFieldNames, maxRows=None):
""" Like _getMatchingRowsNoRetries(), but with retries on transient MySQL
failures
"""
with ConnectionFactory.get() as conn:
return self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames, maxRows)
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch,
selectFieldNames):
""" Return a single matching row with the requested field values from the
the requested table or None if nothing matched.
tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance
conn: Owned connection acquired from ConnectionFactory.get()
fieldsToMatch: Dictionary of internal fieldName/value mappings that
identify the desired rows. If a value is an instance of
ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
operator 'IN' will be used in the corresponding SQL
predicate; if the value is bool: "IS TRUE/FALSE"; if the
value is None: "IS NULL"; '=' will be used for all other
cases.
selectFieldNames:
list of fields to return, using internal field names
retval: A sequence of field values of the matching row in the order
of the given field names; or None if there was no match.
"""
rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames, maxRows=1)
if rows:
assert len(rows) == 1, repr(len(rows))
result = rows[0]
else:
result = None
return result
@g_retrySQL
def _getOneMatchingRowWithRetries(self, tableInfo, fieldsToMatch,
selectFieldNames):
""" Like _getOneMatchingRowNoRetries(), but with retries on transient MySQL
failures
"""
with ConnectionFactory.get() as conn:
return self._getOneMatchingRowNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames)
@classmethod
def _normalizeHash(cls, hashValue):
hashLen = len(hashValue)
if hashLen < cls.HASH_MAX_LEN:
hashValue += '\0' * (cls.HASH_MAX_LEN - hashLen)
else:
assert hashLen <= cls.HASH_MAX_LEN, (
"Hash is too long: hashLen=%r; hashValue=%r") % (hashLen, hashValue)
return hashValue
  def _insertOrGetUniqueJobNoRetries(
      self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params,
      minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):
    """ Attempt to insert a row with the given parameters into the jobs table.
    Return jobID of the inserted row, or of an existing row with matching
    client/jobHash key.
    The combination of client and jobHash are expected to be unique (enforced
    by a unique index on the two columns).
    NOTE: It's possible that this or another process (on this or another
    machine) already inserted a row with matching client/jobHash key (e.g.,
    StreamMgr). This may also happen undetected by this function due to a
    partially-successful insert operation (e.g., row inserted, but then
    connection was lost while reading response) followed by retries either of
    this function or in SteadyDB module.
    Parameters:
    ----------------------------------------------------------------
    conn:           Owned connection acquired from ConnectionFactory.get()
    client:         Name of the client submitting the job
    cmdLine:        Command line to use to launch each worker process; must be
                    a non-empty string
    jobHash:        unique hash of this job. The caller must insure that this,
                    together with client, uniquely identifies this job request
                    for the purposes of detecting duplicates.
    clientInfo:     JSON encoded dict of client specific information.
    clientKey:      Foreign key.
    params:         JSON encoded dict of the parameters for the job. This
                    can be fetched out of the database by the worker processes
                    based on the jobID.
    minimumWorkers: minimum number of workers desired at a time.
    maximumWorkers: maximum number of workers desired at a time.
    priority:       Job scheduling priority; 0 is the default priority (
                    ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
                    higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
                    and negative values are lower priority (down to
                    ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
                    be scheduled to run at the expense of the lower-priority
                    jobs, and higher-priority job tasks will preempt those
                    with lower priority if there is inadequate supply of
                    scheduling slots. Excess lower priority job tasks will
                    starve as long as slot demand exceeds supply. Most jobs
                    should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
                    that must run at all cost, such as Multi-Model-Master,
                    should be scheduled with MAX_JOB_PRIORITY.
    alreadyRunning: Used for unit test purposes only. This inserts the job
                    in the running state. It is used when running a worker
                    in standalone mode without hadoop- it gives it a job
                    record to work with.
    retval:         jobID of the inserted jobs row, or of an existing jobs row
                    with matching client/jobHash key
    """
    assert len(client) <= self.CLIENT_MAX_LEN, "client too long:" + repr(client)
    assert cmdLine, "Unexpected empty or None command-line: " + repr(cmdLine)
    assert len(jobHash) == self.HASH_MAX_LEN, "wrong hash len={0:d}".format(len(jobHash))
    # Initial status
    if alreadyRunning:
      # STATUS_TESTMODE, so that scheduler won't pick it up (for in-proc tests)
      initStatus = self.STATUS_TESTMODE
    else:
      initStatus = self.STATUS_NOTSTARTED
    # Create a new job entry
    # NOTE: INSERT IGNORE reports 0 rows inserted (without error) when a row
    # with the same unique client/job_hash key already exists
    query = 'INSERT IGNORE INTO %s (status, client, client_info, client_key,' \
            'cmd_line, params, job_hash, _eng_last_update_time, ' \
            'minimum_workers, maximum_workers, priority, _eng_job_type) ' \
            ' VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s, ' \
            ' UTC_TIMESTAMP(), %%s, %%s, %%s, %%s) ' \
            % (self.jobsTableName,)
    sqlParams = (initStatus, client, clientInfo, clientKey, cmdLine, params,
                 jobHash, minimumWorkers, maximumWorkers, priority, jobType)
    numRowsInserted = conn.cursor.execute(query, sqlParams)
    # jobID == 0 is the sentinel for "id not known yet"
    jobID = 0
    if numRowsInserted == 1:
      # Get the chosen job id
      # NOTE: LAST_INSERT_ID() returns 0 after intermittent connection failure
      conn.cursor.execute('SELECT LAST_INSERT_ID()')
      jobID = conn.cursor.fetchall()[0][0]
      if jobID == 0:
        self._logger.warn(
            '_insertOrGetUniqueJobNoRetries: SELECT LAST_INSERT_ID() returned 0; '
            'likely due to reconnection in SteadyDB following INSERT. '
            'jobType=%r; client=%r; clientInfo=%r; clientKey=%s; jobHash=%r; '
            'cmdLine=%r',
            jobType, client, _abbreviate(clientInfo, 32), clientKey, jobHash,
            cmdLine)
    else:
      # Assumption: nothing was inserted because this is a retry and the row
      # with this client/hash already exists from our prior
      # partially-successful attempt; or row with matching client/jobHash was
      # inserted already by some process on some machine.
      assert numRowsInserted == 0, repr(numRowsInserted)
    if jobID == 0:
      # Recover from intermittent failure in a partially-successful attempt;
      # or row with matching client/jobHash was already in table
      row = self._getOneMatchingRowNoRetries(
          self._jobs, conn, dict(client=client, job_hash=jobHash), ['job_id'])
      assert row is not None
      assert len(row) == 1, 'Unexpected num fields: ' + repr(len(row))
      jobID = row[0]
    # ---------------------------------------------------------------------
    # If asked to enter the job in the running state, set the connection id
    # and start time as well
    if alreadyRunning:
      query = 'UPDATE %s SET _eng_cjm_conn_id=%%s, ' \
              '              start_time=UTC_TIMESTAMP(), ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              ' WHERE job_id=%%s' \
              % (self.jobsTableName,)
      conn.cursor.execute(query, (self._connectionID, jobID))
    return jobID
  def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):
    """ Resumes processing of an existing job that is presently in the
    STATUS_COMPLETED state.
    NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO
    NOT use it on Hypersearch jobs.
    This prepares an existing job entry to resume processing. The CJM is always
    periodically sweeping the jobs table and when it finds a job that is ready
    to run, it will proceed to start it up on Hadoop.
    Parameters:
    ----------------------------------------------------------------
    conn:           Owned connection acquired from ConnectionFactory.get()
    jobID:          jobID of the job to resume
    alreadyRunning: Used for unit test purposes only. This inserts the job
                    in the running state. It is used when running a worker
                    in standalone mode without hadoop.
    retval:         nothing
    NOTE(review): despite earlier documentation claiming a RuntimeError, this
    function does NOT raise when no rows are affected (job missing or not in
    STATUS_COMPLETED) — that case is expected when another process resumed the
    job first, so it is logged at info level and silently ignored.
    """
    # Initial status
    if alreadyRunning:
      # Use STATUS_TESTMODE so scheduler will leave our row alone
      initStatus = self.STATUS_TESTMODE
    else:
      initStatus = self.STATUS_NOTSTARTED
    # NOTE: some of our clients (e.g., StreamMgr) may call us (directly or
    # indirectly) for the same job from different processes (even different
    # machines), so we should be prepared for the update to fail; same holds
    # if the UPDATE succeeds, but connection fails while reading result
    # Reset all completion/failure bookkeeping columns back to their defaults
    assignments = [
      'status=%s',
      'completion_reason=DEFAULT',
      'completion_msg=DEFAULT',
      'worker_completion_reason=DEFAULT',
      'worker_completion_msg=DEFAULT',
      'end_time=DEFAULT',
      'cancel=DEFAULT',
      '_eng_last_update_time=UTC_TIMESTAMP()',
      '_eng_allocate_new_workers=DEFAULT',
      '_eng_untended_dead_workers=DEFAULT',
      'num_failed_workers=DEFAULT',
      'last_failed_worker_error_msg=DEFAULT',
      '_eng_cleaning_status=DEFAULT',
    ]
    assignmentValues = [initStatus]
    if alreadyRunning:
      # Test mode: claim the job for this connection and stamp its start time
      assignments += ['_eng_cjm_conn_id=%s', 'start_time=UTC_TIMESTAMP()',
                      '_eng_last_update_time=UTC_TIMESTAMP()']
      assignmentValues.append(self._connectionID)
    else:
      assignments += ['_eng_cjm_conn_id=DEFAULT', 'start_time=DEFAULT']
    assignments = ', '.join(assignments)
    # The status=%s guard makes this UPDATE a no-op unless the job is still
    # in STATUS_COMPLETED, which keeps concurrent resumes safe
    query = 'UPDATE %s SET %s ' \
            ' WHERE job_id=%%s AND status=%%s' \
            % (self.jobsTableName, assignments)
    sqlParams = assignmentValues + [jobID, self.STATUS_COMPLETED]
    numRowsAffected = conn.cursor.execute(query, sqlParams)
    assert numRowsAffected <= 1, repr(numRowsAffected)
    if numRowsAffected == 0:
      self._logger.info(
        "_resumeJobNoRetries: Redundant job-resume UPDATE: job was not "
        "suspended or was resumed by another process or operation was retried "
        "after connection failure; jobID=%s", jobID)
    return
def getConnectionID(self):
""" Return our connection ID. This can be used for worker identification
purposes.
NOTE: the actual MySQL connection ID used in queries may change from time
to time if connection is re-acquired (e.g., upon MySQL server restart) or
when more than one entry from the connection pool has been used (e.g.,
multi-threaded apps)
"""
return self._connectionID
@logExceptions(_LOGGER)
def jobSuspend(self, jobID):
""" Requests a job to be suspended
NOTE: this is primarily for suspending Production Jobs; DO NOT use
it on Hypersearch jobs. For canceling any job type, use jobCancel() instead!
Parameters:
----------------------------------------------------------------
jobID: jobID of the job to resume
retval: nothing
"""
# TODO: validate that the job is in the appropriate state for being
# suspended: consider using a WHERE clause to make sure that
# the job is not already in the "completed" state
# TODO: when Nupic job control states get figured out, there may be a
# different way to suspend jobs ("cancel" doesn't make sense for this)
# NOTE: jobCancel() does retries on transient mysql failures
self.jobCancel(jobID)
return
@logExceptions(_LOGGER)
def jobResume(self, jobID, alreadyRunning=False):
""" Resumes processing of an existing job that is presently in the
STATUS_COMPLETED state.
NOTE: this is primarily for resuming suspended Production Jobs; DO NOT use
it on Hypersearch jobs.
NOTE: The job MUST be in the STATUS_COMPLETED state at the time of this
call, otherwise an exception will be raised.
This prepares an existing job entry to resume processing. The CJM is always
periodically sweeping the jobs table and when it finds a job that is ready
to run, will proceed to start it up on Hadoop.
Parameters:
----------------------------------------------------------------
job: jobID of the job to resume
alreadyRunning: Used for unit test purposes only. This inserts the job
in the running state. It is used when running a worker
in standalone mode without hadoop.
raises: Throws a RuntimeError if no rows are affected. This could
either be because:
1) Because there was not matching jobID
2) or if the status of the job was not STATUS_COMPLETED.
retval: nothing
"""
row = self.jobGetFields(jobID, ['status'])
(jobStatus,) = row
if jobStatus != self.STATUS_COMPLETED:
raise RuntimeError(("Failed to resume job: job was not suspended; "
"jobID=%s; job status=%r") % (jobID, jobStatus))
# NOTE: on MySQL failures, we need to retry ConnectionFactory.get() as well
# in order to recover from lost connections
@g_retrySQL
def resumeWithRetries():
with ConnectionFactory.get() as conn:
self._resumeJobNoRetries(conn, jobID, alreadyRunning)
resumeWithRetries()
return
  @logExceptions(_LOGGER)
  def jobInsert(self, client, cmdLine, clientInfo='', clientKey='', params='',
                alreadyRunning=False, minimumWorkers=0, maximumWorkers=0,
                jobType='', priority=DEFAULT_JOB_PRIORITY):
    """ Add an entry to the jobs table for a new job request. This is called by
    clients that wish to startup a new job, like a Hypersearch, stream job, or
    specific model evaluation from the engine.
    This puts a new entry into the jobs table. The CJM is always periodically
    sweeping the jobs table and when it finds a new job, will proceed to start it
    up on Hadoop.
    Parameters:
    ----------------------------------------------------------------
    client:         Name of the client submitting the job
    cmdLine:        Command line to use to launch each worker process; must be
                    a non-empty string
    clientInfo:     JSON encoded dict of client specific information.
    clientKey:      Foreign key.
    params:         JSON encoded dict of the parameters for the job. This
                    can be fetched out of the database by the worker processes
                    based on the jobID.
    alreadyRunning: Used for unit test purposes only. This inserts the job
                    in the running state. It is used when running a worker
                    in standalone mode without hadoop - it gives it a job
                    record to work with.
    minimumWorkers: minimum number of workers desired at a time.
    maximumWorkers: maximum number of workers desired at a time.
    jobType:        The type of job that this is. This should be one of the
                    JOB_TYPE_XXXX enums. This is needed to allow a standard
                    way of recognizing a job's function and capabilities.
    priority:       Job scheduling priority; 0 is the default priority (
                    ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
                    higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
                    and negative values are lower priority (down to
                    ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
                    be scheduled to run at the expense of the lower-priority
                    jobs, and higher-priority job tasks will preempt those
                    with lower priority if there is inadequate supply of
                    scheduling slots. Excess lower priority job tasks will
                    starve as long as slot demand exceeds supply. Most jobs
                    should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
                    that must run at all cost, such as Multi-Model-Master,
                    should be scheduled with MAX_JOB_PRIORITY.
    retval:         jobID - unique ID assigned to this job
    """
    # uuid1 gives us a fresh time-based value, so each jobInsert call gets a
    # unique client/jobHash pair (unlike jobInsertUnique, which dedupes)
    jobHash = self._normalizeHash(uuid.uuid1().bytes)
    @g_retrySQL
    def insertWithRetries():
      with ConnectionFactory.get() as conn:
        return self._insertOrGetUniqueJobNoRetries(
            conn, client=client, cmdLine=cmdLine, jobHash=jobHash,
            clientInfo=clientInfo, clientKey=clientKey, params=params,
            minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,
            jobType=jobType, priority=priority, alreadyRunning=alreadyRunning)
    try:
      jobID = insertWithRetries()
    except:
      self._logger.exception(
          'jobInsert FAILED: jobType=%r; client=%r; clientInfo=%r; clientKey=%r;'
          'jobHash=%r; cmdLine=%r',
          jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash,
          cmdLine)
      raise
    else:
      self._logger.info(
          'jobInsert: returning jobID=%s. jobType=%r; client=%r; clientInfo=%r; '
          'clientKey=%r; jobHash=%r; cmdLine=%r',
          jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey,
          jobHash, cmdLine)
    return jobID
  @logExceptions(_LOGGER)
  def jobInsertUnique(self, client, cmdLine, jobHash, clientInfo='',
                      clientKey='', params='', minimumWorkers=0,
                      maximumWorkers=0, jobType='',
                      priority=DEFAULT_JOB_PRIORITY):
    """ Add an entry to the jobs table for a new job request, but only if the
    same job, by the same client is not already running. If the job is already
    running, or queued up to run, this call does nothing. If the job does not
    exist in the jobs table or has completed, it will be inserted and/or started
    up again.
    This method is called by clients, like StreamMgr, that wish to only start up
    a job if it hasn't already been started up.
    Parameters:
    ----------------------------------------------------------------
    client:         Name of the client submitting the job
    cmdLine:        Command line to use to launch each worker process; must be
                    a non-empty string
    jobHash:        unique hash of this job. The client must insure that this
                    uniquely identifies this job request for the purposes
                    of detecting duplicates.
    clientInfo:     JSON encoded dict of client specific information.
    clientKey:      Foreign key.
    params:         JSON encoded dict of the parameters for the job. This
                    can be fetched out of the database by the worker processes
                    based on the jobID.
    minimumWorkers: minimum number of workers desired at a time.
    maximumWorkers: maximum number of workers desired at a time.
    jobType:        The type of job that this is. This should be one of the
                    JOB_TYPE_XXXX enums. This is needed to allow a standard
                    way of recognizing a job's function and capabilities.
    priority:       Job scheduling priority; 0 is the default priority (
                    ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are
                    higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),
                    and negative values are lower priority (down to
                    ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will
                    be scheduled to run at the expense of the lower-priority
                    jobs, and higher-priority job tasks will preempt those
                    with lower priority if there is inadequate supply of
                    scheduling slots. Excess lower priority job tasks will
                    starve as long as slot demand exceeds supply. Most jobs
                    should be scheduled with DEFAULT_JOB_PRIORITY. System jobs
                    that must run at all cost, such as Multi-Model-Master,
                    should be scheduled with MAX_JOB_PRIORITY.
    retval:         jobID of the newly inserted or existing job.
    """
    assert cmdLine, "Unexpected empty or None command-line: " + repr(cmdLine)
    @g_retrySQL
    def insertUniqueWithRetries():
      jobHashValue = self._normalizeHash(jobHash)
      jobID = None
      with ConnectionFactory.get() as conn:
        # Check whether a job with this client/jobHash pair already exists
        row = self._getOneMatchingRowNoRetries(
            self._jobs, conn, dict(client=client, job_hash=jobHashValue),
            ['job_id', 'status'])
        if row is not None:
          (jobID, status) = row
          # NOTE: when the existing job is running or queued (not
          # STATUS_COMPLETED), we intentionally leave it untouched and simply
          # return its jobID
          if status == self.STATUS_COMPLETED:
            # Restart existing job that had completed
            query = 'UPDATE %s SET client_info=%%s, ' \
                    '              client_key=%%s, ' \
                    '              cmd_line=%%s, ' \
                    '              params=%%s, ' \
                    '              minimum_workers=%%s, ' \
                    '              maximum_workers=%%s, ' \
                    '              priority=%%s, '\
                    '              _eng_job_type=%%s ' \
                    ' WHERE (job_id=%%s AND status=%%s)' \
                    % (self.jobsTableName,)
            sqlParams = (clientInfo, clientKey, cmdLine, params,
                         minimumWorkers, maximumWorkers, priority,
                         jobType, jobID, self.STATUS_COMPLETED)
            numRowsUpdated = conn.cursor.execute(query, sqlParams)
            assert numRowsUpdated <= 1, repr(numRowsUpdated)
            if numRowsUpdated == 0:
              self._logger.info(
                  "jobInsertUnique: Redundant job-reuse UPDATE: job restarted by "
                  "another process, values were unchanged, or operation was "
                  "retried after connection failure; jobID=%s", jobID)
            # Restart the job, unless another process beats us to it
            self._resumeJobNoRetries(conn, jobID, alreadyRunning=False)
        else:
          # There was no job row with matching client/jobHash, so insert one
          jobID = self._insertOrGetUniqueJobNoRetries(
              conn, client=client, cmdLine=cmdLine, jobHash=jobHashValue,
              clientInfo=clientInfo, clientKey=clientKey, params=params,
              minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,
              jobType=jobType, priority=priority, alreadyRunning=False)
      return jobID
    try:
      jobID = insertUniqueWithRetries()
    except:
      self._logger.exception(
          'jobInsertUnique FAILED: jobType=%r; client=%r; '
          'clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r',
          jobType, client, _abbreviate(clientInfo, 48), clientKey, jobHash,
          cmdLine)
      raise
    else:
      self._logger.info(
          'jobInsertUnique: returning jobID=%s. jobType=%r; client=%r; '
          'clientInfo=%r; clientKey=%r; jobHash=%r; cmdLine=%r',
          jobID, jobType, client, _abbreviate(clientInfo, 48), clientKey,
          jobHash, cmdLine)
    return jobID
  @g_retrySQL
  def _startJobWithRetries(self, jobID):
    """ Place the given job in STATUS_RUNNING mode; the job is expected to be
    STATUS_NOTSTARTED.
    NOTE: this function was factored out of jobStartNext because it's also
    needed for testing (e.g., test_client_jobs_dao.py)
    jobID:    jobID of the job to transition to STATUS_RUNNING
    retval:   nothing
    """
    with ConnectionFactory.get() as conn:
      # The status=%s guard makes the UPDATE idempotent: a retried attempt
      # (after transient failure) simply matches 0 rows instead of
      # double-starting the job
      query = 'UPDATE %s SET status=%%s, ' \
              '              _eng_cjm_conn_id=%%s, ' \
              '              start_time=UTC_TIMESTAMP(), ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              ' WHERE (job_id=%%s AND status=%%s)' \
              % (self.jobsTableName,)
      sqlParams = [self.STATUS_RUNNING, self._connectionID,
                   jobID, self.STATUS_NOTSTARTED]
      numRowsUpdated = conn.cursor.execute(query, sqlParams)
      if numRowsUpdated != 1:
        # Not fatal: the prior (pre-retry) attempt may have already succeeded
        self._logger.warn('jobStartNext: numRowsUpdated=%r instead of 1; '
                          'likely side-effect of transient connection '
                          'failure', numRowsUpdated)
    return
@logExceptions(_LOGGER)
def jobStartNext(self):
""" For use only by Nupic Scheduler (also known as ClientJobManager) Look
through the jobs table and see if any new job requests have been
queued up. If so, pick one and mark it as starting up and create the
model table to hold the results
Parameters:
----------------------------------------------------------------
retval: jobID of the job we are starting up, if found; None if not found
"""
# NOTE: cursor.execute('SELECT @update_id') trick is unreliable: if a
# connection loss occurs during cursor.execute, then the server-cached
# information is lost, and we cannot get the updated job ID; so, we use
# this select instead
row = self._getOneMatchingRowWithRetries(
self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id'])
if row is None:
return None
(jobID,) = row
self._startJobWithRetries(jobID)
return jobID
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobReactivateRunningJobs(self):
    """ Look through the jobs table and reactivate all that are already in the
    running state by setting their _eng_allocate_new_workers fields to True;
    used by Nupic Scheduler as part of its failure-recovery procedure.
    retval:   nothing
    """
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      # Take ownership of every running job (stamp our connection id) and
      # request a fresh worker allocation for each of them
      query = 'UPDATE %s SET _eng_cjm_conn_id=%%s, ' \
              '              _eng_allocate_new_workers=TRUE ' \
              'WHERE status=%%s ' \
              % (self.jobsTableName,)
      conn.cursor.execute(query, [self._connectionID, self.STATUS_RUNNING])
    return
@logExceptions(_LOGGER)
def jobGetDemand(self,):
""" Look through the jobs table and get the demand - minimum and maximum
number of workers requested, if new workers are to be allocated, if there
are any untended dead workers, for all running jobs.
Parameters:
----------------------------------------------------------------
retval: list of ClientJobsDAO._jobs.jobDemandNamedTuple nametuples
containing the demand - min and max workers,
allocate_new_workers, untended_dead_workers, num_failed_workers
for each running (STATUS_RUNNING) job. Empty list when there
isn't any demand.
"""
rows = self._getMatchingRowsWithRetries(
self._jobs, dict(status=self.STATUS_RUNNING),
[self._jobs.pubToDBNameDict[f]
for f in self._jobs.jobDemandNamedTuple._fields])
return [self._jobs.jobDemandNamedTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def jobCancelAllRunningJobs(self):
""" Set cancel field of all currently-running jobs to true.
"""
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
query = 'UPDATE {0!s} SET cancel=TRUE WHERE status<>%s '.format(self.jobsTableName)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
return
@logExceptions(_LOGGER)
@g_retrySQL
def jobCountCancellingJobs(self,):
""" Look through the jobs table and count the running jobs whose
cancel field is true.
Parameters:
----------------------------------------------------------------
retval: A count of running jobs with the cancel field set to true.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT COUNT(job_id) '\
'FROM %s ' \
'WHERE (status<>%%s AND cancel is TRUE)' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return rows[0][0]
@logExceptions(_LOGGER)
@g_retrySQL
def jobGetCancellingJobs(self,):
""" Look through the jobs table and get the list of running jobs whose
cancel field is true.
Parameters:
----------------------------------------------------------------
retval: A (possibly empty) sequence of running job IDs with cancel field
set to true
"""
with ConnectionFactory.get() as conn:
query = 'SELECT job_id '\
'FROM %s ' \
'WHERE (status<>%%s AND cancel is TRUE)' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return tuple(r[0] for r in rows)
@staticmethod
@logExceptions(_LOGGER)
def partitionAtIntervals(data, intervals):
""" Generator to allow iterating slices at dynamic intervals
Parameters:
----------------------------------------------------------------
data: Any data structure that supports slicing (i.e. list or tuple)
*intervals: Iterable of intervals. The sum of intervals should be less
than, or equal to the length of data.
"""
assert sum(intervals) <= len(data)
start = 0
for interval in intervals:
end = start + interval
yield data[start:end]
start = end
raise StopIteration
@staticmethod
@logExceptions(_LOGGER)
def _combineResults(result, *namedTuples):
""" Return a list of namedtuples from the result of a join query. A
single database result is partitioned at intervals corresponding to the
fields in namedTuples. The return value is the result of applying
namedtuple._make() to each of the partitions, for each of the namedTuples.
Parameters:
----------------------------------------------------------------
result: Tuple representing a single result from a database query
*namedTuples: List of named tuples.
"""
results = ClientJobsDAO.partitionAtIntervals(
result, [len(nt._fields) for nt in namedTuples])
return [nt._make(result) for nt, result in zip(namedTuples, results)]
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobInfoWithModels(self, jobID):
    """ Get all info about a job, with model details, if available.
    Parameters:
    ----------------------------------------------------------------
    jobID:    jobID of the job to query
    retval:   A sequence of two-tuples if the jobID exists in the jobs
              table (exception is raised if it doesn't exist). Each two-tuple
              contains an instance of jobInfoNamedTuple as the first element and
              an instance of modelInfoNamedTuple as the second element. NOTE: In
              the case where there are no matching model rows, a sequence of one
              two-tuple will still be returned, but the modelInfoNamedTuple
              fields will be None, and the jobInfoNamedTuple fields will be
              populated.
    raises:   RuntimeError when the given jobID is not in the jobs table
    """
    # Get a database connection and cursor
    combinedResults = None
    with ConnectionFactory.get() as conn:
      # NOTE: Since we're using a LEFT JOIN on the models table, there need not
      # be a matching row in the models table, but the matching row from the
      # jobs table will still be returned (along with all fields from the models
      # table with values of None in case there were no matching models)
      query = ' '.join([
        'SELECT {0!s}.*, {1!s}.*'.format(self.jobsTableName, self.modelsTableName),
        'FROM {0!s}'.format(self.jobsTableName),
        'LEFT JOIN {0!s} USING(job_id)'.format(self.modelsTableName),
        'WHERE job_id=%s'])
      conn.cursor.execute(query, (jobID,))
      if conn.cursor.rowcount > 0:
        # Each joined row is split back into (jobInfo, modelInfo) namedtuples
        combinedResults = [
          ClientJobsDAO._combineResults(
            result, self._jobs.jobInfoNamedTuple,
            self._models.modelInfoNamedTuple
          ) for result in conn.cursor.fetchall()]
    if combinedResults is not None:
      return combinedResults
    raise RuntimeError("jobID={0!s} not found within the jobs table".format((jobID)))
@logExceptions(_LOGGER)
def jobInfo(self, jobID):
""" Get all info about a job
Parameters:
----------------------------------------------------------------
job: jobID of the job to query
retval: namedtuple containing the job info.
"""
row = self._getOneMatchingRowWithRetries(
self._jobs, dict(job_id=jobID),
[self._jobs.pubToDBNameDict[n]
for n in self._jobs.jobInfoNamedTuple._fields])
if row is None:
raise RuntimeError("jobID={0!s} not found within the jobs table".format((jobID)))
# Create a namedtuple with the names to values
return self._jobs.jobInfoNamedTuple._make(row)
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobSetStatus(self, jobID, status, useConnectionID=True,):
    """ Change the status on the given job
    Parameters:
    ----------------------------------------------------------------
    jobID:    jobID of the job to change status
    status:   new status string (ClientJobsDAO.STATUS_xxxxx)
    useConnectionID: True if the connection id of the calling function
              must be the same as the connection that created the job. Set
              to False for hypersearch workers
    raises:   RuntimeError when the UPDATE matched no row; NOTE this occurs
              both when the job is owned by another CJM connection AND when
              jobID simply doesn't exist (the message only mentions the former)
    """
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      query = 'UPDATE %s SET status=%%s, ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              ' WHERE job_id=%%s' \
              % (self.jobsTableName,)
      sqlParams = [status, jobID]
      if useConnectionID:
        # Ownership guard: only the CJM connection that created/claimed the
        # job may change its status
        query += ' AND _eng_cjm_conn_id=%s'
        sqlParams.append(self._connectionID)
      result = conn.cursor.execute(query, sqlParams)
      if result != 1:
        raise RuntimeError("Tried to change the status of job %d to %s, but "
                           "this job belongs to some other CJM" % (
                              jobID, status))
  @logExceptions(_LOGGER)
  @g_retrySQL
  def jobSetCompleted(self, jobID, completionReason, completionMsg,
                      useConnectionID = True):
    """ Change the status on the given job to completed
    Parameters:
    ----------------------------------------------------------------
    jobID:            jobID of the job to mark as completed
    completionReason: completionReason string
    completionMsg:    completionMsg string
    useConnectionID:  True if the connection id of the calling function
                      must be the same as the connection that created the job.
                      Set to False for hypersearch workers
    raises:           RuntimeError when the UPDATE matched no row (job missing
                      or owned by another CJM connection)
    """
    # Get a database connection and cursor
    with ConnectionFactory.get() as conn:
      query = 'UPDATE %s SET status=%%s, ' \
              '              completion_reason=%%s, ' \
              '              completion_msg=%%s, ' \
              '              end_time=UTC_TIMESTAMP(), ' \
              '              _eng_last_update_time=UTC_TIMESTAMP() ' \
              ' WHERE job_id=%%s' \
              % (self.jobsTableName,)
      sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
                   jobID]
      if useConnectionID:
        # Ownership guard: restrict the UPDATE to the CJM connection that
        # owns the job
        query += ' AND _eng_cjm_conn_id=%s'
        sqlParams.append(self._connectionID)
      result = conn.cursor.execute(query, sqlParams)
      if result != 1:
        raise RuntimeError("Tried to change the status of jobID=%s to "
                           "completed, but this job could not be found or "
                           "belongs to some other CJM" % (jobID))
@logExceptions(_LOGGER)
def jobCancel(self, jobID):
""" Cancel the given job. This will update the cancel field in the
jobs table and will result in the job being cancelled.
Parameters:
----------------------------------------------------------------
jobID: jobID of the job to mark as completed
to False for hypersearch workers
"""
self._logger.info('Canceling jobID=%s', jobID)
# NOTE: jobSetFields does retries on transient mysql failures
self.jobSetFields(jobID, {"cancel" : True}, useConnectionID=False)
@logExceptions(_LOGGER)
def jobGetModelIDs(self, jobID):
"""Fetch all the modelIDs that correspond to a given jobID; empty sequence
if none"""
rows = self._getMatchingRowsWithRetries(self._models, dict(job_id=jobID),
['model_id'])
return [r[0] for r in rows]
  @logExceptions(_LOGGER)
  @g_retrySQL
  def getActiveJobCountForClientInfo(self, clientInfo):
    """ Return the number of jobs for the given clientInfo and a status that is
    not completed.
    clientInfo: the client_info value to match exactly
    retval:     integer count of matching non-completed jobs
    """
    with ConnectionFactory.get() as conn:
      # "active" == any status other than STATUS_COMPLETED
      query = 'SELECT count(job_id) ' \
              'FROM %s ' \
              'WHERE client_info = %%s ' \
              ' AND status != %%s' % self.jobsTableName
      conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])
      activeJobCount = conn.cursor.fetchone()[0]
    return activeJobCount
  @logExceptions(_LOGGER)
  @g_retrySQL
  def getActiveJobCountForClientKey(self, clientKey):
    """ Return the number of jobs for the given clientKey and a status that is
    not completed.
    clientKey: the client_key value to match exactly
    retval:    integer count of matching non-completed jobs
    """
    with ConnectionFactory.get() as conn:
      # "active" == any status other than STATUS_COMPLETED
      query = 'SELECT count(job_id) ' \
              'FROM %s ' \
              'WHERE client_key = %%s ' \
              ' AND status != %%s' % self.jobsTableName
      conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
      activeJobCount = conn.cursor.fetchone()[0]
    return activeJobCount
@logExceptions(_LOGGER)
@g_retrySQL
def getActiveJobsForClientInfo(self, clientInfo, fields=None):
  """ Fetch jobIDs (plus optional extra fields) of non-completed jobs that
  match the given clientInfo.

  Parameters:
  ----------------------------------------------------------------
  clientInfo: client info value to filter on
  fields:     optional list of public field names to return after job_id
  retval:     sequence of row tuples, job_id first
  """
  requested = [] if fields is None else fields
  # Translate public field names to DB column names; job_id always leads
  columns = ['job_id'] + [self._jobs.pubToDBNameDict[f] for f in requested]
  query = ('SELECT %s FROM %s '
           'WHERE client_info = %%s '
           ' AND status != %%s') % (','.join(columns), self.jobsTableName)
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])
    return conn.cursor.fetchall()
@logExceptions(_LOGGER)
@g_retrySQL
def getActiveJobsForClientKey(self, clientKey, fields=None):
  """ Fetch jobIDs (plus optional extra fields) of non-completed jobs that
  match the given clientKey.

  Parameters:
  ----------------------------------------------------------------
  clientKey: client key value to filter on
  fields:    optional list of public field names to return after job_id
  retval:    sequence of row tuples, job_id first
  """
  requested = [] if fields is None else fields
  # Translate public field names to DB column names; job_id always leads
  columns = ['job_id'] + [self._jobs.pubToDBNameDict[f] for f in requested]
  query = ('SELECT %s FROM %s '
           'WHERE client_key = %%s '
           ' AND status != %%s') % (','.join(columns), self.jobsTableName)
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
    return conn.cursor.fetchall()
@logExceptions(_LOGGER)
@g_retrySQL
def getJobs(self, fields=None):
  """ Fetch jobIDs (plus optional extra fields) for every job in the table.

  Parameters:
  ----------------------------------------------------------------
  fields:  optional list of public field names to return after job_id
  retval:  sequence of row tuples, job_id first
  """
  requested = fields if fields is not None else []
  # Translate public field names to DB column names; job_id always leads
  columns = ','.join(['job_id'] +
                     [self._jobs.pubToDBNameDict[f] for f in requested])
  with ConnectionFactory.get() as conn:
    conn.cursor.execute('SELECT {0!s} FROM {1!s}'.format(columns,
                                                         self.jobsTableName))
    return conn.cursor.fetchall()
@logExceptions(_LOGGER)
@g_retrySQL
def getFieldsForActiveJobsOfType(self, jobType, fields=None):
  """ Helper function for querying the models table including relevant job
  info where the job type matches the specified jobType. Only records for
  which there is a matching jobId in both tables is returned, and only the
  requested fields are returned in each result, assuming that there is not
  a conflict. This function is useful, for example, in querying a cluster
  for a list of actively running production models (according to the state
  of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
  enumerations.

  Parameters:
  ----------------------------------------------------------------
  jobType:   jobType enum
  fields:    list of fields to return

  Returns:   List of tuples containing the jobId and requested field values
  """
  if fields is None:
    fields = []
  # Translate public field names to DB column names; job_id always comes
  # first so callers can rely on row[0] being the jobId.
  dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
  dbFieldsStr = ','.join(['job_id'] + dbFields)
  with ConnectionFactory.get() as conn:
    # DISTINCT because the LEFT JOIN against the models table can yield one
    # row per model for the same job.
    query = \
      'SELECT DISTINCT %s ' \
      'FROM %s j ' \
      'LEFT JOIN %s m USING(job_id) '\
      'WHERE j.status != %%s ' \
      'AND _eng_job_type = %%s' % (dbFieldsStr, self.jobsTableName,
                                   self.modelsTableName)
    conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType])
    return conn.cursor.fetchall()
@logExceptions(_LOGGER)
def jobGetFields(self, jobID, fields):
  """ Fetch the values of 1 or more fields from a single job record. The
  names in 'fields' are the public names of the fields (camelBack, not the
  lower_case_only form as stored in the DB).

  Parameters:
  ----------------------------------------------------------------
  jobID:   jobID of the job record
  fields:  list of fields to return

  Returns: A sequence of field values in the same order as the requested
           field list -> [field1, field2, ...]
  """
  # Delegate to the batch accessor (which already retries transient mysql
  # failures) and unwrap the single (jobID, values) pair it returns.
  resultRows = self.jobsGetFields([jobID], fields, requireAll=True)
  (_jobID, fieldValues) = resultRows[0]
  return fieldValues
@logExceptions(_LOGGER)
def jobsGetFields(self, jobIDs, fields, requireAll=True):
  """ Fetch the values of 1 or more fields from a sequence of job records.
  Here, 'fields' is a sequence (list or tuple) with the names of the fields
  to fetch. The names are the public names of the fields (camelBack, not the
  lower_case_only form as stored in the DB).

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the job IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  jobIDs:     A sequence of jobIDs
  fields:     A list of fields to return for each jobID
  requireAll: when True, raise RuntimeError if any requested jobID is missing

  Returns: A list of tuples->(jobID, [field1, field2,...])
  """
  assert isinstance(jobIDs, self._SEQUENCE_TYPES)
  assert len(jobIDs) >=1
  columns = ['job_id'] + [self._jobs.pubToDBNameDict[f] for f in fields]
  matches = self._getMatchingRowsWithRetries(
    self._jobs, {'job_id': jobIDs}, columns)
  if requireAll and len(matches) < len(jobIDs):
    # NOTE: this will also trigger if the jobIDs list included duplicates
    missing = set(jobIDs) - set(m[0] for m in matches)
    raise RuntimeError("jobIDs {0!s} not found within the jobs table".format(
      missing))
  return [(m[0], list(m[1:])) for m in matches]
@logExceptions(_LOGGER)
@g_retrySQL
def jobSetFields(self, jobID, fields, useConnectionID=True,
                 ignoreUnchanged=False):
  """ Change the values of 1 or more fields in a job. Here, 'fields' is a
  dict with the name/value pairs to change. The names are the public names of
  the fields (camelBack, not the lower_case_only form as stored in the DB).
  This method is for private use by the ClientJobManager only.

  Parameters:
  ----------------------------------------------------------------
  jobID:    jobID of the job record
  fields:   dictionary of fields to change
  useConnectionID: True if the connection id of the calling function
            must be the same as the connection that created the job. Set
            to False for hypersearch workers
  ignoreUnchanged: The default behavior is to throw a
            RuntimeError if no rows are affected. This could either be
            because:
              1) Because there was not matching jobID
              2) or if the data to update matched the data in the DB exactly.
            Set this parameter to True if you expect case 2 and wish to
            supress the error.
  """
  # Form the sequence of "column=%s" assignment clauses that will go into
  # the UPDATE statement, translating public field names to DB column names
  assignmentExpressions = ','.join(
    ["{0!s}=%s".format(self._jobs.pubToDBNameDict[f]) for f in fields.iterkeys()])
  assignmentValues = fields.values()
  query = 'UPDATE %s SET %s ' \
          ' WHERE job_id=%%s' \
          % (self.jobsTableName, assignmentExpressions,)
  sqlParams = assignmentValues + [jobID]
  if useConnectionID:
    query += ' AND _eng_cjm_conn_id=%s'
    sqlParams.append(self._connectionID)
  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    result = conn.cursor.execute(query, sqlParams)
  if result != 1 and not ignoreUnchanged:
    # FIX: report the actual fields dict (the message previously printed the
    # assignment-expression string while claiming to print the fields, and
    # had an unbalanced paren); now consistent with modelSetFields.
    raise RuntimeError(
      "Tried to change fields (%r) of jobID=%s (conn_id=%r), but an error " \
      "occurred. result=%r; query=%r" % (
        fields, jobID, self._connectionID, result, query))
@logExceptions(_LOGGER)
@g_retrySQL
def jobSetFieldIfEqual(self, jobID, fieldName, newValue, curValue):
  """ Change the value of 1 field in a job to 'newValue', but only if the
  current value matches 'curValue'. The 'fieldName' is the public name of
  the field (camelBack, not the lower_case_only form as stored in the DB).

  This method is used for example by HypersearcWorkers to update the
  engWorkerState field periodically. By qualifying on curValue, it insures
  that only 1 worker at a time is elected to perform the next scheduled
  periodic sweep of the models.

  Parameters:
  ----------------------------------------------------------------
  jobID:     jobID of the job record to modify
  fieldName: public field name of the field
  newValue:  new value of the field to set
  curValue:  current value to qualify against

  retval:    True if we successfully modified the field
             False if curValue did not match
  """
  # Get the private field name and string form of the value
  dbFieldName = self._jobs.pubToDBNameDict[fieldName]
  # Build the qualification expression. Booleans and None can't use a plain
  # "=%s" comparison in SQL (NULL = x never matches), so they use the
  # IS TRUE/FALSE and IS NULL forms; only other values add a bind parameter.
  conditionValue = []
  if isinstance(curValue, bool):
    conditionExpression = '{0!s} IS {1!s}'.format(
      dbFieldName, {True:'TRUE', False:'FALSE'}[curValue])
  elif curValue is None:
    conditionExpression = '{0!s} is NULL'.format(dbFieldName)
  else:
    conditionExpression = '{0!s}=%s'.format(dbFieldName)
    conditionValue.append(curValue)
  query = 'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), %s=%%s ' \
          ' WHERE job_id=%%s AND %s' \
          % (self.jobsTableName, dbFieldName, conditionExpression)
  sqlParams = [newValue, jobID] + conditionValue
  with ConnectionFactory.get() as conn:
    result = conn.cursor.execute(query, sqlParams)
  # Exactly one affected row means the compare-and-set won
  return (result == 1)
@logExceptions(_LOGGER)
@g_retrySQL
def jobIncrementIntField(self, jobID, fieldName, increment=1,
                         useConnectionID=False):
  """ Increment the value of 1 integer field in a job by 'increment'. The
  'fieldName' is the public name of the field (camelBack, not the
  lower_case_only form as stored in the DB).

  Parameters:
  ----------------------------------------------------------------
  jobID:           jobID of the job record to modify
  fieldName:       public field name of the field
  increment:       increment is added to the current value of the field
  useConnectionID: if True, the update only applies when this connection's
                   id matches the job's _eng_cjm_conn_id

  Raises RuntimeError if no row was updated (unknown jobID or, with
  useConnectionID, a connection-id mismatch).
  """
  # Get the private field name and string form of the value
  dbFieldName = self._jobs.pubToDBNameDict[fieldName]
  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    # The increment is applied in SQL (field=field+%s) so concurrent
    # increments don't lose updates to read-modify-write races.
    query = 'UPDATE %s SET %s=%s+%%s ' \
            ' WHERE job_id=%%s' \
            % (self.jobsTableName, dbFieldName, dbFieldName)
    sqlParams = [increment, jobID]
    if useConnectionID:
      query += ' AND _eng_cjm_conn_id=%s'
      sqlParams.append(self._connectionID)
    result = conn.cursor.execute(query, sqlParams)
    if result != 1:
      raise RuntimeError(
        "Tried to increment the field (%r) of jobID=%s (conn_id=%r), but an " \
        "error occurred. result=%r; query=%r" % (
          dbFieldName, jobID, self._connectionID, result, query))
@logExceptions(_LOGGER)
@g_retrySQL
def jobUpdateResults(self, jobID, results):
  """ Store a new results string for a job and refresh its
  _eng_last_update_time.

  Parameters:
  ----------------------------------------------------------------
  jobID:    job ID of model to modify
  results:  new results (json dict string)
  """
  query = ('UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), '
           ' results=%%s '
           ' WHERE job_id=%%s') % (self.jobsTableName,)
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(query, [results, jobID])
@logExceptions(_LOGGER)
@g_retrySQL
def modelsClearAll(self):
  """ Delete every row from the models table. """
  self._logger.info('Deleting all rows from models table %r',
                    self.modelsTableName)
  with ConnectionFactory.get() as conn:
    conn.cursor.execute('DELETE FROM {0!s}'.format(self.modelsTableName))
@logExceptions(_LOGGER)
def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None):
  """ Insert a new unique model (based on params) into the model table in the
  "running" state. This will return two things: whether or not the model was
  actually inserted (i.e. that set of params isn't already in the table) and
  the modelID chosen for that set of params. Even if the model was not
  inserted by this call (it was already there) the modelID of the one already
  inserted is returned.

  Parameters:
  ----------------------------------------------------------------
  jobID:            jobID of the job to add models for
  params:           params for this model
  paramsHash        hash of the params, generated by the worker
  particleHash      hash of the particle info (for PSO). If not provided,
                    then paramsHash will be used.

  retval:           (modelID, wasInserted)
                      modelID: the model ID for this set of params
                      wasInserted: True if this call ended up inserting the
                      new model. False if this set of params was already in
                      the model table.
  """
  # Fill in default particleHash
  if particleHash is None:
    particleHash = paramsHash

  # Normalize hashes
  paramsHash = self._normalizeHash(paramsHash)
  particleHash = self._normalizeHash(particleHash)

  def findExactMatchNoRetries(conn):
    # Look up a model with the same job/params-hash/particle-hash;
    # returns (model_id, _eng_worker_conn_id) or None
    return self._getOneMatchingRowNoRetries(
      self._models, conn,
      {'job_id':jobID, '_eng_params_hash':paramsHash,
       '_eng_particle_hash':particleHash},
      ['model_id', '_eng_worker_conn_id'])

  @g_retrySQL
  def findExactMatchWithRetries():
    with ConnectionFactory.get() as conn:
      return findExactMatchNoRetries(conn)

  # Check if the model is already in the models table
  #
  # NOTE: with retries of mysql transient failures, we can't always tell
  #  whether the row was already inserted (e.g., comms failure could occur
  #  after insertion into table, but before arrival or response), so the
  #  need to check before attempting to insert a new row
  #
  # TODO: if we could be assured that the caller already verified the
  #  model's absence before calling us, we could skip this check here
  row = findExactMatchWithRetries()
  if row is not None:
    return (row[0], False)

  @g_retrySQL
  def insertModelWithRetries():
    """ NOTE: it's possible that another process on some machine is attempting
    to insert the same model at the same time as the caller """
    with ConnectionFactory.get() as conn:
      # Create a new model entry in the "running" state, tagged with this
      # worker's connection id
      query = 'INSERT INTO %s (job_id, params, status, _eng_params_hash, ' \
              ' _eng_particle_hash, start_time, _eng_last_update_time, ' \
              ' _eng_worker_conn_id) ' \
              ' VALUES (%%s, %%s, %%s, %%s, %%s, UTC_TIMESTAMP(), ' \
              ' UTC_TIMESTAMP(), %%s) ' \
              % (self.modelsTableName,)
      sqlParams = (jobID, params, self.STATUS_RUNNING, paramsHash,
                   particleHash, self._connectionID)
      try:
        numRowsAffected = conn.cursor.execute(query, sqlParams)
      except Exception, e:
        # NOTE: We have seen instances where some package in the calling
        #  chain tries to interpret the exception message using unicode.
        #  Since the exception message contains binary data (the hashes),
        #  this can in turn generate a Unicode translation exception. So, we
        #  catch ALL exceptions here and look for the string
        #  "Duplicate entry" in the exception args just in case this
        #  happens. For example, the Unicode exception we might get is:
        #   (<type 'exceptions.UnicodeDecodeError'>, UnicodeDecodeError('utf8', "Duplicate entry '1000-?.\x18\xb1\xd3\xe0CO\x05\x8b\xf80\xd7E5\xbb' for key 'job_id'", 25, 26, 'invalid start byte'))
        #
        # If it weren't for this possible Unicode translation error, we
        #  could watch for only the exceptions we want, like this:
        #    except pymysql.IntegrityError, e:
        #      if e.args[0] != mysqlerrors.DUP_ENTRY:
        #        raise
        if "Duplicate entry" not in str(e):
          raise

        # NOTE: duplicate entry scenario: however, we can't discern
        #  whether it was inserted by another process or this one, because an
        #  intermittent failure may have caused us to retry
        self._logger.info('Model insert attempt failed with DUP_ENTRY: '
                          'jobID=%s; paramsHash=%s OR particleHash=%s; %r',
                          jobID, paramsHash.encode('hex'),
                          particleHash.encode('hex'), e)
      else:
        if numRowsAffected == 1:
          # NOTE: SELECT LAST_INSERT_ID() returns 0 after re-connection
          conn.cursor.execute('SELECT LAST_INSERT_ID()')
          modelID = conn.cursor.fetchall()[0][0]
          if modelID != 0:
            return (modelID, True)
          else:
            self._logger.warn(
              'SELECT LAST_INSERT_ID for model returned 0, implying loss of '
              'connection: jobID=%s; paramsHash=%r; particleHash=%r',
              jobID, paramsHash, particleHash)
        else:
          self._logger.error(
            'Attempt to insert model resulted in unexpected numRowsAffected: '
            'expected 1, but got %r; jobID=%s; paramsHash=%r; '
            'particleHash=%r',
            numRowsAffected, jobID, paramsHash, particleHash)

      # Look up the model and discern whether it is tagged with our conn id
      row = findExactMatchNoRetries(conn)
      if row is not None:
        (modelID, connectionID) = row
        return (modelID, connectionID == self._connectionID)

      # This set of params is already in the table, just get the modelID
      query = 'SELECT (model_id) FROM %s ' \
              ' WHERE job_id=%%s AND ' \
              ' (_eng_params_hash=%%s ' \
              ' OR _eng_particle_hash=%%s) ' \
              ' LIMIT 1 ' \
              % (self.modelsTableName,)
      sqlParams = [jobID, paramsHash, particleHash]
      numRowsFound = conn.cursor.execute(query, sqlParams)
      assert numRowsFound == 1, (
        'Model not found: jobID=%s AND (paramsHash=%r OR particleHash=%r); '
        'numRowsFound=%r') % (jobID, paramsHash, particleHash, numRowsFound)
      (modelID,) = conn.cursor.fetchall()[0]
      return (modelID, False)

  return insertModelWithRetries()
@logExceptions(_LOGGER)
def modelsInfo(self, modelIDs):
  """ Get ALL info for a set of models.

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs: list of model IDs
  retval:   list of namedtuples containing all the fields stored for each
            model.
  """
  assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
    "wrong modelIDs type: %s") % (type(modelIDs),)
  assert modelIDs, "modelIDs is empty"
  infoTuple = self._models.modelInfoNamedTuple
  columns = [self._models.pubToDBNameDict[f] for f in infoTuple._fields]
  rows = self._getMatchingRowsWithRetries(
    self._models, dict(model_id=modelIDs), columns)
  results = [infoTuple._make(r) for r in rows]
  # NOTE: the assertion will also fail if modelIDs contains duplicates
  assert len(results) == len(modelIDs), "modelIDs not found: {0!s}".format((
    set(modelIDs) - set(r.modelId for r in results)))
  return results
@logExceptions(_LOGGER)
def modelsGetFields(self, modelIDs, fields):
  """ Fetch the values of 1 or more fields from one model record or a
  sequence of model records. The names in 'fields' are the public names
  (camelBack, not the lower_case_only form as stored in the DB).

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs:  A single modelID or sequence of modelIDs
  fields:    A list of fields to return

  Returns: If modelIDs is a sequence:
             a list of tuples->(modelID, [field1, field2,...])
           If modelIDs is a single modelID:
             a list of field values->[field1, field2,...]
  """
  assert len(fields) >= 1, 'fields is empty'
  # Normalize the scalar case to a one-element sequence, remembering which
  # result shape the caller expects
  isSequence = isinstance(modelIDs, self._SEQUENCE_TYPES)
  if not isSequence:
    modelIDs = [modelIDs]
  else:
    assert len(modelIDs) >=1, 'modelIDs is empty'
  columns = ['model_id'] + [self._models.pubToDBNameDict[f] for f in fields]
  rows = self._getMatchingRowsWithRetries(
    self._models, dict(model_id=modelIDs), columns)
  if len(rows) < len(modelIDs):
    raise RuntimeError("modelIDs not found within the models table: {0!s}".format(
      (set(modelIDs) - set(r[0] for r in rows))))
  if isSequence:
    return [(r[0], list(r[1:])) for r in rows]
  return list(rows[0][1:])
@logExceptions(_LOGGER)
@g_retrySQL
def modelsGetFieldsForJob(self, jobID, fields, ignoreKilled=False):
  """ Gets the specified fields for all the models for a single job. This is
  similar to modelsGetFields

  Parameters:
  ----------------------------------------------------------------
  jobID:        jobID for the models to be searched
  fields:       A list of fields to return
  ignoreKilled: (True/False). If True, this will ignore models that
                have been killed

  Returns: a (possibly empty) list of tuples as follows
    [
      (model_id1, [field1, ..., fieldn]),
      (model_id2, [field1, ..., fieldn]),
      (model_id3, [field1, ..., fieldn])
                  ...
    ]

  NOTE: since there is a window of time between a job getting inserted into
   jobs table and the job's worker(s) starting up and creating models, an
   empty-list result is one of the normal outcomes.
  """
  assert len(fields) >= 1, 'fields is empty'
  # Translate the public field names to their DB column names
  dbFields = [self._models.pubToDBNameDict[x] for x in fields]
  dbFieldsStr = ','.join(dbFields)
  query = 'SELECT model_id, %s FROM %s ' \
          ' WHERE job_id=%%s ' \
          % (dbFieldsStr, self.modelsTableName)
  sqlParams = [jobID]
  if ignoreKilled:
    # completion_reason is NULL while a model is still running, so the NULL
    # check must be explicit (NULL != x is not true in SQL)
    query += ' AND (completion_reason IS NULL OR completion_reason != %s)'
    sqlParams.append(self.CMPL_REASON_KILLED)
  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(query, sqlParams)
    rows = conn.cursor.fetchall()
  if rows is None:
    # fetchall is defined to return a (possibly-empty) sequence of
    # sequences; however, we occasionally see None returned and don't know
    # why...
    self._logger.error("Unexpected None result from cursor.fetchall; "
                       "query=%r; Traceback=%r",
                       query, traceback.format_exc())
  return [(r[0], list(r[1:])) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelsGetFieldsForCheckpointed(self, jobID, fields):
  """ Gets fields from all models in a job that have been checkpointed. This
  is used to figure out whether or not a new model should be checkpointed.

  Parameters:
  -----------------------------------------------------------------------
  jobID:   The jobID for the models to be searched
  fields:  A list of fields to return

  Returns: a (possibly-empty) list of tuples as follows
    [
      (model_id1, [field1, ..., fieldn]),
      (model_id2, [field1, ..., fieldn]),
      (model_id3, [field1, ..., fieldn])
                  ...
    ]
  """
  assert len(fields) >= 1, "fields is empty"
  # Translate the public field names to DB column names
  columnNames = ", ".join(
    self._models.pubToDBNameDict[f] for f in fields)
  query = ('SELECT model_id, {fields} from {models}'
           ' WHERE job_id=%s AND model_checkpoint_id IS NOT NULL').format(
    fields=columnNames, models=self.modelsTableName)
  with ConnectionFactory.get() as conn:
    conn.cursor.execute(query, [jobID])
    rows = conn.cursor.fetchall()
  return [(r[0], list(r[1:])) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelSetFields(self, modelID, fields, ignoreUnchanged = False):
  """ Change the values of 1 or more fields in a model. Here, 'fields' is a
  dict with the name/value pairs to change. The names are the public names of
  the fields (camelBack, not the lower_case_only form as stored in the DB).

  Parameters:
  ----------------------------------------------------------------
  modelID:  model_id of the model record
  fields:   dictionary of fields to change
  ignoreUnchanged: The default behavior is to throw a
            RuntimeError if no rows are affected. This could either be
            because:
              1) Because there was no matching modelID
              2) or if the data to update matched the data in the DB exactly.
            Set this parameter to True if you expect case 2 and wish to
            supress the error.
  """
  # Form the sequence of "column=%s" assignment clauses that will go into
  # the UPDATE statement, translating public field names to DB column names
  assignmentExpressions = ','.join(
    '{0!s}=%s'.format(self._models.pubToDBNameDict[f]) for f in fields.iterkeys())
  assignmentValues = fields.values()
  # update_counter is bumped so pollers (see modelsGetUpdateCounters) can
  # detect that the row changed
  query = 'UPDATE %s SET %s, update_counter = update_counter+1 ' \
          ' WHERE model_id=%%s' \
          % (self.modelsTableName, assignmentExpressions)
  sqlParams = assignmentValues + [modelID]
  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    numAffectedRows = conn.cursor.execute(query, sqlParams)
    self._logger.debug("Executed: numAffectedRows=%r, query=%r, sqlParams=%r",
                       numAffectedRows, query, sqlParams)
  if numAffectedRows != 1 and not ignoreUnchanged:
    raise RuntimeError(
      ("Tried to change fields (%r) of model %r (conn_id=%r), but an error "
       "occurred. numAffectedRows=%r; query=%r; sqlParams=%r") % (
        fields, modelID, self._connectionID, numAffectedRows, query,
        sqlParams,))
@logExceptions(_LOGGER)
def modelsGetParams(self, modelIDs):
  """ Get the params and paramsHash for a set of models.

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs: list of model IDs
  retval:   list of result namedtuples defined in
            ClientJobsDAO._models.getParamsNamedTuple. Each tuple
            contains: (modelId, params, engParamsHash)
  """
  assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
    "Wrong modelIDs type: %r") % (type(modelIDs),)
  assert len(modelIDs) >= 1, "modelIDs is empty"
  resultTuple = self._models.getParamsNamedTuple
  columns = [self._models.pubToDBNameDict[f] for f in resultTuple._fields]
  rows = self._getMatchingRowsWithRetries(
    self._models, {'model_id' : modelIDs}, columns)
  # NOTE: assertion will also fail when modelIDs contains duplicates
  assert len(rows) == len(modelIDs), "Didn't find modelIDs: {0!r}".format(
    (set(modelIDs) - set(r[0] for r in rows)))
  # Wrap each row in the result namedtuple
  return [resultTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
def modelsGetResultAndStatus(self, modelIDs):
  """ Get the results string and other status fields for a set of models.

  WARNING!!!: The order of the results are NOT necessarily in the same order
  as the order of the model IDs passed in!!!

  Parameters:
  ----------------------------------------------------------------
  modelIDs: list of model IDs
  retval:   list of result tuples. Each tuple contains:
            (modelID, results, status, updateCounter, numRecords,
             completionReason, completionMsg, engParamsHash)
  """
  assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
    "Wrong modelIDs type: %r") % type(modelIDs)
  assert len(modelIDs) >= 1, "modelIDs is empty"
  statusTuple = self._models.getResultAndStatusNamedTuple
  columns = [self._models.pubToDBNameDict[f] for f in statusTuple._fields]
  rows = self._getMatchingRowsWithRetries(
    self._models, {'model_id' : modelIDs}, columns)
  # NOTE: assertion will also fail when modelIDs contains duplicates
  assert len(rows) == len(modelIDs), "Didn't find modelIDs: {0!r}".format(
    (set(modelIDs) - set(r[0] for r in rows)))
  # Wrap each row in the result namedtuple
  return [statusTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
def modelsGetUpdateCounters(self, jobID):
  """ Return (modelID, updateCounter) for every model already in the models
  table for a given job.

  We deliberately do NOT return the results for the models here, since the
  results strings could be quite large; only these 2 integer fields are
  fetched.

  Parameters:
  ----------------------------------------------------------------
  jobID:   jobID to query
  retval:  (possibly empty) list of tuples. Each tuple contains:
           (modelID, updateCounter)
  """
  counterTuple = self._models.getUpdateCountersNamedTuple
  columns = [self._models.pubToDBNameDict[f] for f in counterTuple._fields]
  rows = self._getMatchingRowsWithRetries(
    self._models, {'job_id' : jobID}, columns)
  return [counterTuple._make(r) for r in rows]
@logExceptions(_LOGGER)
@g_retrySQL
def modelUpdateResults(self, modelID, results=None, metricValue =None,
                       numRecords=None):
  """ Update the results string, and/or num_records fields of
  a model. This will fail if the model does not currently belong to this
  client (connection_id doesn't match).

  Parameters:
  ----------------------------------------------------------------
  modelID:     model ID of model to modify
  results:     new results, or None to ignore
  metricValue: the value of the metric being optimized, or None to ignore
  numRecords:  new numRecords, or None to ignore

  Raises InvalidConnectionException if the model belongs to another worker
  or the modelID was not found.
  """
  # Always refresh the last-update timestamp and bump update_counter so
  # pollers can see that the row changed, even when every optional
  # argument is None (see modelUpdateTimestamp)
  assignmentExpressions = ['_eng_last_update_time=UTC_TIMESTAMP()',
                           'update_counter=update_counter+1']
  assignmentValues = []
  if results is not None:
    assignmentExpressions.append('results=%s')
    assignmentValues.append(results)
  if numRecords is not None:
    assignmentExpressions.append('num_records=%s')
    assignmentValues.append(numRecords)
  # NOTE1: (metricValue==metricValue) tests for Nan
  # NOTE2: metricValue is being passed as numpy.float64
  if metricValue is not None and (metricValue==metricValue):
    assignmentExpressions.append('optimized_metric=%s')
    assignmentValues.append(float(metricValue))
  query = 'UPDATE %s SET %s ' \
          ' WHERE model_id=%%s and _eng_worker_conn_id=%%s' \
          % (self.modelsTableName, ','.join(assignmentExpressions))
  sqlParams = assignmentValues + [modelID, self._connectionID]
  # Get a database connection and cursor
  with ConnectionFactory.get() as conn:
    numRowsAffected = conn.cursor.execute(query, sqlParams)
  if numRowsAffected != 1:
    raise InvalidConnectionException(
      ("Tried to update the info of modelID=%r using connectionID=%r, but "
       "this model belongs to some other worker or modelID not found; "
       "numRowsAffected=%r") % (modelID,self._connectionID, numRowsAffected,))
def modelUpdateTimestamp(self, modelID):
  """ Refresh a model's _eng_last_update_time (and bump its update_counter)
  without changing any other field; delegates to modelUpdateResults with all
  optional arguments left as None.

  Parameters:
  ----------------------------------------------------------------
  modelID:  model ID of model to touch
  """
  self.modelUpdateResults(modelID)
@logExceptions(_LOGGER)
@g_retrySQL
def modelSetCompleted(self, modelID, completionReason, completionMsg,
                      cpuTime=0, useConnectionID=True):
  """ Mark a model as completed, with the given completionReason and
  completionMsg. This will fail if the model does not currently belong to this
  client (connection_id doesn't match).

  Parameters:
  ----------------------------------------------------------------
  modelID:          model ID of model to modify
  completionReason: completionReason string
  completionMsg:    completionMsg string
  cpuTime:          amount of CPU time spent on this model
  useConnectionID:  True if the connection id of the calling function
                    must be the same as the connection that created the
                    job. Set to True for hypersearch workers, which use
                    this mechanism for orphaned model detection.

  Raises InvalidConnectionException if no row was updated.
  """
  # Substitute an empty string for None — presumably the completion_msg
  # column does not accept NULL; verify against the table schema
  if completionMsg is None:
    completionMsg = ''
  query = 'UPDATE %s SET status=%%s, ' \
          ' completion_reason=%%s, ' \
          ' completion_msg=%%s, ' \
          ' end_time=UTC_TIMESTAMP(), ' \
          ' cpu_time=%%s, ' \
          ' _eng_last_update_time=UTC_TIMESTAMP(), ' \
          ' update_counter=update_counter+1 ' \
          ' WHERE model_id=%%s' \
          % (self.modelsTableName,)
  sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
               cpuTime, modelID]
  if useConnectionID:
    query += " AND _eng_worker_conn_id=%s"
    sqlParams.append(self._connectionID)
  with ConnectionFactory.get() as conn:
    numRowsAffected = conn.cursor.execute(query, sqlParams)
  if numRowsAffected != 1:
    raise InvalidConnectionException(
      ("Tried to set modelID=%r using connectionID=%r, but this model "
       "belongs to some other worker or modelID not found; "
       "numRowsAffected=%r") % (modelID, self._connectionID, numRowsAffected))
@logExceptions(_LOGGER)
def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
  """ Look through the models table for an orphaned model, which is a model
  that is not completed yet, whose _eng_last_update_time is more than
  maxUpdateInterval seconds ago.

  If one is found, change its _eng_worker_conn_id to the current worker's
  and return the model id.

  Parameters:
  ----------------------------------------------------------------
  jobId:             job whose models to scan
  maxUpdateInterval: age in seconds beyond which a running model is
                     considered orphaned
  retval:            modelId of the model we adopted, or None if none found
  """
  @g_retrySQL
  def findCandidateModelWithRetries():
    # Return the modelID of one orphan candidate, or None
    modelID = None
    with ConnectionFactory.get() as conn:
      # TODO: may need a table index on job_id/status for speed
      query = 'SELECT model_id FROM %s ' \
              ' WHERE status=%%s ' \
              ' AND job_id=%%s ' \
              ' AND TIMESTAMPDIFF(SECOND, ' \
              ' _eng_last_update_time, ' \
              ' UTC_TIMESTAMP()) > %%s ' \
              ' LIMIT 1 ' \
              % (self.modelsTableName,)
      sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]
      numRows = conn.cursor.execute(query, sqlParams)
      rows = conn.cursor.fetchall()
    assert numRows <= 1, "Unexpected numRows: {0!r}".format(numRows)
    if numRows == 1:
      (modelID,) = rows[0]
    return modelID

  @g_retrySQL
  def adoptModelWithRetries(modelID):
    # Atomically claim the candidate; True if this worker now owns it
    adopted = False
    with ConnectionFactory.get() as conn:
      # The staleness qualification is re-asserted in the UPDATE itself so
      # we don't steal a model that another worker refreshed or claimed
      # between our SELECT and this UPDATE
      query = 'UPDATE %s SET _eng_worker_conn_id=%%s, ' \
              ' _eng_last_update_time=UTC_TIMESTAMP() ' \
              ' WHERE model_id=%%s ' \
              ' AND status=%%s' \
              ' AND TIMESTAMPDIFF(SECOND, ' \
              ' _eng_last_update_time, ' \
              ' UTC_TIMESTAMP()) > %%s ' \
              ' LIMIT 1 ' \
              % (self.modelsTableName,)
      sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING,
                   maxUpdateInterval]
      numRowsAffected = conn.cursor.execute(query, sqlParams)
      assert numRowsAffected <= 1, 'Unexpected numRowsAffected={0!r}'.format(
        numRowsAffected)
      if numRowsAffected == 1:
        adopted = True
      else:
        # Discern between transient failure during update and someone else
        # claiming this model
        (status, connectionID) = self._getOneMatchingRowNoRetries(
          self._models, conn, {'model_id':modelID},
          ['status', '_eng_worker_conn_id'])
        adopted = (status == self.STATUS_RUNNING and
                   connectionID == self._connectionID)
    return adopted

  # Keep trying candidates until we adopt one or run out of candidates
  adoptedModelID = None
  while True:
    modelID = findCandidateModelWithRetries()
    if modelID is None:
      break
    if adoptModelWithRetries(modelID):
      adoptedModelID = modelID
      break
  return adoptedModelID
#def testClientJobsDAO():
# # WARNING: these tests assume that Nupic Scheduler is not running, and bad
# # things will happen if the test is executed while the Scheduler is running
#
# # TODO: This test code is out of date: e.g., at the time of this writing,
# # jobStartNext() advances a job's status to STATUS_RUNNING instead of
# # STATUS_STARTING; etc.
#
# import time
# import hashlib
# import pprint
#
# # Clear out the database
# cjDAO = ClientJobsDAO.get()
# cjDAO.connect(deleteOldVersions=True, recreate=True)
#
#
# # --------------------------------------------------------------------
# # Test inserting a new job that doesn't have to be unique
# jobID1 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params')
# print "Inserted job %d" % (jobID1)
#
# jobID2 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params')
# print "Inserted job %d" % (jobID2)
#
#
# # --------------------------------------------------------------------
# # Test starting up those jobs
# jobID = cjDAO.jobStartNext()
# print "started job %d" % (jobID)
# assert (jobID == jobID1)
# info = cjDAO.jobInfo(jobID)
# print "jobInfo:"
# pprint.pprint(info)
# assert (info.status == cjDAO.STATUS_STARTING)
#
# jobID = cjDAO.jobStartNext()
# print "started job %d" % (jobID)
# assert (jobID == jobID2)
# info = cjDAO.jobInfo(jobID)
# print "jobInfo:"
# pprint.pprint(info)
# assert (info.status == cjDAO.STATUS_STARTING)
#
#
# # --------------------------------------------------------------------
# # Test inserting a unique job
# jobHash = '01234'
# (success, jobID3) = cjDAO.jobInsertUnique(client='testuniq',
# cmdLine='echo hi',
# jobHash=jobHash, clientInfo='client info', params='job params')
# print "Inserted unique job %d" % (jobID3)
# assert (success)
#
# # This should return the same jobID
# (success, jobID4) = cjDAO.jobInsertUnique(client='testuniq',
# cmdLine='echo hi',
# jobHash=jobHash, clientInfo='client info', params='job params')
# print "tried to insert again %d" % (jobID4)
# assert (not success and jobID4 == jobID3)
#
#
# # Mark it as completed
# jobID = cjDAO.jobStartNext()
# assert (jobID == jobID3)
# cjDAO.jobSetStatus(jobID3, cjDAO.STATUS_COMPLETED)
#
#
# # This should return success
# (success, jobID4) = cjDAO.jobInsertUnique(client='testuniq',
# cmdLine='echo hi',
# jobHash=jobHash, clientInfo='client info', params='job params')
# print "Inserted unique job %d" % (jobID4)
# assert (success)
#
#
# # --------------------------------------------------------------------
# # Test inserting a pre-started job
# jobID5 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params',
# alreadyRunning=True)
# print "Inserted prestarted job %d" % (jobID5)
#
# info = cjDAO.jobInfo(jobID5)
# print "jobInfo:"
# pprint.pprint(info)
# assert (info.status == cjDAO.STATUS_TESTMODE)
#
#
#
# # --------------------------------------------------------------------
# # Test the jobInfo and jobSetFields calls
# jobInfo = cjDAO.jobInfo(jobID2)
# print "job info:"
# pprint.pprint(jobInfo)
# newFields = dict(maximumWorkers=43)
# cjDAO.jobSetFields(jobID2, newFields)
# jobInfo = cjDAO.jobInfo(jobID2)
# assert(jobInfo.maximumWorkers == newFields['maximumWorkers'])
#
#
# # --------------------------------------------------------------------
# # Test the jobGetFields call
# values = cjDAO.jobGetFields(jobID2, ['maximumWorkers'])
# assert (values[0] == newFields['maximumWorkers'])
#
#
# # --------------------------------------------------------------------
# # Test the jobSetFieldIfEqual call
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == None)
#
# # Change from None to test
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue='test', curValue=None)
# assert (success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == 'test')
#
# # Change from test1 to test2 (should fail)
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue='test2', curValue='test1')
# assert (not success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == 'test')
#
# # Change from test to test2
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue='test2', curValue='test')
# assert (success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == 'test2')
#
# # Change from test2 to None
# success = cjDAO.jobSetFieldIfEqual(jobID2, 'engWorkerState',
# newValue=None, curValue='test2')
# assert (success)
# values = cjDAO.jobGetFields(jobID2, ['engWorkerState'])
# assert (values[0] == None)
#
#
# # --------------------------------------------------------------------
# # Test job demands
# jobID6 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params',
# minimumWorkers=1, maximumWorkers=1,
# alreadyRunning=False)
# jobID7 = cjDAO.jobInsert(client='test', cmdLine='echo hi',
# clientInfo='client info', params='job params',
# minimumWorkers=4, maximumWorkers=10,
# alreadyRunning=False)
# cjDAO.jobSetStatus(jobID6, ClientJobsDAO.STATUS_RUNNING,
# useConnectionID=False,)
# cjDAO.jobSetStatus(jobID7, ClientJobsDAO.STATUS_RUNNING,
# useConnectionID=False,)
# jobsDemand = cjDAO.jobGetDemand()
# assert (jobsDemand[0].minimumWorkers==1 and jobsDemand[0].maximumWorkers==1)
# assert (jobsDemand[1].minimumWorkers==4 and jobsDemand[1].maximumWorkers==10)
# assert (jobsDemand[0].engAllocateNewWorkers == True and \
# jobsDemand[0].engUntendedDeadWorkers == False)
#
# # Test increment field
# values = cjDAO.jobGetFields(jobID7, ['numFailedWorkers'])
# assert (values[0] == 0)
# cjDAO.jobIncrementIntField(jobID7, 'numFailedWorkers', 1)
# values = cjDAO.jobGetFields(jobID7, ['numFailedWorkers'])
# assert (values[0] == 1)
#
# # --------------------------------------------------------------------
# # Test inserting new models
#
# params = "params1"
# hash1 = hashlib.md5(params).digest()
# (modelID1, ours) = cjDAO.modelInsertAndStart(jobID, params, hash1)
# print "insert %s,%s:" % (params, hash1.encode('hex')), modelID1, ours
# assert (ours)
#
# params = "params2"
# hash2 = hashlib.md5(params).digest()
# (modelID2, ours) = cjDAO.modelInsertAndStart(jobID, params, hash2)
# print "insert %s,%s:" % (params, hash2.encode('hex')), modelID2, ours
# assert (ours)
#
# params = "params3"
# hash3 = hashlib.md5(params).digest()
# (modelID3, ours) = cjDAO.modelInsertAndStart(jobID, params, hash3)
# print "insert %s,%s:" % (params, hash3.encode('hex')), modelID3, ours
# assert (ours)
#
# params = "params4"
# hash4 = hashlib.md5(params).digest()
# (modelID4, ours) = cjDAO.modelInsertAndStart(jobID, params, hash4)
# print "insert %s,%s:" % (params, hash4.encode('hex')), modelID4, ours
# assert (ours)
#
# params = "params5"
# hash5 = hashlib.md5(params).digest()
# (modelID5, ours) = cjDAO.modelInsertAndStart(jobID, params, hash5)
# print "insert %s,%s:" % (params, hash5.encode('hex')), modelID5, ours
# assert (ours)
#
#
# # Try to insert the same model again
# params = "params2"
# hash = hashlib.md5(params).digest()
# (modelID, ours) = cjDAO.modelInsertAndStart(jobID, params, hash)
# print "insert %s,%s:" % (params, hash.encode('hex')), modelID, ours
# assert (not ours and modelID == modelID2)
#
#
# # ---------------------------------------------------------------
# # Test inserting models with unique particle hashes
# params = "params6"
# paramsHash = hashlib.md5(params).digest()
# particle = "particle6"
# particleHash = hashlib.md5(particle).digest()
# (modelID6, ours) = cjDAO.modelInsertAndStart(jobID, params, paramsHash,
# particleHash)
# print "insert %s,%s,%s:" % (params, paramsHash.encode('hex'),
# particleHash.encode('hex')), modelID6, ours
# assert (ours)
#
# # Should fail if we insert with the same params hash
# params = "params6"
# paramsHash = hashlib.md5(params).digest()
# particle = "particleUnique"
# particleHash = hashlib.md5(particle).digest()
# (modelID, ours) = cjDAO.modelInsertAndStart(jobID, params, paramsHash,
# particleHash)
# print "insert %s,%s,%s:" % (params, paramsHash.encode('hex'),
# particleHash.encode('hex')), modelID6, ours
# assert (not ours and modelID == modelID6)
#
# # Should fail if we insert with the same particle hash
# params = "paramsUnique"
# paramsHash = hashlib.md5(params).digest()
# particle = "particle6"
# particleHash = hashlib.md5(particle).digest()
# (modelID, ours) = cjDAO.modelInsertAndStart(jobID, params, paramsHash,
# particleHash)
# print "insert %s,%s,%s:" % (params, paramsHash.encode('hex'),
# particleHash.encode('hex')), modelID6, ours
# assert (not ours and modelID == modelID6)
#
#
#
# # --------------------------------------------------------------------
# # Test getting params for a set of models
# paramsAndHash = cjDAO.modelsGetParams([modelID1, modelID2])
# print "modelID, params, paramsHash of %s:" % ([modelID1, modelID2])
# for (modelID, params, hash) in paramsAndHash:
# print " ", modelID, params, hash.encode('hex')
# if modelID == modelID1:
# assert (params == "params1" and hash == hash1)
# elif modelID == modelID2:
# assert (params == "params2" and hash == hash2)
# else:
# assert (false)
#
#
# # Set some to notstarted
# #cjDAO.modelUpdateStatus(modelID2, status=cjDAO.STATUS_NOTSTARTED)
# #cjDAO.modelUpdateStatus(modelID3, status=cjDAO.STATUS_NOTSTARTED)
#
#
# # --------------------------------------------------------------------
# # Test Update model info
# cjDAO.modelUpdateResults(modelID2, results="hi there")
# cjDAO.modelUpdateResults(modelID3, numRecords=100)
# cjDAO.modelUpdateResults(modelID3, numRecords=110)
# cjDAO.modelUpdateResults(modelID4, results="bye", numRecords=42)
# cjDAO.modelUpdateResults(modelID5, results="hello", numRecords=4)
#
#
# # Test setCompleted
# cjDAO.modelSetCompleted(modelID5, completionReason=cjDAO.CMPL_REASON_EOF,
# completionMsg="completion message")
#
# # --------------------------------------------------------------------------
# # Test the GetResultsAndStatus call
# results = cjDAO.modelsGetResultAndStatus([modelID1, modelID2, modelID3,
# modelID4, modelID5])
# assert (len(results) == 5)
# for (modelID, results, status, updateCounter, numRecords,
# completionReason, completionMsg, engParamsHash,
# engMatured) in results:
# if modelID == modelID1:
# assert (status == cjDAO.STATUS_RUNNING)
# assert (updateCounter == 0)
# elif modelID == modelID2:
# assert (results == 'hi there')
# assert (updateCounter == 1)
# elif modelID == modelID3:
# assert (numRecords == 110)
# assert (updateCounter == 2)
# elif modelID == modelID4:
# assert (updateCounter == 1)
# assert (results == 'bye')
# assert (numRecords == 42)
# elif modelID == modelID5:
# assert (updateCounter == 2)
# assert (results == 'hello')
# assert (numRecords == 4)
# assert (status == cjDAO.STATUS_COMPLETED)
# assert (completionReason == cjDAO.CMPL_REASON_EOF)
# assert (completionMsg == "completion message")
# else:
# assert (False)
#
# # --------------------------------------------------------------------------
# # Test the ModelsInfo call
# mInfos = cjDAO.modelsInfo([modelID1, modelID2, modelID3,
# modelID4, modelID5])
# assert (len(results) == 5)
# for info in mInfos:
# modelID = info.modelId
# if modelID == modelID1:
# assert (info.status == cjDAO.STATUS_RUNNING)
# assert (info.updateCounter == 0)
# elif modelID == modelID2:
# assert (info.results == 'hi there')
# assert (info.updateCounter == 1)
# elif modelID == modelID3:
# assert (info.numRecords == 110)
# assert (info.updateCounter == 2)
# elif modelID == modelID4:
# assert (info.updateCounter == 1)
# assert (info.results == 'bye')
# assert (info.numRecords == 42)
# elif modelID == modelID5:
# assert (info.updateCounter == 2)
# assert (info.results == 'hello')
# assert (info.numRecords == 4)
# assert (info.status == cjDAO.STATUS_COMPLETED)
# assert (info.completionReason == cjDAO.CMPL_REASON_EOF)
# assert (info.completionMsg == "completion message")
# else:
# assert (False)
#
#
# # Test the GetUpdateCounters call
# results = cjDAO.modelsGetUpdateCounters(jobID)
# print " all models update counters:", results
# expResults = set(((modelID1, 0), (modelID2, 1), (modelID3, 2),
# (modelID4, 1), (modelID5, 2), (modelID6, 0)))
# diff = expResults.symmetric_difference(results)
# assert (len(diff) == 0)
#
#
# # -------------------------------------------------------------------
# # Test the model orphan logic
# for modelID in [modelID1, modelID2, modelID3, modelID4, modelID5, modelID6]:
# cjDAO.modelUpdateResults(modelID, results="hi there")
# orphanedModel = cjDAO.modelAdoptNextOrphan(jobID, maxUpdateInterval=10.0)
# if orphanedModel is not None:
# print "Unexpected orphan: ", orphanedModel
# assert (orphanedModel is None)
# print "Waiting 2 seconds for model to expire..."
# time.sleep(2)
# orphanedModel = cjDAO.modelAdoptNextOrphan(jobID, maxUpdateInterval=1.0)
# assert (orphanedModel is not None)
# print "Adopted model", orphanedModel
#
# print "\nAll tests pass."
# Usage text handed to OptionParser by the __main__ entry point; %prog is
# substituted by optparse with the program name.
helpString = """%prog [options]
This script runs the ClientJobsDAO as a command line tool, for executing
unit tests or for obtaining specific information about the ClientJobsDAO
required for code written in languages other than python.
"""
# Command-line entry point: lets code written in other languages (e.g. Java)
# query ClientJobsDAO details such as the database name.
if __name__ == "__main__":
  """
  Launch the ClientJobsDAO from the command line. This can be done to obtain
  specific information about the ClientJobsDAO when languages other than python
  (i.e. Java) are used.
  """

  # Parse command line options
  parser = OptionParser(helpString)
  parser.add_option("--getDBName", action="store_true", default=False,
        help="Print the name of the database that will be used to stdout "
             " [default: %default]")

  (options, args) = parser.parse_args(sys.argv[1:])
  if len(args) > 0:
    # This tool takes options only, never positional arguments
    parser.error("Didn't expect any arguments.")

  # Print DB name?
  if options.getDBName:
    cjDAO = ClientJobsDAO()
    # NOTE: Python 2 print statement -- this module targets Python 2.
    print cjDAO.dbName
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\nimport pyclbr\nimport os\nfrom operator import itemgetter\n\n\ndef show_class(name, class_data):\n print('Class:', name)\n filename = os.path.basename(class_data.file)\n print(' File: {0} [{1}]'.format(... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\nimport pyclbr\nimport os\nfrom operator import itemgetter\n\n\ndef show_class(name, class_data):\n print('Class:', name)\n filename = os.path.basename(class_data.file)\n print(' File: {0... | ```python
import pyclbr
import os
from operator import itemgetter
def show_class(name, class_data):
print('Class:', name)
filename = os.path.basename(class_data.file)
print(' File: {0} [{1}]'.format(
filename, class_data.lineno))
show_super_classes(name, class_data)
show_methods(name, class_data)
print()
def show_methods(class_name, class_data):
for name, lineno in sorted(class_data.methods.items(),
key=itemgetter(1)):
print(' Method: {0} [{1}]'.format(name, lineno))
def show_super_classes(name, class_data):
super_class_names = []
for super_class in class_data.super:
if super_class == 'object':
continue
if isinstance(super_class, str):
super_class_names.append(super_class)
else:
super_class_names.append(super_class.name)
if super_class_names:
print(' Super classes:', super_class_names)
example_data = pyclbr.readmodule('pyclbr_example')
for name, class_data in sorted(example_data.items(),
key=lambda x: x[1].lineno):
show_class(name, class_data)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n'''\nEntry point module to start the interactive console.\n'''\nfrom _pydev_bundle._pydev_getopt import gnu_getopt\nfrom _pydev_comm.rpc import make_rpc_client, start_rpc_server, start_rpc_server_and_make_client\nfrom _pyd... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n'''\nEntry point module to start the interactive console.\n'''\nfrom _pydev_bundle._pydev_getopt import gnu_getopt\nfrom _pydev_comm.rpc import make_rpc_client, start_rpc_server, start_rpc_server_and_make_c... | ```python
'''
Entry point module to start the interactive console.
'''
from _pydev_bundle._pydev_getopt import gnu_getopt
from _pydev_comm.rpc import make_rpc_client, start_rpc_server, start_rpc_server_and_make_client
from _pydev_imps._pydev_saved_modules import thread
start_new_thread = thread.start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole
import os
import sys
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE, dict_keys
from _pydevd_bundle.pydevd_utils import save_main_module
from _pydev_bundle import fix_getpass
fix_getpass.fix_getpass()
from _pydev_bundle.pydev_imports import _queue
try:
import __builtin__
except:
import builtins as __builtin__ # @UnresolvedImport
from _pydev_bundle.pydev_stdin import BaseStdIn
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
from _pydev_bundle.pydev_console_types import Command
IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3
IS_PY24 = sys.version_info[0] == 2 and sys.version_info[1] == 4
try:
try:
execfile #Not in Py3k
except NameError:
from _pydev_bundle.pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from _pydev_bundle.pydev_umd import runfile, _set_globals_function
if sys.version_info[0] >= 3:
import builtins # @UnresolvedImport
builtins.runfile = runfile
else:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, mainThread, connect_status_queue=None, rpc_client=None):
BaseInterpreterInterface.__init__(self, mainThread, connect_status_queue, rpc_client)
self.namespace = {}
self.save_main()
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def save_main(self):
m = save_main_module('<input>', 'pydevconsole')
self.namespace = m.__dict__
try:
self.namespace['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
def do_add_exec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def get_namespace(self):
return self.namespace
def close(self):
sys.exit(0)
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def activate_mpl_if_already_imported(interpreter):
if interpreter.mpl_modules_for_patching:
for module in dict_keys(interpreter.mpl_modules_for_patching):
if module in sys.modules:
activate_function = interpreter.mpl_modules_for_patching.pop(module)
activate_function()
def init_set_return_control_back(interpreter):
from pydev_ipython.inputhook import set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
def init_mpl_in_console(interpreter):
init_set_return_control_back(interpreter)
if not INTERACTIVE_MODE_AVAILABLE:
return
activate_mpl_if_already_imported(interpreter)
from _pydev_bundle.pydev_import_hook import import_hook_manager
for mod in dict_keys(interpreter.mpl_modules_for_patching):
import_hook_manager.add_module_name(mod, interpreter.mpl_modules_for_patching.pop(mod))
def process_exec_queue(interpreter):
init_mpl_in_console(interpreter)
from pydev_ipython.inputhook import get_inputhook
while 1:
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
import traceback;traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
if hasattr(code_fragment, '__call__'):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
more = interpreter.add_exec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
except:
type, value, tb = sys.exc_info()
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from _pydev_bundle.pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def do_exit(*args):
'''
We have to override the exit because calling sys.exit will only actually exit the main thread,
and as we're in a Xml-rpc server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def enable_thrift_logging():
"""Sets up `thriftpy` logger
The logger is used in `thriftpy/server.py` for logging exceptions.
"""
import logging
# create logger
logger = logging.getLogger('_shaded_thriftpy')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def create_server_handler_factory(interpreter):
def server_handler_factory(rpc_client):
interpreter.rpc_client = rpc_client
return interpreter
return server_handler_factory
def start_server(port):
if port is None:
port = 0
# 0. General stuff
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
from pydev_console.protocol import PythonConsoleBackendService, PythonConsoleFrontendService
enable_thrift_logging()
server_service = PythonConsoleBackendService
client_service = PythonConsoleFrontendService
# 1. Start Python console server
# `InterpreterInterface` implements all methods required for `server_handler`
interpreter = InterpreterInterface(threading.currentThread())
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server_socket = start_rpc_server_and_make_client('', port, server_service, client_service, create_server_handler_factory(interpreter))
# 2. Print server port for the IDE
_, server_port = server_socket.getsockname()
print(server_port)
# 3. Wait for IDE to connect to the server
process_exec_queue(interpreter)
def start_client(host, port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = do_exit
from pydev_console.protocol import PythonConsoleBackendService, PythonConsoleFrontendService
enable_thrift_logging()
client_service = PythonConsoleFrontendService
client, server_transport = make_rpc_client(client_service, host, port)
interpreter = InterpreterInterface(threading.currentThread(), rpc_client=client)
# we do not need to start the server in a new thread because it does not need to accept a client connection, it already has it
# Tell UMD the proper default namespace
_set_globals_function(interpreter.get_namespace)
server_service = PythonConsoleBackendService
# `InterpreterInterface` implements all methods required for the handler
server_handler = interpreter
start_rpc_server(server_transport, server_service, server_handler)
process_exec_queue(interpreter)
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
__builtin__.interpreter = interpreterInterface
print(interpreterInterface.get_greeting_msg())
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn(sys.stdin)
# parse command-line arguments
optlist, _ = gnu_getopt(sys.argv, 'm:h:p', ['mode=', 'host=', 'port='])
mode = None
host = None
port = None
for opt, arg in optlist:
if opt in ('-m', '--mode'):
mode = arg
elif opt in ('-h', '--host'):
host = arg
elif opt in ('-p', '--port'):
port = int(arg)
if mode not in ('client', 'server'):
sys.exit(-1)
if mode == 'client':
if not port:
# port must be set for client
sys.exit(-1)
if not host:
from _pydev_bundle import pydev_localhost
host = client_host = pydev_localhost.get_localhost()
pydevconsole.start_client(host, port)
elif mode == 'server':
pydevconsole.start_server(port)
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Nicolas Bessi\n# Copyright 2013 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# it un... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Nicolas Bessi\n# Copyright 2013 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or mo... | ```python
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import netsvc
from openerp.osv import orm
SELECTED_STATE = ('agreement_selected', 'Agreement selected')
AGR_SELECT = 'agreement_selected'
class purchase_order(orm.Model):
"""Add workflow behavior"""
_inherit = "purchase.order"
def __init__(self, pool, cr):
"""Add a new state value using PO class property"""
if SELECTED_STATE not in super(purchase_order, self).STATE_SELECTION:
super(purchase_order, self).STATE_SELECTION.append(SELECTED_STATE)
return super(purchase_order, self).__init__(pool, cr)
def select_agreement(self, cr, uid, agr_id, context=None):
"""Pass PO in state 'Agreement selected'"""
if isinstance(agr_id, (list, tuple)):
assert len(agr_id) == 1
agr_id = agr_id[0]
wf_service = netsvc.LocalService("workflow")
return wf_service.trg_validate(uid, 'purchase.order',
agr_id, 'select_agreement', cr)
def po_tender_agreement_selected(self, cr, uid, ids, context=None):
"""Workflow function that write state 'Agreement selected'"""
return self.write(cr, uid, ids, {'state': AGR_SELECT},
context=context)
class purchase_order_line(orm.Model):
"""Add make_agreement function"""
_inherit = "purchase.order.line"
# Did you know a good way to supress SQL constraint to add
# Python constraint...
_sql_constraints = [
('quantity_bid', 'CHECK(true)',
'Selected quantity must be less or equal than the quantity in the bid'),
]
def _check_quantity_bid(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.framework_agreement_id:
continue
if line.product_id.type == 'product' and not line.quantity_bid <= line.product_qty:
return False
return True
_constraints = [
(_check_quantity_bid,
'Selected quantity must be less or equal than the quantity in the bid',
[])
]
def _agreement_data(self, cr, uid, po_line, origin, context=None):
"""Get agreement values from PO line
:param po_line: Po line records
:returns: agreement dict to be used by orm.Model.create
"""
vals = {}
vals['supplier_id'] = po_line.order_id.partner_id.id
vals['product_id'] = po_line.product_id.id
vals['quantity'] = po_line.product_qty
vals['delay'] = po_line.product_lead_time
vals['origin'] = origin if origin else False
return vals
def make_agreement(self, cr, uid, line_id, origin, context=None):
""" generate a draft framework agreement
:returns: a record of LTA
"""
agr_model = self.pool['framework.agreement']
if isinstance(line_id, (list, tuple)):
assert len(line_id) == 1
line_id = line_id[0]
current = self.browse(cr, uid, line_id, context=context)
vals = self._agreement_data(cr, uid, current, origin, context=context)
agr_id = agr_model.create(cr, uid, vals, context=context)
return agr_model.browse(cr, uid, agr_id, context=context)
``` |
[
{
"content": "```python\nimport pyb\nfrom pyb import Pin\nfrom pyb import ExtInt\n\n# We need to use global properties here as any allocation of a memory (aka declaration of a variable)\n# during the read cycle causes non-acceptable delay and we are loosing data than\nnc = None\ngnd = None\nvcc = None\ndata = N... | [
{
"content": "<|memory_start|>```python\nimport pyb\nfrom pyb import Pin\nfrom pyb import ExtInt\n\n# We need to use global properties here as any allocation of a memory (aka declaration of a variable)\n# during the read cycle causes non-acceptable delay and we are loosing data than\nnc = None\ngnd = None\nvcc ... | ```python
import pyb
from pyb import Pin
from pyb import ExtInt
# We need to use global properties here as any allocation of a memory (aka declaration of a variable)
# during the read cycle causes non-acceptable delay and we are losing data then
nc = None      # "not connected" neighbour pin, driven high open-drain
gnd = None     # pin driven low to act as the sensor's ground
vcc = None     # pin driven high to act as the sensor's power supply
data = None    # the actual one-wire data pin
timer = None   # id of the hardware timer used as a microsecond clock
micros = None  # pyb.Timer instance ticking at 1 MHz (set up in init())
FALL_EDGES = 42 # we have 42 falling edges during data receive
times = list(range(FALL_EDGES))  # pre-allocated buffer of edge timestamps (us)
index = 0                        # next write position into `times`
# The interrupt handler
def edge(line):
    # External-interrupt handler called on every falling edge of the data
    # line. NOTE: must stay allocation-free (see module comment above) --
    # only index arithmetic and a store into the pre-allocated buffer.
    global index
    global times
    global micros
    times[index] = micros.counter()
    if index < (FALL_EDGES - 1): # Avoid overflow of the buffer in case of any noise on the line
        index += 1
def init(timer_id = 2, nc_pin = 'Y3', gnd_pin = 'Y4', vcc_pin = 'Y1', data_pin = 'Y2'):
    """Configure the pins, the 1 MHz timer and the falling-edge interrupt.

    Neighbouring pins are driven as NC / GND / VCC so the sensor can be
    plugged straight onto the pyboard header; pass None to skip a pin.
    """
    global nc
    global gnd
    global vcc
    global data
    global micros
    global timer
    # Leave the pin unconnected
    if nc_pin is not None:
        nc = Pin(nc_pin)
        nc.init(Pin.OUT_OD)
        nc.high()
    # Make the pin work as GND
    if gnd_pin is not None:
        gnd = Pin(gnd_pin)
        gnd.init(Pin.OUT_PP)
        gnd.low()
    # Make the pin work as power supply
    if vcc_pin is not None:
        vcc = Pin(vcc_pin)
        vcc.init(Pin.OUT_PP)
        vcc.high()
    # Configure the pin for data communication
    data = Pin(data_pin)
    # Save the ID of the timer we are going to use
    timer = timer_id
    # setup the 1uS timer
    micros = pyb.Timer(timer, prescaler=83, period=0x3fffffff) # 1MHz ~ 1uS
    # Prepare interrupt handler
    # NOTE(review): registered twice -- the first call with a None callback
    # appears intended to clear any stale handler before installing `edge`;
    # confirm against the pyb.ExtInt documentation.
    ExtInt(data, ExtInt.IRQ_FALLING, Pin.PULL_UP, None)
    ExtInt(data, ExtInt.IRQ_FALLING, Pin.PULL_UP, edge)
# Start signal
def do_measurement():
    """Send the start signal and let the interrupt handler capture edges.

    The handler `edge` fills the pre-allocated `times` buffer; this
    function only drives the start pulse and waits for the transfer.
    """
    global nc
    global gnd
    global vcc
    global data
    global micros
    global timer
    global index
    # Send the START signal: hold the data line low for ~25 ms ...
    data.init(Pin.OUT_PP)
    data.low()
    micros.counter(0)
    while micros.counter() < 25000:
        pass
    # ... then release it high for ~20 us.
    data.high()
    micros.counter(0)
    while micros.counter() < 20:
        pass
    # Activate reading on the data pin
    index = 0
    data.init(Pin.IN, Pin.PULL_UP)
    # Till 5mS the measurement must be over
    pyb.delay(5)
# Parse the data read from the sensor
def process_data():
    """Decode the 40 captured data bits into (humidity, temperature).

    The first two falling edges acknowledge the start signal and are
    skipped; a gap longer than 100 us between consecutive edges encodes a
    1 bit.  Raises ValueError when the checksum byte does not match.
    """
    global times
    decoded = [0, 0, 0, 0, 0]   # int RH, dec RH, int T, dec T, checksum
    byte_no = 0
    # We ignore the first two falling edges as they are the response to
    # the start signal, not data bits.
    for edge_no in range(2, FALL_EDGES):
        bit = 1 if times[edge_no] - times[edge_no - 1] > 100 else 0
        decoded[byte_no] = (decoded[byte_no] << 1) | bit
        if (edge_no % 8) == 1:
            byte_no += 1
    [int_rh, dec_rh, int_t, dec_t, csum] = decoded
    humidity = (int_rh * 256 + dec_rh) / 10
    temperature = ((int_t & 0x7F) * 256 + dec_t) / 10
    # The high bit of the integral temperature byte is the sign flag.
    if (int_t & 0x80) > 0:
        temperature *= -1
    if ((int_rh + dec_rh + int_t + dec_t) & 0xFF) != csum:
        raise ValueError('Checksum does not match')
    return (humidity, temperature)
def measure():
    """Run one full measurement cycle and return (humidity, temperature).

    Raises ValueError when fewer than the expected falling edges were
    captured (transfer failed) or when the checksum does not match.
    """
    do_measurement()
    if index != (FALL_EDGES -1):
        raise ValueError('Data transfer failed: %s falling edges only' % str(index))
    return process_data()
``` |
[
{
"content": "```python\nfrom django import forms\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom account import models as amod\nfrom . import templater\nimport datetime\n\n\ndef process_request(request):\n '''Creates a reset password form to email ... | [
{
"content": "<|memory_start|>```python\nfrom django import forms\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom account import models as amod\nfrom . import templater\nimport datetime\n\n\ndef process_request(request):\n '''Creates a reset passwor... | ```python
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from account import models as amod
from . import templater
import datetime
def process_request(request):
    '''Validate a password-reset link and let the user set a new password.

    URL params: [0] = user email, [1] = one-time reset key.  The link is
    rejected when the key does not match or the reset date has passed.
    '''
    user = amod.User.objects.get(email=request.urlparams[0])
    key = request.urlparams[1]
    now = datetime.datetime.utcnow()
    # The stored date is timezone-aware; compare both as naive UTC.
    exp_date = user.password_reset_date.replace(tzinfo=None)
    if key != user.password_reset_key or now > exp_date:
        return HttpResponseRedirect('/account/password_reset_invalid')

    form = PasswordForm()
    if request.method == 'POST':
        form = PasswordForm(request.POST)
        if form.is_valid():
            password = form.cleaned_data['password']
            # Debug prints removed: never write a plaintext password (or
            # user details) to the console/logs.
            user.set_password(password)
            user.save()
            return HttpResponseRedirect('/account/password_reset/')

    tvars = {
        'form': form,
    }
    return templater.render_to_response(request, 'reset_password.html', tvars)
class PasswordForm(forms.Form):
    # Single masked input for the user's new password.
    password = forms.CharField(widget=forms.PasswordInput)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project ro... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt i... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated client code (see module header) -- prefer
# regenerating from the service specification over hand-editing.
class LoadBalancerOutboundRulesOperations(object):
    """LoadBalancerOutboundRulesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.LoadBalancerOutboundRuleListResult"]
        """Gets all the outbound rules in a load balancer.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerOutboundRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.LoadBalancerOutboundRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LoadBalancerOutboundRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # First page: build the full URL and query string.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service returns next_link verbatim.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('LoadBalancerOutboundRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/outboundRules'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        outbound_rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.OutboundRule"
        """Gets the specified load balancer outbound rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param outbound_rule_name: The name of the outbound rule.
        :type outbound_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OutboundRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_12_01.models.OutboundRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OutboundRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'outboundRuleName': self._serialize.url("outbound_rule_name", outbound_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('OutboundRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/outboundRules/{outboundRuleName}'}  # type: ignore
``` |
[
{
"content": "```python\nfrom django.utils.translation import ugettext_lazy as _\nfrom horizon import exceptions\nfrom horizon import tabs\nimport json\nfrom crystal_dashboard.api import controllers as api\nfrom crystal_dashboard.dashboards.crystal import exceptions as sdsexception\nfrom crystal_dashboard.dashb... | [
{
"content": "<|memory_start|>```python\nfrom django.utils.translation import ugettext_lazy as _\nfrom horizon import exceptions\nfrom horizon import tabs\nimport json\nfrom crystal_dashboard.api import controllers as api\nfrom crystal_dashboard.dashboards.crystal import exceptions as sdsexception\nfrom crystal... | ```python
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
import json
from crystal_dashboard.api import controllers as api
from crystal_dashboard.dashboards.crystal import exceptions as sdsexception
from crystal_dashboard.dashboards.crystal.controllers.controllers import models as controllers_models
from crystal_dashboard.dashboards.crystal.controllers.controllers import tables as controllers_tables
from crystal_dashboard.dashboards.crystal.controllers.instances import tables as instances_tables
from crystal_dashboard.dashboards.crystal.controllers.instances import models as instances_models
class ControllersTab(tabs.TableTab):
    """Horizon tab listing the registered Crystal controllers."""
    table_classes = (controllers_tables.ControllersTable,)
    name = _("Controllers")
    slug = "controllers_table"
    template_name = "crystal/controllers/controllers/_detail.html"
    preload = False
    response = None

    def get_controllers_data(self):
        """Fetch all controllers from the Crystal API.

        On any failure an error is shown to the user and an empty table
        is rendered instead of raising.
        """
        try:
            if not self.response:
                self.response = api.get_all_controllers(self.request)
            if 200 <= self.response.status_code < 300:
                strobj = self.response.text
            else:
                error_message = 'Unable to get instances.'
                raise sdsexception.SdsException(error_message)
        except Exception as e:
            strobj = "[]"
            # str(e) instead of e.message: BaseException.message was removed
            # in Python 3 (and deprecated since Python 2.6).
            exceptions.handle(self.request, str(e))

        instances = json.loads(strobj)
        ret = []
        for inst in instances:
            ctrl = controllers_models.Controller(inst['id'], inst['controller_name'], inst['description'],
                                                 inst['class_name'], inst['instances'], inst['valid_parameters'])
            ret.append(ctrl)
        return ret
class InstancesTab(tabs.TableTab):
    """Horizon tab listing controller instances."""
    table_classes = (instances_tables.InstancesTable,)
    name = _("Instances")
    slug = "instances_table"
    template_name = "crystal/controllers/instances/_detail.html"
    preload = False
    response = None
    def get_instances_data(self):
        # NOTE(review): unlike ControllersTab.get_controllers_data there is
        # no error handling here -- an API failure will propagate and break
        # the tab; confirm whether that is intended.
        instances = json.loads(api.get_all_instances(self.request).text)
        return [instances_models.Instance(instance['id'], instance['controller'], instance['parameters'],
                                          instance['description'], instance['status']) for instance in instances]
class ControllerTabs(tabs.TabGroup):
    """Tab group combining the Controllers and Instances tabs."""
    slug = "controllers_tabs"
    tabs = (ControllersTab, InstancesTab,)
    sticky = True
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2016, Maximilian Köhl <mail@koehlma.de>\n#\n# This program is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License version 3 as published by\n# the Free Software Foundation.\n#\n# T... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2016, Maximilian Köhl <mail@koehlma.de>\n#\n# This program is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License version 3 as published by\n# the Free Software Fou... | ```python
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Maximilian Köhl <mail@koehlma.de>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals, division, absolute_import
import socket
import common
import uv
class TestPipe(common.TestCase):
    """Tests for uv.Pipe: connect failures, socket/peer names, pending
    accepts, closed-handle errors, address family and fd wrapping."""
    def test_connect_bad(self):
        # Connecting to a non-existent pipe must report a failure status.
        def on_connect(request, status):
            self.assert_not_equal(status, uv.StatusCodes.SUCCESS)
            request.stream.close()
        self.pipe = uv.Pipe()
        self.pipe.connect(common.BAD_PIPE, on_connect=on_connect)
        self.loop.run()
    def test_sockname(self):
        # sockname reflects the address the pipe was bound to.
        self.pipe = uv.Pipe()
        self.pipe.bind(common.TEST_PIPE1)
        self.assert_equal(self.pipe.sockname, common.TEST_PIPE1)
    def test_peername(self):
        # After a successful connect the client's peername must equal the
        # address the server side bound to.
        def on_connect(request, status):
            self.assert_equal(status, uv.StatusCodes.SUCCESS)
            self.assert_equal(request.stream.peername, common.TEST_PIPE1)
            request.stream.close()
        def on_connection(handle, status):
            self.assert_equal(status, uv.StatusCodes.SUCCESS)
            handle.close()
        self.pipe1 = uv.Pipe()
        self.pipe1.bind(common.TEST_PIPE1)
        self.pipe1.listen(on_connection=on_connection)
        self.pipe2 = uv.Pipe()
        self.pipe2.connect(common.TEST_PIPE1, on_connect=on_connect)
        self.loop.run()
    def test_no_pending_accept(self):
        # pending_accept without a pending connection is an error.
        self.pipe = uv.Pipe()
        self.assert_raises(uv.error.ArgumentError, self.pipe.pending_accept)
    def test_closed(self):
        # Every operation on a closed pipe raises ClosedHandleError (the
        # pending_* queries return neutral values instead).
        self.pipe = uv.Pipe()
        self.pipe.close()
        self.assert_raises(uv.error.ClosedHandleError, self.pipe.open, 0)
        self.assert_equal(self.pipe.pending_count, 0)
        self.assert_equal(self.pipe.pending_type, None)
        self.assert_raises(uv.error.ClosedHandleError, self.pipe.pending_accept)
        self.assert_raises(uv.error.ClosedHandleError, self.pipe.pending_instances, 100)
        with self.should_raise(uv.error.ClosedHandleError):
            sockname = self.pipe.sockname
        with self.should_raise(uv.error.ClosedHandleError):
            peername = self.pipe.peername
        self.assert_raises(uv.error.ClosedHandleError, self.pipe.bind, '')
        self.assert_raises(uv.error.ClosedHandleError, self.pipe.connect, '')
    def test_family(self):
        # Pipes are AF_UNIX sockets everywhere except Windows.
        self.pipe = uv.Pipe()
        if uv.common.is_win32:
            self.assert_is(self.pipe.family, None)
        else:
            self.assert_is(self.pipe.family, socket.AF_UNIX)
    @common.skip_platform('win32')
    def test_pipe_open(self):
        # Wrapping an existing UNIX socket fd must preserve the fd number.
        unix_socket = socket.socket(family=socket.AF_UNIX)
        self.pipe = uv.Pipe()
        self.pipe.open(unix_socket.fileno())
        self.assert_equal(self.pipe.fileno(), unix_socket.fileno())
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import find_packages, setup\n\nimport pycolorname\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n\nwith open(... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import find_packages, setup\n\nimport pycolorname\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
import pycolorname
# Runtime dependencies (install_requires) come from the pinned file.
with open('requirements.txt') as requirements:
    required = requirements.read().splitlines()
# Extra dependencies needed only to run the test suite.
with open('test-requirements.txt') as requirements:
    test_required = requirements.read().splitlines()
if __name__ == "__main__":
    setup(name='pycolorname',
          version=pycolorname.__version__,
          description='Provides common color systems.',
          author="DrTrigon",
          author_email="dr.trigon@surfeu.ch",
          maintainer="AbdealiJK",
          maintainer_email='abdealikothari@gmail.com',
          url='https://github.com/AbdealiJK/pycolorname',
          platforms='any',
          packages=find_packages(exclude=["build.*", "*.tests.*", "*.tests"]),
          install_requires=required,
          tests_require=test_required,
          license="MIT",
          package_data={'pycolorname': ["VERSION", "data/*"]},
          # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
          classifiers=[
              'Development Status :: 4 - Beta',
              'Environment :: Console',
              'Environment :: MacOS X',
              'Environment :: Win32 (MS Windows)',
              'Intended Audience :: Developers',
              'Operating System :: OS Independent',
              'Programming Language :: Python :: Implementation :: CPython'])
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/env python\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the Lic... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either vers... | ```python
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
"""
Test keyboard inputs through spice. Send keys through qemu monitor to client.
Requires
--------
- Deployed PyGTK on guest VM.
Presumes the numlock state at startup is 'OFF'.
"""
import logging
from spice.lib import act
from spice.lib import stest
from spice.lib import utils
logger = logging.getLogger(__name__)
#
#def test_leds_migration(client_vm, guest_vm, guest_session, params):
# """
# Check LEDS after migration.
# Function sets LEDS (caps, num) to ON and send scancodes of "a" and
# "1 (num)" and expected to get keycodes of "A" and "1" after migration.
#
# Parameters
# ----------
# client_vm :
# Vm object.
# guest_vm :
# Vm object.
# guest_session :
# Ssh session to guest VM.
# params : virttest.utils_params.Params
# Dictionary with the test parameters.
# """
# # Turn numlock on RHEL6 on before the test begins:
# grep_ver_cmd = "grep -o 'release [[:digit:]]' /etc/redhat-release"
# rhel_ver = guest_session.cmd(grep_ver_cmd).strip()
# logging.info("RHEL version: #{0}#".format(rhel_ver))
# if rhel_ver == "release 6":
# client_vm.send_key('num_lock')
# #Run PyGTK form catching KeyEvents on guest
# run_test_form(guest_session, params)
# utils_spice.wait_timeout(3)
# # Tested keys before migration
# test_keys = ['a', 'kp_1', 'caps_lock', 'num_lock', 'a', 'kp_1']
# logging.info("Sending leds keys to client machine before migration")
# for key in test_keys:
# client_vm.send_key(key)
# utils_spice.wait_timeout(0.3)
# guest_vm.migrate()
# utils_spice.wait_timeout(8)
# #Tested keys after migration
# test_keys = ['a', 'kp_1', 'caps_lock', 'num_lock']
# logging.info("Sending leds keys to client machine after migration")
# for key in test_keys:
# client_vm.send_key(key)
# utils_spice.wait_timeout(0.3)
# utils_spice.wait_timeout(30)
#expected_keysyms = [97, 65457, 65509, 65407, 65, 65436, 65, 65436,
# 65509, 65407]
#
def test_seq(test, send_keys, expected_keysyms):
    """Send *send_keys* to the client VM and assert the guest logged
    exactly *expected_keysyms*, in order.

    :param test: ClientGuestTest instance (provides vmi_g / vm_c).
    :param send_keys: iterable of key names understood by vm.send_key().
    :param expected_keysyms: list of keysym codes expected on the guest.
    """
    ssn = act.klogger_start(test.vmi_g)
    try:
        for key in send_keys:
            test.vm_c.send_key(key)
        logged_keys = act.klogger_stop(test.vmi_g, ssn)
    finally:
        # Close the session even when sending/stopping (or the assertion
        # below) fails -- the original leaked it on assertion failure.
        ssn.close()
    # Tuple-parameter lambdas are Python-2-only syntax; a list
    # comprehension works on both Python 2 and 3 and yields the list the
    # comparison below requires (py3 map() returns an iterator).
    keysyms = [keysym for _, keysym in logged_keys]
    assert keysyms == expected_keysyms
def run(vt_test, test_params, env):
    """Test for testing keyboard inputs through spice.
    Parameters
    ----------
    vt_test : avocado.core.plugins.vt.VirtTest
        QEMU test object.
    test_params : virttest.utils_params.Params
        Dictionary with the test parameters.
    env : virttest.utils_env.Env
        Dictionary with test environment.
    """
    test = stest.ClientGuestTest(vt_test, test_params, env)
    cfg = test.cfg
    #test.cmd_g.install_rpm(cfg.xev)
    # Bring both desktops up and open a remote-viewer connection from the
    # client to the guest before sending any keys.
    act.x_active(test.vmi_c)
    act.x_active(test.vmi_g)
    ssn = act.new_ssn(test.vmi_c, dogtail_ssn=test.vmi_c.vm.is_rhel8())
    act.rv_connect(test.vmi_c, ssn)
    act.rv_chk_con(test.vmi_c)
    if cfg.ttype == 'type_and_func_keys':
        """Test typewriter and functional keys."""
        # Scancodes 1..68 cover the typewriter block and F1-F10.
        keycodes = range(1, 69)
        # Skip Ctrl, RSH, LSH, PtScr, Alt, CpsLk
        skip = [29, 42, 54, 55, 56, 58]
        send_keys = [hex(k) for k in keycodes if k not in skip]
        expected_keysyms = [65307, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 45,
                            61, 65288, 65289, 113, 119, 101, 114, 116, 121,
                            117, 105, 111, 112, 91, 93, 65293, 97, 115, 100,
                            102, 103, 104, 106, 107, 108, 59, 39, 96, 92, 122,
                            120, 99, 118, 98, 110, 109, 44, 46, 47, 32, 65470,
                            65471, 65472, 65473, 65474, 65475, 65476, 65477,
                            65478, 65479]
        test_seq(test, send_keys, expected_keysyms)
    if cfg.ttype == 'leds_and_esc_keys':
        # Escaped (extended) keys, modifier shortcuts, then LED toggles.
        escaped = ['insert', 'delete', 'home', 'end', 'pgup', 'pgdn', 'up',
                   'down', 'right', 'left']
        expected_keysyms = [65379, 65535, 65360, 65367,
                            65365, 65366, 65362, 65364, 65363, 65361]
        test_seq(test, escaped, expected_keysyms)
        shortcuts = ['a', 'shift-a', 'shift_r-a', 'ctrl-a', 'ctrl-c', 'ctrl-v',
                     'alt-x']
        expected_keysyms = [97, 65505, 65, 65506, 65, 65507, 97, 65507, 99,
                            65507, 118, 65513, 120]
        test_seq(test, shortcuts, expected_keysyms)
        leds = ['a', 'caps_lock', 'a', 'caps_lock', 'num_lock', 'kp_1',
                'num_lock', 'kp_1']
        expected_keysyms = [97, 65509, 65, 65509, 65407, 65457, 65407, 65436]
        test_seq(test, leds, expected_keysyms)
    if cfg.ttype == 'nonus_layout':
        # Czech, then German layouts; restore US at the end.
        cmd = utils.Cmd("setxkbmap", "cz")
        act.run(test.vmi_g, cmd)
        keys = ['7', '8', '9', '0', 'alt_r-x', 'alt_r-c', 'alt_r-v']
        expected_keysyms = [253, 225, 237, 233, 65027, 35, 65027, 38, 65027,
                            64]
        test_seq(test, keys, expected_keysyms)
        cmd = utils.Cmd("setxkbmap", "de")
        act.run(test.vmi_g, cmd)
        keys = ['minus', '0x1a', 'alt_r-q', 'alt_r-m']
        expected_keysyms = [223, 252, 65027, 64, 65027, 181]
        test_seq(test, keys, expected_keysyms)
        cmd = utils.Cmd("setxkbmap", "us")
        act.run(test.vmi_g, cmd)
    if cfg.ttype == "leds_migration":
        # Verify LED state survives a guest migration (see module header:
        # numlock is assumed OFF at startup; RHEL6 needs it toggled first).
        if test.vm_c.is_rhel6():
            test.vm_c.send_key('num_lock')
        keys1 = ['a', 'kp_1', 'caps_lock', 'num_lock', 'a', 'kp_1']
        keys2 = ['a', 'kp_1', 'caps_lock', 'num_lock']
        expected_keysyms = ['97', '65457', '65509', '65407', '65', '65436',
                            '65', '65436', '65509', '65407']
        ssn = act.klogger_start(test.vmi_g)
        for i in keys1:
            test.vm_c.send_key(i)
        test.vm_g.migrate()
        for i in keys2:
            test.vm_c.send_key(i)
        logged_keys = act.klogger_stop(test.vmi_g, ssn)
        ssn.close()
        keysyms = [key[1] for key in logged_keys]
        assert keysyms == expected_keysyms
""" Useful links
https://code.google.com/archive/p/key-mon/
http://www.shallowsky.com/software/crikey/pykey-0.1
https://www.berrange.com/tags/key-codes/
ftp://ftp.suse.com/pub/people/sbrabec/keyboards/
http://python-evdev.readthedocs.io/en/latest/index.html
http://python-xlib.sourceforge.net/doc/html/python-xlib_16.html#SEC15
https://en.wikipedia.org/wiki/Evdev
http://python-evdev.readthedocs.io/en/latest/apidoc.html#module-evdev.ecodes
https://www.vmware.com/support/ws4/doc/devices_linux_kb_ws.html
http://www.madore.org/~david/linux/linux-old.html
http://www.comptechdoc.org/os/linux/howlinuxworks/linux_hlkeycodes.html
https://wiki.ubuntu.com/Hotkeys/Architecture
http://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
"""
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\nbl_info = {\n \"name\": \"BVH Cache Manager\",\n \"category\": \"Render\",\n \"description\":\"Easily delete cached BVH data!\",\n \"location\":\"Properties Editor > Render > BVH Cache Manager\",\n \"aut... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\nbl_info = {\n \"name\": \"BVH Cache Manager\",\n \"category\": \"Render\",\n \"description\":\"Easily delete cached BVH data!\",\n \"location\":\"Properties Editor > Render > BVH Cache Manag... | ```python
# Add-on metadata read by Blender's add-on manager.
# NOTE(review): Blender's bl_info spec normally expects "version" as a
# tuple (e.g. (0, 0, 1)); confirm these string values load cleanly in the
# target Blender release.
bl_info = {
    "name": "BVH Cache Manager",
    "category": "Render",
    "description":"Easily delete cached BVH data!",
    "location":"Properties Editor > Render > BVH Cache Manager",
    "author":"Gregory Bolet",
    "version":"001",
    "warning":"Alpha Version"
}
import bpy
import os
import shutil
cacheDirectory = ""
class InterfacePanel(bpy.types.Panel): #[ref: Panel(bpy_struct)]
    """Render-properties panel exposing the cache-folder field and the
    Clear Cache button."""
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context= "render"
    bl_label = "BVH Cache Manager"
    #this will create some UI elements
    # NOTE(review): the print calls and the Scene property registration
    # below execute at class-definition (import) time, not per instance.
    print("***Staring Interface Panel...***")
    bpy.types.Scene.manualDirectory = bpy.props.StringProperty(name = "Cache Folder", default = "You can leave me blank!",
                                        description = "Manually select cache folder directory",
                                        subtype = 'DIR_PATH'
                                        )
    print("***Interface Ready!***")
    #draw function gets called A LOT,
    #object edits cannot be performed here, only UI updates
    def draw(self, context):
        layout = self.layout
        col = layout.column(align=True) #adds a column to the UI
        col.label("Manual Entry:")
        col.prop(context.scene, 'manualDirectory', expand=False)
        # NOTE(review): draw() runs on every redraw, so this print floods
        # the console while the panel is visible.
        print("Manual Directory IS:"+ context.scene.manualDirectory)
        col.label("")
        col.operator("blender.execute",text="Clear Cache")
        return None
class ClearCacheButton(bpy.types.Operator):
    """Operator that deletes (and recreates empty) Blender's BVH cache
    folder, either a user-supplied one or the platform default."""
    bl_idname = "blender.execute"
    bl_label = "Clear BVH Cache"
    bl_options = {"UNDO"}

    def __init__(self):
        """Resolve the cache directory: the manual entry when valid,
        otherwise the platform-specific default for this Blender version."""
        global cacheDirectory
        from sys import platform as _platform
        manualDir = bpy.context.scene.manualDirectory
        version = '{:.4}'.format(bpy.app.version_string)

        if (os.path.isdir(manualDir) == False and manualDir != ""):
            print("Invalid manual entry directory. Using default cache folder...")
        elif (os.path.isdir(manualDir) == False and manualDir == ""):
            print("Looking for default cache folder...")

        if(manualDir != "" and os.path.isdir(manualDir)):
            cacheDirectory = manualDir[:-1]  # removes extra slash
        elif _platform == "linux" or _platform == "linux2":
            # ~/.config/blender/<version>/cache
            # expanduser() instead of a literal "$HOME" string: the original
            # never expanded it, so makedirs() created a directory literally
            # named "$HOME" in the working directory.
            cacheDirectory = os.path.join(
                os.path.expanduser("~"), ".config", "blender", version, "cache")
        elif _platform == "darwin":
            # ~/Library/Application Support/Blender/<version>/cache
            cacheDirectory = os.path.join(
                os.path.expanduser("~"), "Library", "Application Support",
                "Blender", version, "cache")
        elif _platform == "win32":
            # %APPDATA%\Blender Foundation\Blender\<version>\cache
            # os.path.join avoids the invalid "\B"/"\c" escape sequences of
            # the original string literals (a SyntaxWarning on Python 3.12+).
            cacheDirectory = os.path.join(
                os.getenv('APPDATA'), "Blender Foundation", "Blender",
                version, "cache")

        print("User Cache Directory: " + cacheDirectory)
        return None

    def clearCache(self):
        """Delete the cache directory, then recreate it empty."""
        global cacheDirectory
        if os.path.isdir(cacheDirectory):
            shutil.rmtree(cacheDirectory)
        if os.path.isdir(cacheDirectory) == False:
            os.makedirs(cacheDirectory)
            print("\nNo cache directory exists, creating one...")
            print("New cache folder directory: " + cacheDirectory + "\n")
        return None

    def execute(self, context):
        """Entry point invoked by the Clear Cache button."""
        global cacheDirectory
        print("\nStarting process...")
        self.clearCache()
        cacheDirectory = ""  # reset so the next run re-resolves the path
        print("FINISHED! \n\n\n")
        return {"FINISHED"}
    # end invoke
def register():
    """Register the add-on's panel and operator with Blender."""
    bpy.utils.register_class(InterfacePanel)
    bpy.utils.register_class(ClearCacheButton)
def unregister():
    """Unregister the add-on's classes."""
    bpy.utils.unregister_class(ClearCacheButton)
    bpy.utils.unregister_class(InterfacePanel)
if __name__ == "__main__":
    # Allows running the script directly from Blender's text editor.
    register()
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n\"\"\"Download files with progress indicators.\n\"\"\"\nimport cgi\nimport logging\nimport mimetypes\nimport os\n\nfrom pipenv.patched.notpip._vendor import requests\nfrom pipenv.patched.notpip._vendor.requests.models import CON... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n\"\"\"Download files with progress indicators.\n\"\"\"\nimport cgi\nimport logging\nimport mimetypes\nimport os\n\nfrom pipenv.patched.notpip._vendor import requests\nfrom pipenv.patched.notpip._vendor.requests.m... | ```python
"""Download files with progress indicators.
"""
import cgi
import logging
import mimetypes
import os
from pipenv.patched.notpip._vendor import requests
from pipenv.patched.notpip._vendor.requests.models import CONTENT_CHUNK_SIZE
from pipenv.patched.notpip._internal.models.index import PyPI
from pipenv.patched.notpip._internal.network.cache import is_from_cache
from pipenv.patched.notpip._internal.network.utils import response_chunks
from pipenv.patched.notpip._internal.utils.misc import (
format_size,
redact_auth_from_url,
splitext,
)
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
from pipenv.patched.notpip._internal.utils.ui import DownloadProgressProvider
if MYPY_CHECK_RUNNING:
from typing import Iterable, Optional
from pipenv.patched.notpip._vendor.requests.models import Response
from pipenv.patched.notpip._internal.models.link import Link
from pipenv.patched.notpip._internal.network.session import PipSession
logger = logging.getLogger(__name__)
def _get_http_response_size(resp):
# type: (Response) -> Optional[int]
try:
return int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
return None
def _prepare_download(
resp, # type: Response
link, # type: Link
progress_bar # type: str
):
# type: (...) -> Iterable[bytes]
total_length = _get_http_response_size(resp)
if link.netloc == PyPI.file_storage_domain:
url = link.show_url
else:
url = link.url_without_fragment
logged_url = redact_auth_from_url(url)
if total_length:
logged_url = '{} ({})'.format(logged_url, format_size(total_length))
if is_from_cache(resp):
logger.info("Using cached %s", logged_url)
else:
logger.info("Downloading %s", logged_url)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif is_from_cache(resp):
show_progress = False
elif not total_length:
show_progress = True
elif total_length > (40 * 1000):
show_progress = True
else:
show_progress = False
chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
if not show_progress:
return chunks
return DownloadProgressProvider(
progress_bar, max=total_length
)(chunks)
def sanitize_content_filename(filename):
# type: (str) -> str
"""
Sanitize the "filename" value from a Content-Disposition header.
"""
return os.path.basename(filename)
def parse_content_disposition(content_disposition, default_filename):
# type: (str, str) -> str
"""
Parse the "filename" value from a Content-Disposition header, and
return the default filename if the result is empty.
"""
_type, params = cgi.parse_header(content_disposition)
filename = params.get('filename')
if filename:
# We need to sanitize the filename to prevent directory traversal
# in case the filename contains ".." path parts.
filename = sanitize_content_filename(filename)
return filename or default_filename
def _get_http_response_filename(resp, link):
# type: (Response, Link) -> str
"""Get an ideal filename from the given HTTP response, falling back to
the link filename if not provided.
"""
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition, filename)
ext = splitext(filename)[1] # type: Optional[str]
if not ext:
ext = mimetypes.guess_extension(
resp.headers.get('content-type', '')
)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
return filename
def _http_get_download(session, link):
# type: (PipSession, Link) -> Response
target_url = link.url.split('#', 1)[0]
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
return resp
class Download(object):
def __init__(
self,
response, # type: Response
filename, # type: str
chunks, # type: Iterable[bytes]
):
# type: (...) -> None
self.response = response
self.filename = filename
self.chunks = chunks
class Downloader(object):
def __init__(
self,
session, # type: PipSession
progress_bar, # type: str
):
# type: (...) -> None
self._session = session
self._progress_bar = progress_bar
def __call__(self, link):
# type: (Link) -> Download
try:
resp = _http_get_download(self._session, link)
except requests.HTTPError as e:
logger.critical(
"HTTP error %s while getting %s", e.response.status_code, link
)
raise
return Download(
resp,
_get_http_response_filename(resp, link),
_prepare_download(resp, link, self._progress_bar),
)
``` |
[
{
"content": "Here is a code file:\n```python\nfrom tests.conftest import get_token\n\n\ndef edit_post(postsetup, token, title='title', content='content', tags='',\n action='save', url='/1/edit'):\n return postsetup.app.post(url, data={\n 'token': token,\n 'title': title,\n 'conte... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom tests.conftest import get_token\n\n\ndef edit_post(postsetup, token, title='title', content='content', tags='',\n action='save', url='/1/edit'):\n return postsetup.app.post(url, data={\n 'token': token,\n 'title': title,... | ```python
from tests.conftest import get_token
def edit_post(postsetup, token, title='title', content='content', tags='',
action='save', url='/1/edit'):
return postsetup.app.post(url, data={
'token': token,
'title': title,
'content': content,
'tags': tags,
'action': action
}, follow_redirects=True)
def test_anonymous_cannot_get_edit_page(postsetup):
r = postsetup.app.get('/1/edit', follow_redirects=True)
assert r.status_code == 403
assert 'You need to be logged in to view that content' in r.data
postsetup.done()
def test_anonymous_cannot_edit_post(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
token = get_token(r.data)
postsetup.app.get('/logout')
title = 'title-{0}'.format(token)
r = edit_post(postsetup, token, title=title)
assert r.status_code == 403
assert 'You need to be logged in to view that content' in r.data
r = postsetup.app.get('/')
assert title not in r.data
postsetup.done()
def test_anonymous_cannot_preview_post(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
token = get_token(r.data)
postsetup.app.get('/logout')
r = edit_post(postsetup, token)
assert r.status_code == 403
assert 'You need to be logged in to view that content' in r.data
postsetup.done()
def test_admin_can_get_edit_page(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
assert r.status_code == 200
assert 'Create Post' in r.data
postsetup.done()
def test_admin_can_edit_post(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
token = get_token(r.data)
title = 'title-{0}'.format(token)
r = edit_post(postsetup, token, title=title)
assert r.status_code == 200
assert title in r.data
assert 'Success' in r.data
postsetup.done()
def test_admin_can_preview_post(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
token = get_token(r.data)
title = 'title-{0}'.format(token)
r = edit_post(postsetup, token, title=title, action='preview')
assert r.status_code == 200
assert '<article>' in r.data
assert title in r.data
postsetup.done()
def test_invalid_token_prevents_creation(postsetup):
postsetup.login()
r = edit_post(postsetup, 'invalid-token')
assert 'Tokens did not match.' in r.data
postsetup.done()
def test_cannot_get_edit_page_for_nonexisting_post(postsetup):
postsetup.login()
r = postsetup.app.get('/2/edit')
assert r.status_code == 404
assert 'That post does not exist.' in r.data
postsetup.done()
def test_cannot_edit_nonexisting_post(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
token = get_token(r.data)
r = edit_post(postsetup, token, url='/2/edit')
assert r.status_code == 404
assert 'That post does not exist.' in r.data
postsetup.done()
def test_cannot_preview_nonexisting_post(postsetup):
postsetup.login()
r = postsetup.app.get('/1/edit')
token = get_token(r.data)
r = edit_post(postsetup, token, url='/2/edit', action='preview')
assert r.status_code == 404
assert 'That post does not exist.' in r.data
postsetup.done()
``` |
[
{
"content": "Here is some code:\n```python\nfrom sys import getsizeof\nimport collections\nimport pickle\n# import timeit\nfrom sys import getsizeof\nimport time\nimport csv\nimport io\noutput = io.StringIO()\n\n\nwith open('../dictionary_Nikon_Snippet_Surroundings.pickle', 'rb') as f:\n d = (pickle.load(f)... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nfrom sys import getsizeof\nimport collections\nimport pickle\n# import timeit\nfrom sys import getsizeof\nimport time\nimport csv\nimport io\noutput = io.StringIO()\n\n\nwith open('../dictionary_Nikon_Snippet_Surroundings.pickle', 'rb') as f:\n d =... | ```python
from sys import getsizeof
import collections
import pickle
# import timeit
from sys import getsizeof
import time
import csv
import io
output = io.StringIO()
with open('../dictionary_Nikon_Snippet_Surroundings.pickle', 'rb') as f:
d = (pickle.load(f))
with open('/Users/sasa/Dropbox/1-Uni/CMPUT 692/Project/Code/Nikon_Entities.csv', 'rU') as csvfile:
reader = csv.reader(csvfile, dialect=csv.excel_tab)
#with open('/Users/sasa/Dropbox/1-Uni/CMPUT 692/Project/Code/Nikon_context.csv', 'w') as csvfile2:
with open('/Users/sasa/Dropbox/1-Uni/CMPUT 692/Project/Code/Nikon_context.pickle', 'wb') as picklefile:
#writer = csv.writer(csvfile2)
Nikon_Surr={}
for row in reader:
print(row[0])
#excelrow=[row[0]]
excelrow = []
surr={}
#print(d[row[0]])
surr= collections.Counter(d[row[0].lower()])
mc=(surr.most_common(6))
#print(mc)
for value in mc:
excelrow.append(value[0])
Nikon_Surr[str(row[0]).lower()]=excelrow
print(Nikon_Surr[str(row[0]).lower()])
pickle.dump(Nikon_Surr, picklefile)
#writer.writerow((excelrow))
``` |
[
{
"content": "Replicate the source code:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo program of Hindi WordNet in Python. \n\nHere I demonstrate all the functionalities of the libraries, but note you can load only the pickle files which are necessary for your task rather than loading every pickle file. L... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo program of Hindi WordNet in Python. \n\nHere I demonstrate all the functionalities of the libraries, but note you can load only the pickle files which are necessary for your task rather than loading ever... | ```python
# -*- coding: utf-8 -*-
"""
Demo program of Hindi WordNet in Python.
Here I demonstrate all the functionalities of the libraries, but note you can load only the pickle files which are necessary for your task rather than loading every pickle file. Loading of pickle files takes time and memory. But once loaded, all your WordNet operations are just O(1) which means your WordNet lookup is no longer a bottleneck.
Developer: Siva Reddy <siva@sivareddy.in>
Please point http://sivareddy.in/downloads for others to find these python libraries.
"""
import pickle
word2Synset = pickle.load(open("WordSynsetDict.pk"))
synset2Onto = pickle.load(open("SynsetOnto.pk"))
synonyms = pickle.load(open("SynsetWords.pk"))
synset2Gloss = pickle.load(open("SynsetGloss.pk"))
synset2Hypernyms = pickle.load(open("SynsetHypernym.pk"))
synset2Hyponyms = pickle.load(open("SynsetHyponym.pk"))
synset2Hypernyms = pickle.load(open("SynsetHypernym.pk"))
word = "खाना".decode('utf-8', 'ignore')
while True:
if word2Synset.has_key(word):
synsets = word2Synset[word]
print "Word -->", "खाना "
for pos in synsets.keys():
print "POS Category -->", pos
for synset in synsets[pos]:
print "\t\tSynset -->", synset
if synonyms.has_key(synset):
print "\t\t\t Synonyms -->", synonyms[synset]
if synset2Gloss.has_key(synset):
print "\t\t\t Synset Gloss", synset2Gloss[synset]
if synset2Onto.has_key(synset):
print "\t\t\t Ontological Categories", synset2Onto[synset]
if synset2Hypernyms.has_key(synset):
print "\t\t\t Hypernym Synsets", synset2Hypernyms[synset]
if synset2Hyponyms.has_key(synset):
print "\t\t\t Hyponym Synsets", synset2Hyponyms[synset]
word = raw_input("Enter a word: ").decode("utf-8", "ignore")
``` |
[
{
"content": "Replicate the source code:\n```python\n\"\"\"Create random dog names\"\"\"\nimport random\nfrom functools import reduce\n\nfrom dog_list import DOGS\n\n\ndef word_iter(words):\n \"\"\"iterate on words\"\"\"\n word = []\n for char in words:\n if char in ' \\n\\t':\n if wo... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n\"\"\"Create random dog names\"\"\"\nimport random\nfrom functools import reduce\n\nfrom dog_list import DOGS\n\n\ndef word_iter(words):\n \"\"\"iterate on words\"\"\"\n word = []\n for char in words:\n if char in ' \\n\\t':\n ... | ```python
"""Create random dog names"""
import random
from functools import reduce
from dog_list import DOGS
def word_iter(words):
"""iterate on words"""
word = []
for char in words:
if char in ' \n\t':
if word:
yield ''.join(word)
word = []
else:
word.append(char)
def rand_elem(indexable):
"""return an element at random from an indexable collection"""
elem = random.randint(0, len(indexable) - 1)
return indexable[elem]
def hyphenate(phrase):
"""hyphenate a phrase correctly"""
return reduce(list.__add__, (x.split('-') for x in phrase.split()))
def mangle_dog_names(names):
"""return all those names mangled up"""
sylls = (hyphenate(x) for x in names.strip().splitlines())
return list(set(reduce(list.__add__, sylls)))
SYLLABLES = mangle_dog_names(DOGS)
def create_dog_name():
"""create a random dog name"""
num_syllables = random.randint(2, 4)
name = ''.join((rand_elem(SYLLABLES) for _ in range(num_syllables)))
return name.lower().capitalize()
if __name__ == '__main__':
print(create_dog_name())
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# wsse/server/drf/tests/test_authentication.py\n# coding=utf-8\n# pywsse\n# Authors: Rushy Panchal, Naphat Sanguansin, Adam Libresco, Jérémie Lumbroso\n# Date: September 1st, 2016\n# Description: Test DRF WSSE Authentication backend.\n\nimport contextlib\n... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# wsse/server/drf/tests/test_authentication.py\n# coding=utf-8\n# pywsse\n# Authors: Rushy Panchal, Naphat Sanguansin, Adam Libresco, Jérémie Lumbroso\n# Date: September 1st, 2016\n# Description: Test DRF WSSE Authentication backend.\n\nimp... | ```python
# wsse/server/drf/tests/test_authentication.py
# coding=utf-8
# pywsse
# Authors: Rushy Panchal, Naphat Sanguansin, Adam Libresco, Jérémie Lumbroso
# Date: September 1st, 2016
# Description: Test DRF WSSE Authentication backend.
import contextlib
import hashlib
import base64
import datetime
import itertools
from rest_framework.test import APITestCase, APIRequestFactory
from rest_framework import status
from django.contrib.auth.models import User
from django.utils import timezone
from wsse import utils, settings
from wsse.compat import reverse_lazy
from wsse.server.django.wsse.models import UserSecret
def setUpModule():
'''
Set up the module for running tests.
'''
# Set the nonce store to the Django store after saving the current settings
# so they can be restored later.
global __old_nonce_settings
__old_nonce_settings = (settings.NONCE_STORE, settings.NONCE_STORE_ARGS)
settings.NONCE_STORE = 'wsse.server.django.wsse.store.DjangoNonceStore'
settings.NONCE_STORE_ARGS = []
def tearDownModule():
'''
Tear down the module after running tests.
'''
# Restore the nonce settings.
settings.NONCE_STORE, settings.NONCE_STORE_ARGS = __old_nonce_settings
class WSSEAuthenticationTests(APITestCase):
'''
Test WSSE Authentication on the API.
'''
factory = APIRequestFactory()
base_url = reverse_lazy('api-test')
@contextlib.contextmanager
def http_auth(self, header):
'''
Perform HTTP authentication, through headers, in a request.
The headers are automatically cleared afterwards.
'''
kwargs = {utils._django_header(settings.REQUEST_HEADER): header}
self.client.credentials(**kwargs)
yield
# Clear the credential headers.
self.client.credentials()
@classmethod
def setUpClass(cls):
'''
Set up the class for running tests.
'''
cls.user = User.objects.create(username = 'test')
cls.user_secret = UserSecret.objects.create(user = cls.user)
@classmethod
def tearDownClass(cls):
'''
Tear down the class after running tests.
'''
cls.user.delete()
def make_header_values(self, user = None, username = None, timestamp = None,
digest = None, b64_digest = None, nonce = None, b64_nonce = None,
digest_algorithm = None):
'''
Make the header values from the given parameters.
:param user: (optional) user to authenticate with header
:type user: django.contrib.auth.models.User
:param username: (optional) username to provide in header
:type username: str
:param timestamp: (optional) timestamp to use in header
:type timestamp: str
:param digest: (optional) header digest
:type digest: bytes
:param b64_digest: (optional) header digest as base64
:type b64_digest: bytes
:param nonce: (optional) header nonce
:type nonce: bytes
:param b64_nonce: (optional) header nonce as base64
:type b64_nonce: bytes
:param digest_algorithm: (optional, default: sha256) digest algorithm to
use. It must be supported by hashlib.
:type digest_algorithm: str
:return: WSSE authentication header parts
:rtype: dict
'''
if user is None:
user = self.user
if username is None:
username = user.username
if timestamp is None:
now = timezone.now()
timestamp = now.strftime(settings.TIMESTAMP_FORMATS[0])
if nonce is None:
nonce = utils._random_string(length = settings.NONCE_LENGTH)
if digest is None:
digest = utils._b64_digest(nonce, timestamp, self.user_secret.secret,
algorithm = digest_algorithm)
if b64_nonce is None:
b64_nonce = base64.b64encode(utils._to_bytes(nonce))
if b64_digest is not None:
digest = b64_digest
header_values = {
'Username': username,
'PasswordDigest': utils._from_bytes(digest),
'Nonce': utils._from_bytes(b64_nonce),
'Created': timestamp
}
return header_values
def make_header(self, *args, **kwargs):
'''
Make the header from the given values.
:return: WSSE authentication header
:rtype: str
'''
header_values = self.make_header_values(*args, **kwargs)
header = (', '.join('{k}="{v}"'.format(
k = k, v = v) for k, v in header_values.items()))
return header
def test_valid_authentication(self):
'''
Authenticate with a valid username. The authentication should succeed.
'''
with self.http_auth(self.make_header()):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_valid_authentication_alternative_timestamp_format(self):
'''
Authenticate with a valid username, using an alternative timestamp format.
The authentication should succeed.
'''
now = timezone.now()
timestamp = now.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
with self.http_auth(self.make_header(timestamp = timestamp)):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_valid_authentication_alternative_headers(self):
'''
Make a valid authentication request. Use various permutations of the
header format.
'''
default_params = ['Username', 'PasswordDigest', 'Nonce', 'Created']
for params in itertools.permutations(default_params):
header_values = self.make_header_values()
header = ('UsernameToken ' + ', '.join('{k}="{v}"'.format(
k = param, v = header_values[param]) for param in params))
with self.http_auth(header):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_valid_authentication_drift(self):
'''
Authenticate with a valid username with drift on the timestamp.
The authentication should succeed.
'''
ts = (timezone.now() +
datetime.timedelta(seconds = settings.DRIFT_OFFSET - 1))
timestamp = ts.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
with self.http_auth(self.make_header()):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_no_authentication(self):
'''
Perform a request with no attempt at authentication. Authentication
should not succeed.
'''
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_wrong_format_authentication(self):
'''
Perform a request with incorrect authentication header format.
Authentication should not succeed.
'''
with self.http_auth('WrongFormat=27'):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_invalid_timestamp_authentication(self):
'''
Perform a request with an invalid timestamp.
Authentication should not succeed.
'''
with self.http_auth(self.make_header(timestamp = 'Nope')):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_invalid_timestamp_format_authentication(self):
'''
Perform a request with an invalid timestamp format.
Authentication should not succeed.
'''
now = timezone.now()
timestamp = now.strftime("%m/%d/%Y, %M:%S.%f")
with self.http_auth(self.make_header(timestamp = timestamp)):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_expired_timestamp(self):
'''
Authenticate an expired timestamp. The authentication should not succeed.
'''
now = timezone.now() - datetime.timedelta(
seconds = settings.TIMESTAMP_DURATION + 1)
timestamp = now.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
with self.http_auth(self.make_header(timestamp = timestamp)):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_future_timestamp(self):
'''
Authenticate a future timestamp. The authentication should not succeed.
'''
now = timezone.now() + datetime.timedelta(
seconds = settings.TIMESTAMP_DURATION + 1)
timestamp = now.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
with self.http_auth(self.make_header(timestamp = timestamp)):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_incorrect_username(self):
'''
Authenticate with an incorrect username. The authetication should not
succeed.
'''
with self.http_auth(self.make_header(username = 'nope')):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_invalid_b64_nonce(self):
'''
Authenticate with a nonce that is not base64. The authentication should not
succeed.
'''
with self.http_auth(self.make_header(b64_nonce = '?????????')):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_short_nonce(self):
'''
Authenticate with a nonce that is fewer than 8 characters. The
authentication should not succeed.
'''
with self.http_auth(self.make_header(b64_nonce = 'short')):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_long_nonce(self):
'''
Authenticate with a nonce that is longer than 32 characters. The
authentication should not succeed.
'''
with self.http_auth(self.make_header(b64_nonce = 'a' * 72)):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_authenticate_sha1(self):
'''
Authenticate with a valid header, but calculate the digest using SHA-1.
The authentication should not succeed.
'''
with self.http_auth(self.make_header(
digest_algorithm = 'sha1')):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_invalid_digest(self):
'''
Authenticate with an invalid digest. The authentication should not succeed.
'''
with self.http_auth(self.make_header(
digest = 'nope'.encode('utf-8'))):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_invalid_digest_b64(self):
'''
Authenticate with an invalid digest, in base64.
The authentication should not succeed.
'''
with self.http_auth(self.make_header(b64_digest = 'nope')):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_replay_attack(self):
'''
Authenticate with a valid header twice. The second authentication should
be detected as a replay attack.
'''
header = self.make_header()
with self.http_auth(header):
response = self.client.get(self.base_url)
with self.http_auth(header):
second_response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(second_response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_replay_attack_multiple(self):
'''
Authenticate with a valid header multiple times.
The following authentication attempts should be detected as replay attacks.
'''
header = self.make_header()
with self.http_auth(header):
response = self.client.get(self.base_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for _ in range(10):
with self.http_auth(header):
new_resp = self.client.get(self.base_url)
self.assertEqual(new_resp.status_code, status.HTTP_401_UNAUTHORIZED)
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom PIL import Image, ExifTags\nimport magic\n\nfrom oclubs.access import fs\nfrom oclubs.exceptions impor... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom PIL import Image, ExifTags\nimport magic\n\nfrom oclubs.access import fs\nfrom oclubs.... | ```python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from __future__ import absolute_import, unicode_literals
import os
from PIL import Image, ExifTags
import magic
from oclubs.access import fs
from oclubs.exceptions import UploadNotSupported
from oclubs.objs.base import BaseObject, Property
ORIENTATION = next((key for key, val in ExifTags.TAGS.items()
if val == 'Orientation'))
class Upload(BaseObject):
table = 'upload'
identifier = 'upload_id'
club = Property('upload_club', 'Club')
uploader = Property('upload_user', 'User')
_location = Property('upload_loc')
mime = Property('upload_mime')
# Don't use mimetypes.guess_extension(mime) -- 'image/jpeg' => '.jpe'
_mimedict = {
'image/jpeg': '.jpg',
'image/png': '.png',
}
@property
def id(self):
return self._id
@property
def location_local(self):
if self.is_real:
return self.mk_internal_path(self._location)
else:
return self.mk_internal_path(-self.id, False)
@property
def location_external(self):
if self.is_real:
return self.mk_external_path(self._location)
else:
return self.mk_external_path(-self.id, False)
@classmethod
def handle(cls, user, club, file):
filename = os.urandom(8).encode('hex')
temppath = os.path.join('/tmp', filename)
file.save(temppath)
try:
# Don't use mimetypes.guess_type(temppath) -- Faked extensions
mime = magic.from_file(temppath, mime=True)
if mime not in cls._mimedict:
raise UploadNotSupported
filename = filename + cls._mimedict[mime]
permpath = cls.mk_internal_path(filename)
permdir = os.path.dirname(permpath)
if not os.path.isdir(permdir):
os.makedirs(permdir, 0o755)
# resize to 600, 450
cls._thumb(temppath, permpath)
fs.watch(permpath)
finally:
os.remove(temppath)
obj = cls.new()
obj.club = club
obj.uploader = user
obj._location = filename
obj.mime = mime
return obj.create()
@staticmethod
def _thumb(temppath, permpath):
im = Image.open(temppath)
try:
# HACK: Fixing EXIF orientation
exif = dict(im._getexif().items())
exif[ORIENTATION]
except Exception:
pass
else:
if exif[ORIENTATION] == 3:
im = im.rotate(180, expand=True)
elif exif[ORIENTATION] == 6:
im = im.rotate(270, expand=True)
elif exif[ORIENTATION] == 8:
im = im.rotate(90, expand=True)
im.thumbnail((4000, 3000))
im.save(permpath, optimize=True)
@staticmethod
def mk_relative_path(filename, is_upload=True):
if is_upload:
return os.path.join('images', filename[0], filename[:2], filename)
else:
return os.path.join('static/images/icons', 'icon%d.jpg' % filename)
@staticmethod
def mk_internal_path(filename, is_upload=True):
return os.path.join('/srv/oclubs',
Upload.mk_relative_path(filename, is_upload))
@staticmethod
def mk_external_path(filename, is_upload=True):
return os.path.join('/', Upload.mk_relative_path(filename, is_upload))
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Guewen Baconnier\n# Copyright 2013 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# ... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Guewen Baconnier\n# Copyright 2013 Camptocamp SA\n#\n# This program is free software: you can redistribute it and... | ```python
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Stock Reserve',
'version': '0.1',
'author': 'Camptocamp',
'category': 'Warehouse',
'license': 'AGPL-3',
'complexity': 'normal',
'images': [],
'website': "http://www.camptocamp.com",
'description': """
Stock Reserve
=============
Allows to create stock reservations on products.
Each reservation can have a validity date, once passed, the reservation
is automatically lifted.
The reserved products are substracted from the virtual stock. It means
that if you reserved a quantity of products which bring the virtual
stock below the minimum, the orderpoint will be triggered and new
purchase orders will be generated. It also implies that the max may be
exceeded if the reservations are canceled.
""",
'depends': ['stock',
],
'demo': [],
'data': ['view/stock_reserve.xml',
'view/product.xml',
'data/stock_data.xml',
'security/ir.model.access.csv',
],
'auto_install': False,
'test': ['test/stock_reserve.yml',
],
'installable': True,
}
``` |
[
{
"content": "```python\n# This file holds physical constants and reads atomic weights.\nimport sys\nimport re\nimport os\nimport inspect\n###############\n\n# Physical Constants\n\nPHYSICAL_CONSTANTS = {\n 'h' : 6.626070E-34, # Planck's constants in J * s\n 'c' : 2.997925E+10, # speed of light in units... | [
{
"content": "<|memory_start|>```python\n# This file holds physical constants and reads atomic weights.\nimport sys\nimport re\nimport os\nimport inspect\n###############\n\n# Physical Constants\n\nPHYSICAL_CONSTANTS = {\n 'h' : 6.626070E-34, # Planck's constants in J * s\n 'c' : 2.997925E+10, # speed o... | ```python
# This file holds physical constants and reads atomic weights.
import sys
import re
import os
import inspect
###############
# Physical Constants
# Fundamental physical constants, keyed by short conventional symbols.
# All values are in SI-style units noted per entry (note 'c' is in cm/s).
PHYSICAL_CONSTANTS = {
    'h' : 6.626070E-34, # Planck's constants in J * s
    'c' : 2.997925E+10, # speed of light in units of cm/s
    'Eh' : 4.359745E-18, # energy of a hartree in units of J = kg m^2/s^2
    'a0' : 5.291772E-11, # bohr radius in m
    'atb': 5.291772E-01, # angstroms per bohr
    'amu': 1.660468E-27, # atomic mass unit in units kg
    'kB' : 1.380649E-23 # Boltzmann's constant in J/K
}
#CM/2.998E10/,EM/1.440E13/,HBC/1.4387/
###############
# Atomic Weight Information
class Element(object):
    """One chemical element: name, symbol, atomic number and default mass,
    plus the list of isotopic replacements (symbol/mass pairs) allowed for it.

    Invalid input is reported to stdout and aborts the program via
    sys.exit(1), matching the fail-fast style of the surrounding loader.
    """
    def __init__(self, full_name, atomic_number, symbol, default_mass):
        # the name of this element, like "hydrogen"
        full_name = str(full_name)
        self.full_name = full_name
        # re.search (not re.match, which only anchors at position 0) so a
        # non-lowercase character ANYWHERE in the name is rejected
        if re.search("[^a-z]", full_name):
            print("Unexpected non-lowercase character in element name: %s" % full_name)
            print("Quitting.")
            sys.exit(1)
        # the symbol of this element, like "H"
        symbol = str(symbol)
        self.symbol = symbol
        # re.search for the same reason as above
        if re.search("[^a-zA-Z]", symbol):
            print("Unexpected non-letter character in element symbol: %s" % symbol)
            print("Quitting.")
            sys.exit(1)
        if len(symbol) < 1 or len(symbol) > 2:
            print("Unexpected length of element symbol (must be 1 or 2): %s" % symbol)
            print("Quitting.")
            sys.exit(1)
        # the atomic number of this element, like 1
        atomic_number = int(atomic_number)
        self.atomic_number = atomic_number
        if atomic_number < 1 or atomic_number > 200:
            print("Unexpected atomic number: %d" % atomic_number)
            print("Quitting.")
            sys.exit(1)
        # the average weight for this element, like 1.00783
        default_mass = float(default_mass)
        self.default_mass = default_mass
        if default_mass < 0.0 or default_mass > 500.0:
            # %f (not %d): default_mass is a float and must not be shown truncated
            print("Unexpected default mass: %f" % default_mass)
            print("Quitting.")
            sys.exit(1)
        # pairs of tuples strings (like "2H") to masses (like 2.0141)
        self.replacements = []

    def __str__(self):
        """Human-readable summary, one extra line per isotopic replacement."""
        string = "%s (%s, Z=%d, default mass = %.4f" % (self.full_name.capitalize(), self.symbol, self.atomic_number, self.default_mass)
        if len(self.replacements) == 0:
            string += ", no isotopic replacements possible)\n"
        else:
            string += ")\n"
        for s,m in self.replacements:
            string += "    %2s : %.4f\n" % (s,m)
        # drop the trailing newline
        return string[:-1]

    def add_replacement(self, symbol, mass):
        """Register an isotopic replacement (e.g. "2H" -> 2.0141).

        symbol must be 1-4 alphanumeric characters and unique for this
        element; mass must lie in [0, 500].
        """
        symbol = str(symbol)
        # re.search so an invalid character anywhere in the symbol is caught
        if re.search("[^a-zA-Z0-9]", symbol):
            print("Unexpected non-letter character in isotopic replacement symbol: %s" % symbol)
            print("Quitting.")
            sys.exit(1)
        if len(symbol) < 1 or len(symbol) > 4:
            print("Unexpected length of element symbol in replacement (must be 1-4 inclusive, found %d): %s" % (len(symbol), symbol))
            print("Quitting.")
            sys.exit(1)
        # reject duplicate replacement symbols
        for s,m in self.replacements:
            if s == symbol:
                print("Must use a unique symbol for every isotopic replacement: %s" % s)
                sys.exit(1)
        mass = float(mass)
        if mass < 0.0 or mass > 500.0:
            print("Unexpected isotopic replacement mass: %f" % mass)
            sys.exit(1)
        self.replacements.append((symbol,mass))
# read in atomic weight data
# Each non-comment line of weights.dat is expected to look like
#   full_name,atomic_number,symbol,default_mass[,iso_symbol,iso_mass]...
# (inferred from the parsing below — TODO confirm against the data file).
elements = []
# weights.dat lives next to this module
root = os.path.split(os.path.abspath(__file__))[0]
for line in open(root + "/weights.dat", "r"):
    # ignore comments and blank lines
    line = line.strip()
    if len(line) == 0 or line[0] == "#":
        continue
    # strip trailing inline comments
    line = line.split("#",1)[0]
    # parse
    fields = line.split(",") #line.encode("ascii","ignore").split(",")
    if len(fields) < 4:
        print("Error: not enough data on this line of weights.dat:")
        print(line)
        print("\nQuitting.")
        sys.exit(1)
    # first four fields: name, Z, symbol, default mass
    element = Element(*fields[0:4])
    # any remaining fields must come in (symbol, mass) pairs
    if (len(fields)-4) % 2 != 0:
        print("Unexpected number of isotopic replacement fields on this line of weights.dat.")
        print("The number of fields after the first four must be a multiple of 2 (found %d)." % (len(fields)-4))
        print(line)
        print("\nQuitting.")
        sys.exit(1)
    if (len(fields) > 4):
        for i in range(4, len(fields), 2):
            element.add_replacement(fields[i], fields[i+1])
    elements.append(element)
    #print element
print("Read atomic weight data for %d elements." % len(elements))
# map from atomic number to default masses
DEFAULT_MASSES = { e.atomic_number : e.default_mass for e in elements }
# map from valid isotopic replacements to masses
REPLACEMENTS = {}
for e in elements:
    for replacement,mass in e.replacements:
        REPLACEMENTS[replacement] = mass
# map from isotopic replacements to atomic numbers
REPLACEMENTS_Z = {}
for e in elements:
    for replacement,mass in e.replacements:
        REPLACEMENTS_Z[replacement]=e.atomic_number
# threshold to separate linear molecules from non-linear molecules
LINEARITY_THRESHOLD = 1e-06
DROP_NUM_LINEAR = 5
# DROP_NUM_NONLINEAR = 6
``` |
[
{
"content": "```python\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <opens... | [
{
"content": "<|memory_start|>```python\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\... | ```python
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/x509v3.h>
/*
* This is part of a work-around for the difficulty cffi has in dealing with
* `LHASH_OF(foo)` as the name of a type. We invent a new, simpler name that
* will be an alias for this type and use the alias throughout. This works
* together with another opaque typedef for the same name in the TYPES section.
* Note that the result is an opaque type.
*/
typedef LHASH_OF(CONF_VALUE) Cryptography_LHASH_OF_CONF_VALUE;
typedef STACK_OF(ACCESS_DESCRIPTION) Cryptography_STACK_OF_ACCESS_DESCRIPTION;
typedef STACK_OF(DIST_POINT) Cryptography_STACK_OF_DIST_POINT;
typedef STACK_OF(POLICYQUALINFO) Cryptography_STACK_OF_POLICYQUALINFO;
typedef STACK_OF(POLICYINFO) Cryptography_STACK_OF_POLICYINFO;
typedef STACK_OF(ASN1_INTEGER) Cryptography_STACK_OF_ASN1_INTEGER;
typedef STACK_OF(GENERAL_SUBTREE) Cryptography_STACK_OF_GENERAL_SUBTREE;
"""
TYPES = """
typedef ... Cryptography_STACK_OF_ACCESS_DESCRIPTION;
typedef ... Cryptography_STACK_OF_POLICYQUALINFO;
typedef ... Cryptography_STACK_OF_POLICYINFO;
typedef ... Cryptography_STACK_OF_ASN1_INTEGER;
typedef ... Cryptography_STACK_OF_GENERAL_SUBTREE;
typedef ... EXTENDED_KEY_USAGE;
typedef ... CONF;
typedef struct {
X509 *issuer_cert;
X509 *subject_cert;
...;
} X509V3_CTX;
typedef void * (*X509V3_EXT_D2I)(void *, const unsigned char **, long);
typedef struct {
ASN1_ITEM_EXP *it;
X509V3_EXT_D2I d2i;
...;
} X509V3_EXT_METHOD;
static const int GEN_OTHERNAME;
static const int GEN_EMAIL;
static const int GEN_X400;
static const int GEN_DNS;
static const int GEN_URI;
static const int GEN_DIRNAME;
static const int GEN_EDIPARTY;
static const int GEN_IPADD;
static const int GEN_RID;
typedef struct {
ASN1_OBJECT *type_id;
ASN1_TYPE *value;
} OTHERNAME;
typedef struct {
...;
} EDIPARTYNAME;
typedef struct {
int ca;
ASN1_INTEGER *pathlen;
} BASIC_CONSTRAINTS;
typedef struct {
Cryptography_STACK_OF_GENERAL_SUBTREE *permittedSubtrees;
Cryptography_STACK_OF_GENERAL_SUBTREE *excludedSubtrees;
} NAME_CONSTRAINTS;
typedef struct {
ASN1_INTEGER *requireExplicitPolicy;
ASN1_INTEGER *inhibitPolicyMapping;
} POLICY_CONSTRAINTS;
typedef struct {
int type;
union {
char *ptr;
OTHERNAME *otherName; /* otherName */
ASN1_IA5STRING *rfc822Name;
ASN1_IA5STRING *dNSName;
ASN1_TYPE *x400Address;
X509_NAME *directoryName;
EDIPARTYNAME *ediPartyName;
ASN1_IA5STRING *uniformResourceIdentifier;
ASN1_OCTET_STRING *iPAddress;
ASN1_OBJECT *registeredID;
/* Old names */
ASN1_OCTET_STRING *ip; /* iPAddress */
X509_NAME *dirn; /* dirn */
ASN1_IA5STRING *ia5; /* rfc822Name, dNSName, */
/* uniformResourceIdentifier */
ASN1_OBJECT *rid; /* registeredID */
ASN1_TYPE *other; /* x400Address */
} d;
...;
} GENERAL_NAME;
typedef struct {
GENERAL_NAME *base;
ASN1_INTEGER *minimum;
ASN1_INTEGER *maximum;
} GENERAL_SUBTREE;
typedef struct stack_st_GENERAL_NAME GENERAL_NAMES;
typedef struct {
ASN1_OCTET_STRING *keyid;
GENERAL_NAMES *issuer;
ASN1_INTEGER *serial;
} AUTHORITY_KEYID;
typedef struct {
ASN1_OBJECT *method;
GENERAL_NAME *location;
} ACCESS_DESCRIPTION;
typedef ... Cryptography_LHASH_OF_CONF_VALUE;
typedef ... Cryptography_STACK_OF_DIST_POINT;
typedef struct {
int type;
union {
GENERAL_NAMES *fullname;
Cryptography_STACK_OF_X509_NAME_ENTRY *relativename;
} name;
...;
} DIST_POINT_NAME;
typedef struct {
DIST_POINT_NAME *distpoint;
ASN1_BIT_STRING *reasons;
GENERAL_NAMES *CRLissuer;
...;
} DIST_POINT;
typedef struct {
ASN1_STRING *organization;
Cryptography_STACK_OF_ASN1_INTEGER *noticenos;
} NOTICEREF;
typedef struct {
NOTICEREF *noticeref;
ASN1_STRING *exptext;
} USERNOTICE;
typedef struct {
ASN1_OBJECT *pqualid;
union {
ASN1_IA5STRING *cpsuri;
USERNOTICE *usernotice;
ASN1_TYPE *other;
} d;
} POLICYQUALINFO;
typedef struct {
ASN1_OBJECT *policyid;
Cryptography_STACK_OF_POLICYQUALINFO *qualifiers;
} POLICYINFO;
"""
FUNCTIONS = """
int X509V3_EXT_add_alias(int, int);
void X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);
X509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);
GENERAL_NAME *GENERAL_NAME_new(void);
int GENERAL_NAME_print(BIO *, GENERAL_NAME *);
GENERAL_NAMES *GENERAL_NAMES_new(void);
void GENERAL_NAMES_free(GENERAL_NAMES *);
void *X509V3_EXT_d2i(X509_EXTENSION *);
"""
MACROS = """
/* This is a macro defined by a call to DECLARE_ASN1_FUNCTIONS in the
x509v3.h header. */
BASIC_CONSTRAINTS *BASIC_CONSTRAINTS_new(void);
void BASIC_CONSTRAINTS_free(BASIC_CONSTRAINTS *);
/* This is a macro defined by a call to DECLARE_ASN1_FUNCTIONS in the
x509v3.h header. */
AUTHORITY_KEYID *AUTHORITY_KEYID_new(void);
void AUTHORITY_KEYID_free(AUTHORITY_KEYID *);
NAME_CONSTRAINTS *NAME_CONSTRAINTS_new(void);
void NAME_CONSTRAINTS_free(NAME_CONSTRAINTS *);
OTHERNAME *OTHERNAME_new(void);
void OTHERNAME_free(OTHERNAME *);
POLICY_CONSTRAINTS *POLICY_CONSTRAINTS_new(void);
void POLICY_CONSTRAINTS_free(POLICY_CONSTRAINTS *);
void *X509V3_set_ctx_nodb(X509V3_CTX *);
int i2d_GENERAL_NAMES(GENERAL_NAMES *, unsigned char **);
GENERAL_NAMES *d2i_GENERAL_NAMES(GENERAL_NAMES **, const unsigned char **,
long);
int sk_GENERAL_NAME_num(struct stack_st_GENERAL_NAME *);
int sk_GENERAL_NAME_push(struct stack_st_GENERAL_NAME *, GENERAL_NAME *);
GENERAL_NAME *sk_GENERAL_NAME_value(struct stack_st_GENERAL_NAME *, int);
Cryptography_STACK_OF_ACCESS_DESCRIPTION *sk_ACCESS_DESCRIPTION_new_null(void);
int sk_ACCESS_DESCRIPTION_num(Cryptography_STACK_OF_ACCESS_DESCRIPTION *);
ACCESS_DESCRIPTION *sk_ACCESS_DESCRIPTION_value(
Cryptography_STACK_OF_ACCESS_DESCRIPTION *, int
);
void sk_ACCESS_DESCRIPTION_free(Cryptography_STACK_OF_ACCESS_DESCRIPTION *);
int sk_ACCESS_DESCRIPTION_push(Cryptography_STACK_OF_ACCESS_DESCRIPTION *,
ACCESS_DESCRIPTION *);
ACCESS_DESCRIPTION *ACCESS_DESCRIPTION_new(void);
void ACCESS_DESCRIPTION_free(ACCESS_DESCRIPTION *);
X509_EXTENSION *X509V3_EXT_conf_nid(Cryptography_LHASH_OF_CONF_VALUE *,
X509V3_CTX *, int, char *);
/* These aren't macros these functions are all const X on openssl > 1.0.x */
const X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *);
const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int);
Cryptography_STACK_OF_DIST_POINT *sk_DIST_POINT_new_null(void);
void sk_DIST_POINT_free(Cryptography_STACK_OF_DIST_POINT *);
int sk_DIST_POINT_num(Cryptography_STACK_OF_DIST_POINT *);
DIST_POINT *sk_DIST_POINT_value(Cryptography_STACK_OF_DIST_POINT *, int);
int sk_DIST_POINT_push(Cryptography_STACK_OF_DIST_POINT *, DIST_POINT *);
void sk_POLICYINFO_free(Cryptography_STACK_OF_POLICYINFO *);
int sk_POLICYINFO_num(Cryptography_STACK_OF_POLICYINFO *);
POLICYINFO *sk_POLICYINFO_value(Cryptography_STACK_OF_POLICYINFO *, int);
int sk_POLICYINFO_push(Cryptography_STACK_OF_POLICYINFO *, POLICYINFO *);
Cryptography_STACK_OF_POLICYINFO *sk_POLICYINFO_new_null(void);
POLICYINFO *POLICYINFO_new(void);
void POLICYINFO_free(POLICYINFO *);
POLICYQUALINFO *POLICYQUALINFO_new(void);
void POLICYQUALINFO_free(POLICYQUALINFO *);
NOTICEREF *NOTICEREF_new(void);
void NOTICEREF_free(NOTICEREF *);
USERNOTICE *USERNOTICE_new(void);
void USERNOTICE_free(USERNOTICE *);
void sk_POLICYQUALINFO_free(Cryptography_STACK_OF_POLICYQUALINFO *);
int sk_POLICYQUALINFO_num(Cryptography_STACK_OF_POLICYQUALINFO *);
POLICYQUALINFO *sk_POLICYQUALINFO_value(Cryptography_STACK_OF_POLICYQUALINFO *,
int);
int sk_POLICYQUALINFO_push(Cryptography_STACK_OF_POLICYQUALINFO *,
POLICYQUALINFO *);
Cryptography_STACK_OF_POLICYQUALINFO *sk_POLICYQUALINFO_new_null(void);
Cryptography_STACK_OF_GENERAL_SUBTREE *sk_GENERAL_SUBTREE_new_null(void);
void sk_GENERAL_SUBTREE_free(Cryptography_STACK_OF_GENERAL_SUBTREE *);
int sk_GENERAL_SUBTREE_num(Cryptography_STACK_OF_GENERAL_SUBTREE *);
GENERAL_SUBTREE *sk_GENERAL_SUBTREE_value(
Cryptography_STACK_OF_GENERAL_SUBTREE *, int
);
int sk_GENERAL_SUBTREE_push(Cryptography_STACK_OF_GENERAL_SUBTREE *,
GENERAL_SUBTREE *);
GENERAL_SUBTREE *GENERAL_SUBTREE_new(void);
void sk_ASN1_INTEGER_free(Cryptography_STACK_OF_ASN1_INTEGER *);
int sk_ASN1_INTEGER_num(Cryptography_STACK_OF_ASN1_INTEGER *);
ASN1_INTEGER *sk_ASN1_INTEGER_value(Cryptography_STACK_OF_ASN1_INTEGER *, int);
int sk_ASN1_INTEGER_push(Cryptography_STACK_OF_ASN1_INTEGER *, ASN1_INTEGER *);
Cryptography_STACK_OF_ASN1_INTEGER *sk_ASN1_INTEGER_new_null(void);
X509_EXTENSION *X509V3_EXT_i2d(int, int, void *);
DIST_POINT *DIST_POINT_new(void);
void DIST_POINT_free(DIST_POINT *);
DIST_POINT_NAME *DIST_POINT_NAME_new(void);
void DIST_POINT_NAME_free(DIST_POINT_NAME *);
"""
CUSTOMIZATIONS = """
"""
``` |
[
{
"content": "Repeat the code exactly:\n```python\n#!/bin/env python\n\n# Automatically translated python version of \n# OpenSceneGraph example program \"osgscalarbar\"\n# !!! This program will need manual tuning before it will work. !!!\n\nimport sys\n\nfrom osgpypp import osg\nfrom osgpypp import osgDB\nfrom ... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#!/bin/env python\n\n# Automatically translated python version of \n# OpenSceneGraph example program \"osgscalarbar\"\n# !!! This program will need manual tuning before it will work. !!!\n\nimport sys\n\nfrom osgpypp import osg\nfrom osgpypp imp... | ```python
#!/bin/env python

# Automatically translated python version of
# OpenSceneGraph example program "osgscalarbar"
# !!! This program will need manual tuning before it will work. !!!
#
# NOTE(review): this file is the raw output of an automatic C++ -> Python
# translator. It still contains C++ remnants (template syntax, '#include'
# lines, an 'int main(...)' signature, 'else return' without a colon) and
# Python 2 'print' statements, so it is NOT valid Python as-is. The code is
# kept unchanged below; only comments were added.

import sys

from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgSim
from osgpypp import osgUtil
from osgpypp import osgViewer

# Translated from file 'osgscalarbar.cpp'

# OpenSceneGraph example, osgscalarbar.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#

# Leftover C++ includes from the original source (comments in Python):
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Material>
#include <osg/Texture2D>
#include <osg/MatrixTransform>
#include <osg/PositionAttitudeTransform>
#include <osg/BlendFunc>
#include <osg/ClearNode>
#include <osg/Projection>
#include <osgUtil/CullVisitor>
#include <osgGA/TrackballManipulator>
#include <osgViewer/Viewer>
#include <osgDB/ReadFile>
#include <osgSim/ScalarsToColors>
#include <osgSim/ColorRange>
#include <osgSim/ScalarBar>
#include <sstream>
#include <iostream>
#include <math.h>

# C++ 'using' directives, not valid Python:
using namespace osgSim
using osgSim.ScalarBar

#if defined(_MSC_VER)
# not have to have this pathway for just VS6.0 as its unable to handle the full
# ScalarBar.ScalarPrinter.printScalar scoping.

# Create a custom scalar printer
# NOTE(review): 'printScalar' is missing 'self' and 'else return' lacks a
# colon — broken translation, kept verbatim.
class MyScalarPrinter (ScalarBar.ScalarPrinter) :
    def printScalar(scalar):
        print "In MyScalarPrinter.printScalar"
        if scalar==0.0 : return ScalarPrinter.printScalar(scalar)+" Bottom"
        elif scalar==0.5 : return ScalarPrinter.printScalar(scalar)+" Middle"
        elif scalar==1.0 : return ScalarPrinter.printScalar(scalar)+" Top"
        else return ScalarPrinter.printScalar(scalar)
#else:
# Create a custom scalar printer
class MyScalarPrinter (ScalarBar.ScalarPrinter) :
    def printScalar(scalar):
        print "In MyScalarPrinter.printScalar"
        if scalar==0.0 : return ScalarBar.ScalarPrinter.printScalar(scalar)+" Bottom"
        elif scalar==0.5 : return ScalarBar.ScalarPrinter.printScalar(scalar)+" Middle"
        elif scalar==1.0 : return ScalarBar.ScalarPrinter.printScalar(scalar)+" Top"
        else return ScalarBar.ScalarPrinter.printScalar(scalar)
#endif

def createScalarBar():
    #if 1
    #ScalarsToColors* stc = ScalarsToColors(0.0,1.0)
    #ScalarBar* sb = ScalarBar(2,3,stc,"STC_ScalarBar")
    # Create a custom color set
    # NOTE(review): 'std.vector<...>' is untranslated C++ template syntax.
    cs = std.vector<osg.Vec4>()
    cs.push_back(osg.Vec4(1.0,0.0,0.0,1.0)) # R
    cs.push_back(osg.Vec4(0.0,1.0,0.0,1.0)) # G
    cs.push_back(osg.Vec4(1.0,1.0,0.0,1.0)) # G
    cs.push_back(osg.Vec4(0.0,0.0,1.0,1.0)) # B
    cs.push_back(osg.Vec4(0.0,1.0,1.0,1.0)) # R
    cr = ColorRange(0.0,1.0,cs)
    sb = ScalarBar(20, 11, cr, "ScalarBar", ScalarBar.VERTICAL, 0.1, MyScalarPrinter)()
    sb.setScalarPrinter(MyScalarPrinter)()
    return sb
    #else:
    # unreachable alternative branch from the original '#else' preprocessor arm
    sb = ScalarBar()
    tp = ScalarBar.TextProperties()
    tp._fontFile = "fonts/times.ttf"
    sb.setTextProperties(tp)
    return sb
    #endif

def createScalarBar_HUD():
    geode = osgSim.ScalarBar()
    tp = osgSim.ScalarBar.TextProperties()
    tp._fontFile = "fonts/times.ttf"
    geode.setTextProperties(tp)
    stateset = geode.getOrCreateStateSet()
    stateset.setMode(GL_LIGHTING, osg.StateAttribute.OFF)
    stateset.setMode(GL_DEPTH_TEST,osg.StateAttribute.OFF)
    stateset.setRenderBinDetails(11, "RenderBin")
    modelview = osg.MatrixTransform()
    modelview.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    matrix = osg.Matrixd(osg.Matrixd.scale(1000,1000,1000) * osg.Matrixd.translate(120,10,0)) # I've played with these values a lot and it seems to work, but I have no idea why
    modelview.setMatrix(matrix)
    modelview.addChild(geode)
    projection = osg.Projection()
    projection.setMatrix(osg.Matrix.ortho2D(0,1280,0,1024)) # or whatever the OSG window res is
    projection.addChild(modelview)
    return projection #make sure you delete the return sb line

# NOTE(review): untranslated C++ signature — should be 'def main(argv):'.
int main(int , char **)
    # construct the viewer.
    viewer = osgViewer.Viewer()
    group = osg.Group()
    group.addChild(createScalarBar())
    group.addChild(createScalarBar_HUD())
    # add model to viewer.
    viewer.setSceneData( group )
    return viewer.run()

if __name__ == "__main__":
    main(sys.argv)
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nimport os\nimport sys\n\nneighborDict = {} #dictionary containing neighbors of each node\nweightDict = {} #dictionary containing weights of edges\nfeatureDict = {} #dictionary containing features of each node\nfeatureDictTotal = {} #dictionay contai... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nimport os\nimport sys\n\nneighborDict = {} #dictionary containing neighbors of each node\nweightDict = {} #dictionary containing weights of edges\nfeatureDict = {} #dictionary containing features of each node\nfeatureDictTotal = {} #... | ```python
import os
import sys
# Build a weighted ego-network graph from the SNAP twitter dataset:
# reads '*.featnames', '*.egofeat' and '*.feat' files from ../twitter and
# writes 'featureIndex.txt' (feature name -> global index) and 'graph.txt'
# (node, neighbor/weight pairs, node feature indices).
neighborDict = {}       # node -> list of neighbor node ids
weightDict = {}         # node -> list of edge weights, parallel to neighborDict
featureDict = {}        # node -> list of global feature indices the node has
featureDictTotal = {}   # node -> ordered list of feature names listed for that node
totalFeatureDict = {}   # feature name -> global feature index

# the path of data files
currPath = "../twitter"

# list all files
fileArray = os.listdir(currPath)

######## get totalFeature #############
# '*.featnames' files enumerate every feature name that can apply to a node.
for fileGraphName in fileArray:
    if fileGraphName.endswith('.featnames'):
        nodeNum = fileGraphName[0:len(fileGraphName) - 10]  # strip '.featnames'
        featureArray = []
        # 'with' closes the handle (the original leaked one file per node);
        # 'in' replaces the Python-2-only dict.has_key
        with open(os.path.join(currPath, fileGraphName), 'r') as fileGraph:
            for line in fileGraph:
                lineArray = line.rstrip().split(' ')
                # assign a fresh global index the first time a feature is seen
                if lineArray[1] not in totalFeatureDict:
                    totalFeatureDict[lineArray[1]] = len(totalFeatureDict)
                featureArray.append(lineArray[1])
        featureDictTotal[nodeNum] = featureArray

######## get features ###############
# '*.egofeat' files hold a 0/1 vector over the node's possible features.
for fileGraphName in fileArray:
    if fileGraphName.endswith('.egofeat'):
        nodeNum = fileGraphName[0:len(fileGraphName) - 8]  # strip '.egofeat'
        features = []
        with open(os.path.join(currPath, fileGraphName), 'r') as fileGraph:
            for line in fileGraph:
                lineArray = line.rstrip().split(' ')
                for i in range(0, len(lineArray)):
                    if lineArray[i] == '1':  # '1' means the node has feature i
                        features.append(totalFeatureDict[featureDictTotal[nodeNum][i]])
        featureDict[nodeNum] = features

######### get neighbors and weights #############
# '*.feat' files list each neighbor id followed by its 0/1 feature vector.
for fileGraphName in fileArray:
    if fileGraphName.endswith('.feat'):
        nodeNum = fileGraphName[0:len(fileGraphName) - 5]  # strip '.feat'
        neighbor = []  # neighbors of this node
        weights = []   # weight per neighbor, same order
        ## get node features ##
        with open(os.path.join(currPath, nodeNum + '.egofeat'), 'r') as fileNodeFeature:
            lineEgoFeatureArray = fileNodeFeature.readline().rstrip().split(' ')
        with open(os.path.join(currPath, fileGraphName), 'r') as fileGraph:
            for line in fileGraph:
                lineArray = line.rstrip().split(' ')
                neighbor.append(lineArray[0])
                # edge weight = number of features shared by node and neighbor
                weight = 0
                for i in range(0, len(lineEgoFeatureArray)):
                    if lineArray[i + 1] == '1' and lineEgoFeatureArray[i] == '1':
                        weight += 1
                weights.append(weight)
        neighborDict[nodeNum] = neighbor
        weightDict[nodeNum] = weights

######### write to profile ################
### write feature and index num ####
with open('featureIndex.txt', 'w') as fileOut:
    for tag in totalFeatureDict.keys():
        fileOut.writelines(tag + ' ' + str(totalFeatureDict[tag]) + '\n')

### write neightbors and weights ####
# format per line: node n_neighbors (neighbor weight)* n_features feature*
with open('graph.txt', 'w') as fileOut:
    for nodeNum in neighborDict.keys():
        line = nodeNum + ' ' + str(len(neighborDict[nodeNum]))
        for i in range(0, len(neighborDict[nodeNum])):
            line = line + ' ' + neighborDict[nodeNum][i]
            line = line + ' ' + str(weightDict[nodeNum][i])
        line = line + ' ' + str(len(featureDict[nodeNum]))
        for feature in featureDict[nodeNum]:
            line = line + ' ' + str(feature)
        line = line + '\n'
        fileOut.writelines(line)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n\"\"\"evaluation_framework.py -- All that is needed to evaluate feature selection algorithms.\"\"\"\n\nimport numpy as np\nimport tables as tb\nimport subprocess\nimport shlex\nimport math\n\nfrom sklearn import linear_model, metrics, model_sel... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n\"\"\"evaluation_framework.py -- All that is needed to evaluate feature selection algorithms.\"\"\"\n\nimport numpy as np\nimport tables as tb\nimport subprocess\nimport shlex\nimport math\n\nfrom sklearn import linear_model, me... | ```python
"""evaluation_framework.py -- All that is needed to evaluate feature selection algorithms."""
import numpy as np
import tables as tb
import subprocess
import shlex
import math
from sklearn import linear_model, metrics, model_selection
def consistency_index(sel1, sel2, num_features):
    """ Compute the consistency index between two sets of features.

    Parameters
    ----------
    sel1: set
        First set of indices of selected features
    sel2: set
        Second set of indices of selected features
    num_features: int
        Total number of features

    Returns
    -------
    cidx: float
        Consistency index between the two sets; -1. when the index is
        undefined (expected overlap equals the maximum possible one).

    Reference
    ---------
    Kuncheva, L.I. (2007). A Stability Index for Feature Selection.
    AIAC, pp. 390--395.
    """
    size1 = len(sel1)
    size2 = len(sel2)
    shared = float(len(sel1.intersection(sel2)))
    chance = size1 * size2 / float(num_features)
    upper = float(min(size1, size2))
    # "take everything" and "take nothing" are trivial solutions that make
    # chance == upper (division by zero); report the worst value instead.
    if chance == upper:
        return -1.
    return (shared - chance) / (upper - chance)


def consistency_index_k(sel_list, num_features):
    """ Compute the consistency index between more than 2 sets of features.

    This is done by averaging over all pairwise consistency indices.

    Parameters
    ----------
    sel_list: list of lists
        List of k lists of indices of selected features
    num_features: int
        Total number of features

    Returns
    -------
    cidx: float
        Mean consistency index over all (k choose 2) pairs of sets.

    Reference
    ---------
    Kuncheva, L.I. (2007). A Stability Index for Feature Selection.
    AIAC, pp. 390--395.
    """
    k = len(sel_list)
    total = 0.
    # every unordered pair (i, j), i < j
    for i in range(k - 1):
        for j in range(i + 1, k):
            total += consistency_index(set(sel_list[i]), set(sel_list[j]),
                                       num_features)
    # normalize by the number of pairs, k * (k - 1) / 2
    return 2. * total / (k * (k - 1))
def consistency_index_task(selection_fname, num_folds, num_tasks, num_features):
    """ Compute consistency indices between the features selected for each fold at each task

    NOTE(review): Python 2 only (uses xrange); assumes each selection file
    has exactly num_tasks lines, one space-separated feature list per task.

    Arguments
    ---------
    selection_fname : filename
        Template of path where were write list of selected features
        (must contain one '%' placeholder for the fold index).
    num_folds: int
        Total number of fold
    num_tasks: int
        Total number of task
    num_features: int
        Total number of features

    Return
    -------
    ci_list
        List of consistency indices between the features selected for each fold, task per task.
    """
    # In the curret repeat repo, there is a file of selected feature for each fold
    # in which each line is a task
    # on each line, there is the space separated list of selected feature.
    # we want consistency indices between the features selected for each fold, task per task
    # so for each line in these files, we compute the ci between the features selected for each fold
    # As there are ~10 folds, there are 10 files.
    # they don't take a lot of memory
    # so it is ok to open them all :
    fold_f_list = []
    for fold_idx in xrange (num_folds) :
        f_sel = open(selection_fname %fold_idx, 'r')
        fold_f_list.append(f_sel)
    ci_list = []
    # For each task :
    for task_idx in xrange (num_tasks):
        sel_list = []
        for f_sel in fold_f_list :
            # increment aline in each file
            content = f_sel.readline().split()
            # append lines content in sel_list
            sel_list.append(content)
        # compute the ci between the features selected for each fold at this current task
        ci = consistency_index_k(sel_list, num_features)
        ci_list.append(ci)
    # close every per-fold file opened above
    for f in fold_f_list :
        f.close()
    return ci_list
def run_sfan(num_tasks, network_fname, weights_fnames, params):
    """ Run single task sfan (on each task).

    NOTE(review): Python 2 only (print statements, xrange). Spawns
    'python multitask_sfan.py' under /usr/bin/time and parses its stdout;
    '-m 0' forces mu=0, i.e. independent single-task runs.

    Arguments
    ---------
    num_tasks: int
        Number of tasks.
    network_fname: filename
        Path to the network file.
    weights_fnames: list of filenames
        List of paths to the network nodes files (one per task).
    params: string
        Hyperparameters, in the '-l <lambda> -e <eta> -m <mu>' format.

    Returns
    -------
    sel_list: list of lists
        For each task, a list of selected features, as indices,
        STARTING AT 0.
    timing: string
        Timing information printed by the solver.
    maxRSS: string
        Peak memory (KB) reported by /usr/bin/time on stderr.
    """
    # Ideally, I'd do the following:
    # sfan_solver = Sfan(num_tasks, network_fname, weights_fname,
    #                    lbd, eta, 0, covariance_fname)
    # tt = sfan_solver.create_dimacs()
    # sfan_solver.run_maxflow()
    # But because cython output to screen is NOT caught by sys.stdout,
    # we need to run this externally
    argum = ['/usr/bin/time', '--format=%M',
             'python', 'multitask_sfan.py',
             '--num_tasks', str(num_tasks),
             '--networks', network_fname,
             '--node_weights']
    argum.extend(weights_fnames)
    argum.extend(params.split())
    argum.extend(['-m', '0'])
    print '+++'
    print argum
    p = subprocess.Popen(argum, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # stdout=subprocess.PIPE -> something should read the output while the process is still running
    # stderr=subprocess.STDOUT : To also capture standard error in the result
    p_com = p.communicate()
    p_out = p_com[0].split("\n")
    p_err = p_com[1].split("\n")
    print p_com
    # Process the output to get lists of selected features
    # (features are printed 1-based starting at output line 2 — presumably
    # after two banner lines; verify against multitask_sfan.py)
    sel_list = [[(int(x)-1) for x in line.split()] for line in p_out[2:2+num_tasks]]
    if not sel_list :
        #TODO : fix no sel_list issue#1
        #import pdb ; pdb.set_trace()
        print "WARNING : returned sel_list empty !! algo = st ; param = ", params
        sel_list = [[] for i in xrange(num_tasks)]
    # Process the standart output to get timing info
    timing = '\n'.join(p_out[2+num_tasks:])
    # Process the standart error to get maxRSS info :
    maxRSS = p_err[-2]
    return sel_list, timing, maxRSS
def run_msfan_nocorr(num_tasks, network_fname, weights_fnames, params):
    """ Run multitask sfan (no precision/covariance matrix).

    NOTE(review): Python 2 only (print statements, xrange). Same external
    invocation as run_sfan, but without '-m 0' and with selected features
    expected one line later in the output (p_out[3:...]).

    Arguments
    ---------
    num_tasks: int
        Number of tasks.
    network_fname: filename
        Path to the network file.
    weights_fnames: list of filenames
        List of paths to the network nodes files (one per task).
    params: string
        Hyperparameters, in the '-l <lambda> -e <eta> -m <mu>' format.

    Returns
    -------
    sel_list: list of lists
        For each task, a list of selected features, as indices,
        STARTING AT 0.
    timing: string
        Timing information printed by the solver.
    maxRSS: string
        Peak memory (KB) reported by /usr/bin/time on stderr.
    """
    argum = ['/usr/bin/time', '-f', '%M',
             'python', 'multitask_sfan.py',
             '--num_tasks', str(num_tasks),
             '--networks', network_fname,
             '--node_weights']
    argum.extend(weights_fnames)
    argum.extend(params.split())
    print '+++'
    print argum
    p = subprocess.Popen(argum, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p_com = p.communicate()
    p_out = p_com[0].split("\n")
    p_err = p_com[1].split("\n")
    print p_com
    # Process the output to get lists of selected features
    sel_list = [[(int(x)-1) for x in line.split()] for line in p_out[3:3+num_tasks]]
    if not sel_list :
        #TODO : fix no sel_list issue#1
        #import pdb ; pdb.set_trace()
        print "WARNING : returned sel_list empty !! algo = np ; param = ", params
        sel_list = [[] for i in xrange(num_tasks)]
    # Process the output to get timing info
    timing = '\n'.join(p_out[3+num_tasks:])
    # Process the outut to get maxRSS info :
    maxRSS = p_err[-2]
    return sel_list, timing, maxRSS
def run_msfan(num_tasks, network_fname, weights_fnames, covariance_fname, params):
    """ Run multitask sfan.

    NOTE(review): Python 2 only (print statements, xrange). Same as
    run_msfan_nocorr but additionally passes '--covariance_matrix'.

    Arguments
    ---------
    num_tasks: int
        Number of tasks.
    network_fname: filename
        Path to the network file.
    weights_fnames: list of filenames
        List of paths to the network nodes files (one per task).
    covariance_fname: filename
        Path to the matrix of covariance (similarity) of tasks.
    params: string
        Hyperparameters, in the '-l <lambda> -e <eta> -m <mu>' format.

    Returns
    -------
    sel_list: list of lists
        For each task, a list of selected features, as indices,
        STARTING AT 0.
    timing: string
        Timing information printed by the solver.
    maxRSS: string
        Peak memory (KB) reported by /usr/bin/time on stderr.
    """
    argum = ['/usr/bin/time', '-f', '%M',
             'python', 'multitask_sfan.py',
             '--num_tasks', str(num_tasks),
             '--networks', network_fname,
             '--node_weights']
    argum.extend(weights_fnames)
    argum.extend(['--covariance_matrix', covariance_fname])
    argum.extend(params.split())
    print '+++'
    print argum
    p = subprocess.Popen(argum, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p_com = p.communicate()
    p_out = p_com[0].split("\n")
    p_err = p_com[1].split("\n")
    print p_com
    # Process the output to get lists of selected features
    sel_list = [[(int(x)-1) for x in line.split()] for line in p_out[3:3+num_tasks]]
    if not sel_list :
        #TODO : fix no sel_list issue#1
        #import pdb ; pdb.set_trace()
        print "WARNING : returned sel_list empty !! algo = msfan ; param = ", params
        sel_list = [[] for i in xrange(num_tasks)]
    # Process the output to get timing info
    timing = '\n'.join(p_out[3+num_tasks:])
    # Process the outut to get maxRSS info :
    maxRSS = p_err[-2]
    return sel_list, timing, maxRSS
def get_optimal_parameters_from_dict(selected_dict, num_features):
    """ Find optimal parameters from a dictionary of selected features.

    Arguments
    ---------
    selected_dict: dictionary
        keys = parameters
        values = dictionary
            keys = task index
            values = list of lists of selected features (one per subsample)
    num_features: int
        Total number of features.

    Returns
    -------
    opt_params: string
        The hyperparameters whose selections reach the highest mean
        (over tasks) consistency index across subsamples.
    """
    best_params = ''
    # -1 is below any reachable consistency index, so the first
    # candidate always wins the comparison.
    best_ci_mean = -1
    for params, per_task_selections in selected_dict.items():
        # One consistency index per task, over that task's subsamples.
        per_task_ci = [consistency_index_k(subsample_lists, num_features)
                       for subsample_lists in per_task_selections.values()]
        ci_mean = np.mean(per_task_ci)
        # '>=' so that, on ties, the last candidate encountered is kept
        # (same tie-breaking as before).
        if ci_mean >= best_ci_mean:
            best_ci_mean = ci_mean
            best_params = params
    return best_params
def run_ridge_selected(selected_features, genotype_fname, phenotype_fname,
                       tr_indices, te_indices, output_fname):
    """ Run a ridge-regression using only the selected features.

    Arguments
    ---------
    selected_features: list
        List of indices of selected features.
    genotype_fname: filename
        Path to genotype data (HDF5 file with an 'Xtr' table).
    phenotype_fname: filename
        Path to phenotype data.
    tr_indices: list
        List of training indices.
    te_indices: list
        List of test indices.
    output_fname: filename
        Path to file where to write list of predictions on the test set.

    Side effects
    ------------
    Write predictions on the test set to output_fname
    (NaN values when selected_features is empty).
    """
    # This function :
    # - Learn a Ridge Regression model that links
    #       genotype of selected features (Xtr)
    #       with continuous phenotype (ytr)
    #       of the train set (tr)
    # - Predicts continuous phenotype (preds)
    #       using genotype of selected features (Xte)
    #       of the test set (te)
    # - Save predicted continuous phenotypes in a file.
    # => it's a regression so it can only be used with continuous phenotype
    #----------------------------------------
    # Read data :
    if not selected_features :
        # Safeguard for when SFAN returns empty list
        # Avoid not allowed empty selections
        #TODO : fix no sel_list issue#1
        #import pdb; pdb.set_trace()
        print ('WARNING : no features was selected on this fold -> give NA predictions')
        # One NaN prediction per test sample.
        preds = np.array([np.nan ] * len(te_indices) )
    else :
        # read genotypes :
        with tb.open_file(genotype_fname, 'r') as h5f:
            table = h5f.root.Xtr
            # table layout: one row per feature, one column per sample.
            X = table[selected_features, :]
            # Per-sample feature vectors (samples x features layout
            # expected by scikit-learn).
            Xtr = [X[:,tr] for tr in tr_indices]
            Xte = [X[:,te] for te in te_indices]
        # read phenotypes :
        with open(phenotype_fname, 'r') as f:
            # continuous phenotype for each sample (in line)
            y = f.read().split()
            y = [float(item) for item in y]
            ytr = [ y[tr] for tr in tr_indices]
        #----------------------------------------
        # Instantiate a ridge regression (regularization strength chosen
        # by RidgeCV's internal cross-validation).
        model = linear_model.RidgeCV()
        # Train the ridge regression on the training set
        model.fit(Xtr, ytr)
        #----------------------------------------
        # Make predictions on the test set
        preds = model.predict(Xte)
    # Save predictions (one value per line, scientific notation)
    np.savetxt(output_fname, preds, fmt='%.3e')
def compute_ridge_selected_RMSE(phenotype_fnames, y_pred_template, xp_indices):
    """ Compute RMSE (Root Mean Squared Error) of cross-validated predictions.

    Arguments
    ---------
    phenotype_fnames: array of filenames
        Paths to phenotype data files (one per task).
    y_pred_template: string
        Template of the paths of the per-fold prediction files,
        with two '%d' placeholders: (fold index, task index).
    xp_indices: list of dictionaries
        fold_idx
        {
            'trIndices': list of train indices,
            'teIndices': list of test indices,
            'ssIndices': list of list of subsample indices
        }

    Returns
    -------
    rmse_list: list
        RMSE, task per task (np.nan for a task with no valid prediction).
    """
    rmse_list = []
    num_folds = len(xp_indices)
    # Sample index of each prediction, in the order in which predictions
    # were produced (fold after fold). Loop-invariant, so computed once.
    pred_sample_indices = [idx for fold in xp_indices
                           for idx in fold['teIndices']]
    for task_idx, phenotype_fname in enumerate(phenotype_fnames):
        # Ground-truth phenotypes, in sample order 0, 1, ..., n-1.
        with open(phenotype_fname, 'r') as f_true:
            all_y_true = [float(y) for y in f_true.read().split()]
        # Concatenate the per-fold prediction files, fold after fold.
        all_y_pred = []
        for fold_idx in range(num_folds):
            with open(y_pred_template % (fold_idx, task_idx), 'r') as f_pred:
                all_y_pred.extend(float(y) for y in f_pred.read().split())
        # Reorder predictions back into sample order.
        # all_y_pred[k] is the prediction for sample pred_sample_indices[k],
        # so the INVERSE permutation must be applied.
        # BUGFIX: the previous version computed
        # [all_y_pred[i] for i in pred_sample_indices], i.e. applied the
        # permutation itself, pairing predictions with the wrong
        # ground-truth values whenever folds shuffle the samples.
        all_y_pred_sorted = [np.nan] * len(all_y_pred)
        for pos, sample_idx in enumerate(pred_sample_indices):
            all_y_pred_sorted[sample_idx] = all_y_pred[pos]
        # run_ridge_selected writes NaN predictions when no feature was
        # selected on a fold; exclude those entries before computing RMSE.
        #TODO : is it possible to have NA only for some ind ?
        not_NaN_idx = np.where(~np.isnan(all_y_pred_sorted))[0]
        if not not_NaN_idx.size:
            # Only NaNs: no valid prediction at all for this task.
            rmse = np.nan
        else:
            y_true_kept = [all_y_true[i] for i in not_NaN_idx]
            y_pred_kept = [all_y_pred_sorted[i] for i in not_NaN_idx]
            rmse = math.sqrt(metrics.mean_squared_error(y_true_kept,
                                                        y_pred_kept))
        rmse_list.append(rmse)
    return rmse_list
def evaluate_classification(causal_features, selected_features, num_features):
    """ Compute metrics scoring classification, for all tasks.

    Arguments
    ---------
    causal_features: list of lists
        List of lists of real causal features (one list per task).
    selected_features: list of lists
        List of lists of selected features (one list per task).
    num_features : int
        Total number of features.

    Returns
    -------
    acc_list: list
        Accuracy per task (fraction of features whose predicted status
        matches the true status).
    mcc_list: list
        Matthews correlation coefficient per task (more informative than
        accuracy when classes are very unbalanced).
    pre_list: list
        Precision (PPV = TP / (TP + FP)) per task.
    spe_list: list
        Recall (sensitivity, TPR = TP / (TP + FN)) per task.
    """
    acc_list = []
    mcc_list = []
    pre_list = []
    spe_list = []
    for task_idx in range(len(causal_features)):
        # Boolean vectors over all features: True where the feature is
        # truly causal (y_true) / was selected (y_pred).
        causal_set = set(causal_features[task_idx])
        selected_set = set(selected_features[task_idx])
        y_true = [feat in causal_set for feat in range(num_features)]
        y_pred = [feat in selected_set for feat in range(num_features)]
        # Score the selection as a binary classification of features.
        acc_list.append(metrics.accuracy_score(y_true, y_pred))
        mcc_list.append(metrics.matthews_corrcoef(y_true, y_pred))
        pre_list.append(metrics.precision_score(y_true, y_pred))
        spe_list.append(metrics.recall_score(y_true, y_pred))
    return acc_list, mcc_list, pre_list, spe_list
# FIXME(review): an unresolved Git merge-conflict marker '<<<<<<< HEAD' was
# left here (the matching '=======' / '>>>>>>>' markers are not present in
# this chunk); the conflict must be resolved by hand.
def compute_ppv_sensitivity(causal_fname, selected_list, num_features):
    """ Compute PPV (Positive Predicted Values = precision)
    and sensitivity (true positive rate = recall) for all tasks.

    Arguments
    ---------
    causal_fname: filename
        File containing causal features (one line per task, space-separated).
    selected_list: list of lists
        List of lists of selected features (one list per task).
    num_features : int
        Total number of features

    Returns
    -------
    ppv_list: list
        List of Positive Predicted Values (PPV), task per task.
        PPV = precision = TP / (TP + FP)
    tpr_list: list
        List of sensitivities (TPR), task per task.
        sensitivity = recall = TP / (TP + FN)
    """
    ppv_list = []
    tpr_list = []
    with open(causal_fname, 'r') as f:
        # One line per task: indices of the truly causal features.
        for line_idx, line in enumerate(f):
            # At the beginning, we consider that the features are
            # neither causal (y_true) ...
            y_true = [False]*num_features
            # ... nor predicted as such (y_pred).
            y_pred = [False]*num_features
            # Then we flag the causal ones,
            for y_true_indx in map(int, line.split()):
                y_true[y_true_indx] = True
            # and those that have been selected for this task.
            for y_pred_indx in selected_list[line_idx]:
                y_pred[y_pred_indx] = True
            # BUGFIX: PPV is the precision, TP / (TP + FP). The previous
            # version called metrics.accuracy_score here, which contradicts
            # both this docstring and evaluate_classification (which uses
            # precision_score for the same quantity).
            ppv_list.append( metrics.precision_score(y_true, y_pred) )
            tpr_list.append( metrics.recall_score(y_true, y_pred) )
    return ppv_list, tpr_list
def extract_res_from_files(f_names, num_tasks, num_repeat, num_folds=None):
    """Compute mean and std per task from files holding values of measures.

    Arguments
    ---------
    f_names : filenames
        Paths to files, in the order st, np, msfan,
        holding space-separated values, one line per repeat.
    num_tasks: int
        Number of tasks.
    num_repeat : int
        Number of repeats.
    num_folds : int / None
        Number of folds.
        Must be given for ppv and tpr files, whose values per repeat
        (= per line) are per fold and then per task.

    Returns
    -------
    means, std : dict of list
        For each algo ('st', 'np', 'msfan'), a list of means / std,
        task per task.
    """
    algos = ['st', 'np', 'msfan']
    # Placeholders, fully overwritten below.
    # BUGFIX: the previous initializer '[float()*num_tasks]' evaluated to
    # the 1-element list [0.0], not a length-num_tasks list.
    means = dict((algo, [0.0] * num_tasks) for algo in algos)
    std = dict((algo, [0.0] * num_tasks) for algo in algos)
    if num_folds:
        # ppv / tpr layout: each line (= repeat) holds one value per fold
        # and per task, fold-major; first average over the folds to get
        # one value per task per repeat.
        for algo_idx, algo in enumerate(algos):
            val_ci = [[0.0 for i in range(num_tasks)] for j in range(num_repeat)]
            # val_ci[repeat][task]
            with open(f_names[algo_idx], 'r') as f:
                for j, line in enumerate(f):
                    content = [float(item) for item in line.split()]
                    # Values of task task_idx sit at positions
                    # task_idx, task_idx + num_tasks, ... (one per fold).
                    content_task = []
                    for task_idx in range(num_tasks):
                        content_task.append(np.mean(content[task_idx::num_tasks]))
                    val_ci[j] = content_task
            # Mean / std over repeats, one value per task.
            means[algo] = np.mean(val_ci, axis=0).tolist()
            std[algo] = np.std(val_ci, axis=0).tolist()
    else:
        # rmse / consistency layout: one line per repeat, one column per task.
        for algo_idx, algo in enumerate(algos):
            val_ci = [[0.0 for i in range(num_tasks)] for j in range(num_repeat)]
            # val_ci[repeat][task]
            with open(f_names[algo_idx], 'r') as f:
                for j, line in enumerate(f):
                    val_ci[j] = [float(item) for item in line.split()]
            # nanmean / nanstd ignore NaN entries (e.g. the RMSE of a task
            # with no valid prediction) instead of propagating them.
            means[algo] = np.nanmean(val_ci, axis=0)
            std[algo] = np.nanstd(val_ci, axis=0)
    return means, std
class Framework(object):
    """ Setting up evaluation framework.

    Attributes
    ----------
    self.num_samples: int
        Number of samples.
    self.num_folds: int
        Number of cross-validation folds
    self.num_subsamples: int
        Number of subsamples (to evaluate stability)
    self.xp_indices: list of dictionaries
        fold_idx
        {
            'trIndices': list of train indices,
            'teIndices': list of test indices,
            'ssIndices': list of list of subsample indices
        }
    """
    def __init__(self, num_samples, num_folds, num_subsamples):
        """
        Parameters
        ----------
        num_samples: int
            Number of samples.
        num_folds: int
            Number of cross-validation folds
        num_subsamples: int
            Number of subsamples (to evaluate stability)
        """
        self.num_samples = num_samples
        self.num_folds = num_folds
        self.num_subsamples = num_subsamples
        # One dictionary of index lists per fold; filled by compute_indices().
        self.xp_indices = [{'trIndices': list(), 'teIndices':list(), 'ssIndices':list()} for fold in xrange(num_folds)]

    def compute_indices(self, seed=None):
        """ Compute the cross-validation folds and subsample indices.

        Parameters
        ----------
        seed: {int, None}, optional
            random seed.
            Will always return the same with the same random seed.

        Modified attributes
        -------------------
        xp_indices: list of dictionaries
            fold_idx
            {
                'trIndices': list of train indices,
                'teIndices': list of test indices,
                'ssIndices': list of list of subsample indices
            }
        """
        # use sklearn.cross_validation
        # KFold only needs the number of samples, hence the dummy zeros array.
        kf = model_selection.KFold(n_splits=self.num_folds, shuffle=True,
                                   random_state=seed).split(np.zeros(self.num_samples))
        for fold_idx, (train_indices_f, test_indices_f) in enumerate(kf):
            #print fold_idx, train_indices_f, test_indices_f
            # Generate cross-validation indices
            self.xp_indices[fold_idx]['trIndices'] = train_indices_f.tolist()
            self.xp_indices[fold_idx]['teIndices'] = test_indices_f.tolist()
            # For each train set, generate self.num_subsamples subsample sets of indices (90% of the train_set_f)
            # NOTE(review): train_test_split is not seeded here, so subsamples
            # differ between runs even when 'seed' is given -- confirm whether
            # that is intended.
            for i_ss in xrange(self.num_subsamples) :
                train_indices_ss, test_indices_ss = model_selection.train_test_split(train_indices_f,
                                                                                     train_size=0.9)
                self.xp_indices[fold_idx]['ssIndices'].append( train_indices_ss.tolist() )

    def save_indices(self, out_dir, simu_id):
        """ Save the cross-validation folds and subsample indices to files.

        Parameters
        ----------
        out_dir : dir path
            fold where indices have to be saved
        simu_id : string
            Name of the simulation, to be used to name files.

        Generated files
        ---------------
        For each fold_idx:
            <out_dir>/<simu_id>.fold<fold_idx>.trIndices:
                Space-separated list of training indices.
            <out_dir>/<simu_id>.fold<fold_idx>.teIndices:
                Space-separated list of test indices.
            For each subsample_idx:
                <out_dir>/<simu_id>.fold<fold_idx>.ss<ss_idx>.ssIndices
                    Space-separated lists of subsample indices,
                    one line per list / subsample.
        """
        # use np.savetxt ??? why ?
        # Filename templates; '%d' placeholders are fold / subsample indices.
        trIndices_fname = out_dir+'/'+simu_id+'.fold%d.trIndices'
        teIndices_fname = out_dir+'/'+simu_id+'.fold%d.teIndices'
        ssIndices_fname = out_dir+'/'+simu_id+'.fold%d.ss%d.ssIndices'
        for fold_idx in xrange(self.num_folds) :
            with open(trIndices_fname %(fold_idx), 'w') as trIndices_f :
                trIndices_f.write( " ".join(str(i) for i in self.xp_indices[fold_idx]["trIndices"] ) )
            #np.savetxt(trIndices_fname %(fold_idx), self.xp_indices[fold_idx]["trIndices"], delimiter=' ', fmt='%d')
            with open(teIndices_fname %(fold_idx),'w') as teIndices_f :
                teIndices_f.write( " ".join(str(i) for i in self.xp_indices[fold_idx]["teIndices"] ) )
            #np.savetxt(teIndices_fname %(fold_idx), self.xp_indices[fold_idx]["teIndices"], delimiter=' ', fmt='%d')
            for ss_idx in xrange(self.num_subsamples) :
                with open(ssIndices_fname %(fold_idx,ss_idx), 'w') as ssIndices_f:
                    ssIndices_f.write( " ".join(str(i) for i in self.xp_indices[fold_idx]["ssIndices"][ss_idx] ) )
                #np.savetxt(ssIndices_fname %(fold_idx,ss_idx), self.xp_indices[fold_idx]["ssIndices"][ss_idx], delimiter=' ', fmt='%d')
``` |
[
{
"content": "Here is the snippet:\n```python\nimport os\nimport random\nimport time\n\nimport json\nfrom locust import HttpLocust, TaskSet, task\n\nfrom lib.baseTaskSet import baseTaskSet\n\n# TODO - make these config-driven\nfrom lib.openstack.keystone import get_auth_token\nfrom lib.openstack.cinder import l... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\nimport os\nimport random\nimport time\n\nimport json\nfrom locust import HttpLocust, TaskSet, task\n\nfrom lib.baseTaskSet import baseTaskSet\n\n# TODO - make these config-driven\nfrom lib.openstack.keystone import get_auth_token\nfrom lib.openstack... | ```python
import os
import random
import time
import json
from locust import HttpLocust, TaskSet, task
from lib.baseTaskSet import baseTaskSet
# TODO - make these config-driven
from lib.openstack.keystone import get_auth_token
from lib.openstack.cinder import list_volumes
from lib.openstack.cinder import list_volumes_detail
from lib.openstack.cinder import list_volume_detail
from lib.openstack.cinder import create_volume
from lib.openstack.cinder import delete_volume
from lib.openstack.cinder import cinder_get_volume_id
from lib.openstack.nova import nova_get_image_id
from lib.openstack.nova import list_limits
class UserBehavior(baseTaskSet):
    """Locust task set exercising OpenStack Cinder volume operations
    (create / delete / list) plus occasional Nova and Keystone calls."""

    def on_start(self):
        # Called once per simulated user before any task runs.
        super(UserBehavior, self).on_start()
        # ID of the volume currently owned by this user (None = no volume).
        self.volume_id = None
        # Number of volumes created so far; used to build unique names.
        self.volume_count = 0
        # Pool of sleep durations (seconds); weighting favours short sleeps.
        self.sleep_times=[0,0,1,1,1,1,3,3,3,5,5,5,5,10,10,30,30]
        self.auth_token, self.tenant_id, self.service_catalog = get_auth_token(self)

    def chance(self):
        # Return True with probability 1/5 (one even value in five draws).
        chances = [1,1,1,1,2]
        if random.choice(chances)%2==0:
            return True
        else:
            return False

    def rand_sleep(self):
        # Sleep for a random duration drawn from self.sleep_times.
        time.sleep(random.choice(self.sleep_times))

    @task(2)
    def update_volume_id(self):
        # Refresh the cached volume id from Cinder.
        self.volume_id = cinder_get_volume_id(self)

    @task(5)
    def cinder_create_volume(self):
        # Create a volume with randomized parameters (including some
        # deliberately invalid sizes) unless this user already owns one.
        if not self.volume_id:
            volume_id=None
            image_id=None
            bootable=False
            size=1
            # volume_id: sometimes clone from an existing volume
            if self.chance():
                volume_id = cinder_get_volume_id(self)
            # image_id: sometimes create from an image
            if self.chance():
                image_id = nova_get_image_id(self)
            # bootable
            if self.chance():
                bootable=True
            # metadata
            # size: mix of valid sizes and bad inputs (strings, negatives,
            # zero, float) to exercise API validation
            sizes = [1,1,1,3,3,5,5,2.5,100,99,'a','abbazabba',-1,0]
            size = random.choice(sizes)
            # description
            # snapshot_id
            # NOTE(review): self.id is presumably set by baseTaskSet -- confirm.
            response = create_volume(self,
                                     name="volume-%s-%s" % (self.id, self.volume_count),
                                     volume_id=volume_id,
                                     image_id=image_id,
                                     bootable=bootable,
                                     size=size)
            print response.content
            print '!'*80
            self.volume_id = json.loads(response.content)['volume']['id']
            self.volume_count += 1
            self.rand_sleep()
        else:
            self.output('Volume already exists, not creating one:')
            self.output("volume id: %s" % self.volume_id)

    @task(2)
    def cinder_delete_volume(self):
        # Delete the owned volume if any; otherwise create one instead.
        if self.volume_id:
            delete_volume(self, self.volume_id)
            # TODO - test response
            self.volume_id = None
            self.rand_sleep()
        else:
            self.cinder_create_volume()

    @task(5)
    def cinder_list_volumes(self):
        list_volumes(self)

    @task(5)
    def cinder_list_volumes_detail(self):
        list_volumes_detail(self)

    @task(4)
    def cinder_list_volume_detail(self):
        list_volume_detail(self)

    @task(1)
    def nova_list_limits(self):
        list_limits(self)

    @task(1)
    def keystone_get_auth(self):
        # Re-authenticate and refresh token / tenant / catalog.
        self.auth_token, self.tenant_id, self.service_catalog = get_auth_token(self)
class WebsiteUser(HttpLocust):
    """Locust entry point: runs UserBehavior tasks, waiting 500-1000 ms
    between consecutive tasks."""
    task_set = UserBehavior
    min_wait=500
    max_wait=1000
``` |
[
{
"content": "```python\n# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.\n# This source code is licensed under both the GPLv2 (found in the\n# COPYING file in the root directory) and Apache 2.0 License\n# (found in the LICENSE.Apache file in the root directory).\n\nfrom advisor.rule_parser... | [
{
"content": "<|memory_start|>```python\n# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.\n# This source code is licensed under both the GPLv2 (found in the\n# COPYING file in the root directory) and Apache 2.0 License\n# (found in the LICENSE.Apache file in the root directory).\n\nfrom adv... | ```python
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
from advisor.rule_parser import RulesSpec
from advisor.db_log_parser import DatabaseLogs, DataSource
from advisor.db_options_parser import DatabaseOptions
from advisor.db_stats_fetcher import LogStatsParser, OdsStatsFetcher
import argparse
def main(args):
    """Run the RocksDB advisor: load and validate the rules spec, assemble
    the data sources (OPTIONS file, LOG files, log statistics and optionally
    ODS time series), then print the rules triggered by them.

    Parameters
    ----------
    args: argparse.Namespace
        Parsed command-line arguments (see the parser defined below).
    """
    # initialise the RulesSpec parser and validate the spec file
    rule_spec_parser = RulesSpec(args.rules_spec)
    rule_spec_parser.load_rules_from_spec()
    rule_spec_parser.perform_section_checks()
    # initialize the DatabaseOptions object
    db_options = DatabaseOptions(args.rocksdb_options)
    # Create DatabaseLogs object
    db_logs = DatabaseLogs(
        args.log_files_path_prefix, db_options.get_column_families()
    )
    # Create the Log STATS object
    db_log_stats = LogStatsParser(
        args.log_files_path_prefix, args.stats_dump_period_sec
    )
    data_sources = {
        DataSource.Type.DB_OPTIONS: [db_options],
        DataSource.Type.LOG: [db_logs],
        DataSource.Type.TIME_SERIES: [db_log_stats]
    }
    # ODS time-series fetching is optional: only when a client is given.
    if args.ods_client:
        data_sources[DataSource.Type.TIME_SERIES].append(OdsStatsFetcher(
            args.ods_client,
            args.ods_entity,
            args.ods_tstart,
            args.ods_tend,
            args.ods_key_prefix
        ))
    triggered_rules = rule_spec_parser.get_triggered_rules(
        data_sources, db_options.get_column_families()
    )
    rule_spec_parser.print_rules(triggered_rules)
if __name__ == '__main__':
    # Command-line interface: parse the arguments and hand off to main().
    parser = argparse.ArgumentParser(description='Use this script to get\
        suggestions for improving Rocksdb performance.')
    parser.add_argument(
        '--rules_spec', required=True, type=str,
        help='path of the file containing the expert-specified Rules'
    )
    parser.add_argument(
        '--rocksdb_options', required=True, type=str,
        help='path of the starting Rocksdb OPTIONS file'
    )
    parser.add_argument(
        '--log_files_path_prefix', required=True, type=str,
        help='path prefix of the Rocksdb LOG files'
    )
    parser.add_argument(
        '--stats_dump_period_sec', required=True, type=int,
        help='the frequency (in seconds) at which STATISTICS are printed to ' +
        'the Rocksdb LOG file'
    )
    # ODS arguments (all optional; ODS fetching is enabled by --ods_client)
    parser.add_argument(
        '--ods_client', type=str, help='the ODS client binary'
    )
    parser.add_argument(
        '--ods_entity', type=str,
        help='the servers for which the ODS stats need to be fetched'
    )
    parser.add_argument(
        '--ods_key_prefix', type=str,
        help='the prefix that needs to be attached to the keys of time ' +
        'series to be fetched from ODS'
    )
    parser.add_argument(
        '--ods_tstart', type=int,
        help='start time of timeseries to be fetched from ODS'
    )
    parser.add_argument(
        '--ods_tend', type=int,
        help='end time of timeseries to be fetched from ODS'
    )
    args = parser.parse_args()
    main(args)
``` |
[
{
"content": "Here is the snippet:\n```python\n##\n# Copyright 2013-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC)... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n##\n# Copyright 2013-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercompu... | ```python
##
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for easyconfig files.
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import shutil
import sys
import tempfile
from distutils.version import LooseVersion
from unittest import TestCase, TestLoader, main
import easybuild.main as eb_main
import easybuild.tools.options as eboptions
from easybuild.base import fancylogger
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig.default import DEFAULT_CONFIG
from easybuild.framework.easyconfig.format.format import DEPENDENCY_PARAMETERS
from easybuild.framework.easyconfig.easyconfig import get_easyblock_class, letter_dir_for
from easybuild.framework.easyconfig.easyconfig import resolve_template
from easybuild.framework.easyconfig.parser import EasyConfigParser, fetch_parameters_from_easyconfig
from easybuild.framework.easyconfig.tools import check_sha256_checksums, dep_graph, get_paths_for, process_easyconfig
from easybuild.tools import config
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import GENERAL_CLASS, build_option
from easybuild.tools.filetools import change_dir, is_generic_easyblock, remove_file
from easybuild.tools.filetools import verify_checksum, which, write_file
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.modules import modules_tool
from easybuild.tools.py2vs3 import string_type, urlopen
from easybuild.tools.robot import check_conflicts, resolve_dependencies
from easybuild.tools.run import run_cmd
from easybuild.tools.options import set_tmpdir
from easybuild.tools.utilities import nub
# indicates whether all the single tests are OK,
# and that bigger tests (building dep graph, testing for conflicts, ...) can be run as well
# other than optimizing for time, this also helps to get around problems like http://bugs.python.org/issue10949
single_tests_ok = True
class EasyConfigTest(TestCase):
    """Baseclass for easyconfig testcases."""

    # NOTE: everything below runs once at class-definition time, setting up
    # the EasyBuild configuration shared by all test methods.

    # make sure that the EasyBuild installation is still known even if we purge an EB module
    if os.getenv('EB_SCRIPT_PATH') is None:
        eb_path = which('eb')
        if eb_path is not None:
            os.environ['EB_SCRIPT_PATH'] = eb_path

    # initialize configuration (required for e.g. default modules_tool setting)
    eb_go = eboptions.parse_options()
    config.init(eb_go.options, eb_go.get_options_by_section('config'))
    build_options = {
        'check_osdeps': False,
        'external_modules_metadata': {},
        'force': True,
        'local_var_naming_check': 'error',
        'optarch': 'test',
        'robot_path': get_paths_for("easyconfigs")[0],
        'silent': True,
        'suffix_modules_path': GENERAL_CLASS,
        'valid_module_classes': config.module_classes(),
        'valid_stops': [x[0] for x in EasyBlock.get_steps()],
    }
    config.init_build_options(build_options=build_options)
    set_tmpdir()
    del eb_go

    # put dummy 'craype-test' module in place, which is required for parsing easyconfigs using Cray* toolchains
    TMPDIR = tempfile.mkdtemp()
    os.environ['MODULEPATH'] = TMPDIR
    write_file(os.path.join(TMPDIR, 'craype-test'), '#%Module\n')

    log = fancylogger.getLogger("EasyConfigTest", fname=False)
    # make sure a logger is present for main
    eb_main._log = log

    # class-level caches shared by all tests; populated lazily by
    # process_all_easyconfigs()
    ordered_specs = None
    parsed_easyconfigs = []
def process_all_easyconfigs(self):
    """Process all easyconfigs and resolve inter-easyconfig dependencies.

    Populates the class-level caches EasyConfigTest.parsed_easyconfigs and
    EasyConfigTest.ordered_specs so the (expensive) parsing is done once.
    """
    # all available easyconfig files
    easyconfigs_path = get_paths_for("easyconfigs")[0]
    specs = glob.glob('%s/*/*/*.eb' % easyconfigs_path)
    # parse all easyconfigs if they haven't been already
    if not EasyConfigTest.parsed_easyconfigs:
        for spec in specs:
            EasyConfigTest.parsed_easyconfigs.extend(process_easyconfig(spec))
    # filter out external modules (iterate over a copy: the list is mutated)
    for ec in EasyConfigTest.parsed_easyconfigs:
        for dep in ec['dependencies'][:]:
            if dep.get('external_module', False):
                ec['dependencies'].remove(dep)
    EasyConfigTest.ordered_specs = resolve_dependencies(
        EasyConfigTest.parsed_easyconfigs, modules_tool(), retain_all_deps=True)
def test_dep_graph(self):
    """Unit test that builds a full dependency graph."""
    # pygraph (needed for graph construction) is unavailable before
    # Python 2.6; also skip when an earlier single test already failed.
    if LooseVersion(sys.version) < LooseVersion('2.6') or not single_tests_ok:
        print("(skipped dep graph test)")
        return
    # Write the graph to a throwaway .dot file, then clean it up.
    handle, dot_path = tempfile.mkstemp(suffix='.dot')
    os.close(handle)
    if EasyConfigTest.ordered_specs is None:
        self.process_all_easyconfigs()
    dep_graph(dot_path, EasyConfigTest.ordered_specs)
    remove_file(dot_path)
def test_conflicts(self):
    """Check whether any conflicts occur in software dependency graphs."""
    if not single_tests_ok:
        print("(skipped conflicts test)")
        return
    if EasyConfigTest.ordered_specs is None:
        self.process_all_easyconfigs()
    conflicts_found = check_conflicts(EasyConfigTest.ordered_specs, modules_tool(),
                                      check_inter_ec_conflicts=False)
    self.assertFalse(conflicts_found, "No conflicts detected")
def check_dep_vars(self, gen, dep, dep_vars):
    """
    Check whether available variants of a particular dependency are acceptable or not.

    :param gen: toolchain generation, e.g. '2019b' or 'GCCcore-10.2'
    :param dep: name of the dependency being checked
    :param dep_vars: dict mapping a variant string ("version: <ver>; versionsuffix: <suffix>")
                     to the list of easyconfig filenames that use that variant;
                     NOTE: entries may be filtered out of this dict in place
    :return: True if the remaining set of variants is acceptable, False otherwise
    """
    # 'guilty' until proven 'innocent'
    res = False

    # filter out wrapped Java versions
    # i.e. if the version of one is a prefix of the version of the other one (e.g. 1.8 & 1.8.0_181)
    if dep == 'Java':
        dep_vars_to_check = sorted(dep_vars.keys())

        retained_dep_vars = []

        while dep_vars_to_check:
            dep_var = dep_vars_to_check.pop()
            dep_var_version = dep_var.split(';')[0]

            # remove dep vars wrapped by current dep var
            dep_vars_to_check = [x for x in dep_vars_to_check if not x.startswith(dep_var_version + '.')]

            retained_dep_vars = [x for x in retained_dep_vars if not x.startswith(dep_var_version + '.')]

            retained_dep_vars.append(dep_var)

        for key in list(dep_vars.keys()):
            if key not in retained_dep_vars:
                del dep_vars[key]

    # filter out binutils with empty versionsuffix which is used to build toolchain compiler
    if dep == 'binutils' and len(dep_vars) > 1:
        empty_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: ')]
        if len(empty_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != empty_vsuff_vars[0])

    # multiple variants of HTSlib is OK as long as they are deps for a matching version of BCFtools;
    # same goes for WRF and WPS
    for dep_name, parent_name in [('HTSlib', 'BCFtools'), ('WRF', 'WPS')]:
        if dep == dep_name and len(dep_vars) > 1:
            for key in list(dep_vars):
                ecs = dep_vars[key]
                # filter out dep variants that are only used as dependency for parent with same version
                dep_ver = re.search('^version: (?P<ver>[^;]+);', key).group('ver')
                if all(ec.startswith('%s-%s-' % (parent_name, dep_ver)) for ec in ecs) and len(dep_vars) > 1:
                    dep_vars.pop(key)

    # multiple versions of Boost is OK as long as they are deps for a matching Boost.Python
    if dep == 'Boost' and len(dep_vars) > 1:
        for key in list(dep_vars):
            ecs = dep_vars[key]
            # filter out Boost variants that are only used as dependency for Boost.Python with same version
            boost_ver = re.search('^version: (?P<ver>[^;]+);', key).group('ver')
            if all(ec.startswith('Boost.Python-%s-' % boost_ver) for ec in ecs):
                dep_vars.pop(key)

    # filter out FFTW and imkl with -serial versionsuffix which are used in non-MPI subtoolchains
    if dep in ['FFTW', 'imkl']:
        serial_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -serial')]
        if len(serial_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != serial_vsuff_vars[0])

    # filter out BLIS and libFLAME with -amd versionsuffix
    # (AMD forks, used in gobff/*-amd toolchains)
    if dep in ['BLIS', 'libFLAME']:
        amd_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -amd')]
        if len(amd_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != amd_vsuff_vars[0])

    # filter out ScaLAPACK with -BLIS-* versionsuffix, used in goblf toolchain
    if dep == 'ScaLAPACK':
        blis_vsuff_vars = [v for v in dep_vars.keys() if '; versionsuffix: -BLIS-' in v]
        if len(blis_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != blis_vsuff_vars[0])

    if dep == 'ScaLAPACK':
        # filter out ScaLAPACK with -bf versionsuffix, used in gobff toolchain
        bf_vsuff_vars = [v for v in dep_vars.keys() if '; versionsuffix: -bf' in v]
        if len(bf_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != bf_vsuff_vars[0])
        # filter out ScaLAPACK with -bl versionsuffix, used in goblf toolchain
        bl_vsuff_vars = [v for v in dep_vars.keys() if '; versionsuffix: -bl' in v]
        if len(bl_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != bl_vsuff_vars[0])

    # for some dependencies, we allow exceptions for software that depends on a particular version,
    # as long as that's indicated by the versionsuffix
    if dep in ['ASE', 'Boost', 'Java', 'Lua', 'PLUMED', 'PyTorch', 'R', 'TensorFlow'] and len(dep_vars) > 1:
        for key in list(dep_vars):
            dep_ver = re.search('^version: (?P<ver>[^;]+);', key).group('ver')
            # use version of Java wrapper rather than full Java version
            if dep == 'Java':
                dep_ver = '.'.join(dep_ver.split('.')[:2])
            # filter out dep version if all easyconfig filenames using it include specific dep version
            if all(re.search('-%s-%s' % (dep, dep_ver), v) for v in dep_vars[key]):
                dep_vars.pop(key)
            # always retain at least one dep variant
            if len(dep_vars) == 1:
                break

    # filter R dep for a specific version of Python 2.x
    if dep == 'R' and len(dep_vars) > 1:
        for key in list(dep_vars):
            if '; versionsuffix: -Python-2' in key:
                dep_vars.pop(key)
            # always retain at least one variant
            if len(dep_vars) == 1:
                break

    # filter out variants that are specific to a particular version of CUDA
    cuda_dep_vars = [v for v in dep_vars.keys() if '-CUDA' in v]
    if len(dep_vars) > len(cuda_dep_vars):
        for key in list(dep_vars):
            if re.search('; versionsuffix: .*-CUDA-[0-9.]+', key):
                dep_vars.pop(key)

    # some software packages require a specific (older/newer) version of a particular dependency
    old_dep_versions = {
        # EMAN2 2.3 requires Boost(.Python) 1.64.0
        'Boost': [('1.64.0;', [r'Boost.Python-1\.64\.0-', r'EMAN2-2\.3-'])],
        'Boost.Python': [('1.64.0;', [r'EMAN2-2\.3-'])],
        # Kraken 1.x requires Jellyfish 1.x (Roary & metaWRAP depend on Kraken 1.x)
        'Jellyfish': [(r'1\.', [r'Kraken-1\.', r'Roary-3\.12\.0', r'metaWRAP-1\.2'])],
        # Libint 1.1.6 is required by older CP2K versions
        'Libint': [(r'1\.1\.6', [r'CP2K-[3-6]'])],
        # libxc 2.x or 3.x is required by ABINIT, AtomPAW, CP2K, GPAW, horton, PySCF, WIEN2k
        # (Qiskit depends on PySCF), Elk 7.x requires libxc >= 5
        'libxc': [
            (r'[23]\.', [r'ABINIT-', r'AtomPAW-', r'CP2K-', r'GPAW-', r'horton-',
                         r'PySCF-', r'Qiskit-', r'WIEN2k-']),
            (r'5\.', [r'Elk-']),
        ],
        # some software depends on numba, which typically requires an older LLVM;
        # this includes BirdNET, cell2location, cryoDRGN, librosa, PyOD, Python-Geometric, scVelo, scanpy
        'LLVM': [
            # numba 0.47.x requires LLVM 7.x or 8.x (see https://github.com/numba/llvmlite#compatibility)
            (r'8\.', [r'numba-0\.47\.0-', r'librosa-0\.7\.2-', r'BirdNET-20201214-',
                      r'scVelo-0\.1\.24-', r'PyTorch-Geometric-1\.[34]\.2']),
            (r'10\.0\.1', [r'cell2location-0\.05-alpha-', r'cryoDRGN-0\.3\.2-', r'loompy-3\.0\.6-',
                           r'numba-0\.52\.0-', r'PyOD-0\.8\.7-', r'PyTorch-Geometric-1\.6\.3',
                           r'scanpy-1\.7\.2-', r'umap-learn-0\.4\.6-']),
        ],
        # rampart requires nodejs > 10, artic-ncov2019 requires rampart
        'nodejs': [('12.16.1', ['rampart-1.2.0rc3-', 'artic-ncov2019-2020.04.13'])],
        # OPERA requires SAMtools 0.x
        'SAMtools': [(r'0\.', [r'ChimPipe-0\.9\.5', r'Cufflinks-2\.2\.1', r'OPERA-2\.0\.6',
                               r'CGmapTools-0\.1\.2', r'BatMeth2-2\.1'])],
        # NanoPlot, NanoComp use an older version of Seaborn
        'Seaborn': [(r'0\.10\.1', [r'NanoComp-1\.13\.1-', r'NanoPlot-1\.33\.0-'])],
        'TensorFlow': [
            # medaka 0.11.4/0.12.0 requires recent TensorFlow <= 1.14 (and Python 3.6),
            # artic-ncov2019 requires medaka
            ('1.13.1;', ['medaka-0.11.4-', 'medaka-0.12.0-', 'artic-ncov2019-2020.04.13']),
            # medaka 1.1.* and 1.2.* requires TensorFlow 2.2.0
            # (while other 2019b easyconfigs use TensorFlow 2.1.0 as dep);
            # TensorFlow 2.2.0 is also used as a dep for Horovod 0.19.5;
            # decona 0.1.2 and NGSpeciesID 0.1.1.1 depend on medaka 1.1.3
            ('2.2.0;', ['medaka-1.2.[0]-', 'medaka-1.1.[13]-', 'Horovod-0.19.5-', 'decona-0.1.2-',
                        'NGSpeciesID-0.1.1.1-']),
        ],
        # medaka 1.1.* and 1.2.* requires Pysam 0.16.0.1,
        # which is newer than what others use as dependency w.r.t. Pysam version in 2019b generation;
        # decona 0.1.2 and NGSpeciesID 0.1.1.1 depend on medaka 1.1.3
        'Pysam': [('0.16.0.1;', ['medaka-1.2.[0]-', 'medaka-1.1.[13]-', 'decona-0.1.2-',
                                 'NGSpeciesID-0.1.1.1-'])],
    }
    if dep in old_dep_versions and len(dep_vars) > 1:
        for key in list(dep_vars):
            for version_pattern, parents in old_dep_versions[dep]:
                # filter out known old dependency versions
                if re.search('^version: %s' % version_pattern, key):
                    # only filter if the easyconfig using this dep variants is known
                    if all(any(re.search(p, x) for p in parents) for x in dep_vars[key]):
                        dep_vars.pop(key)

    # filter out ELSI variants with -PEXSI suffix
    if dep == 'ELSI' and len(dep_vars) > 1:
        pexsi_vsuff_vars = [v for v in dep_vars.keys() if v.endswith('versionsuffix: -PEXSI')]
        if len(pexsi_vsuff_vars) == 1:
            dep_vars = dict((k, v) for (k, v) in dep_vars.items() if k != pexsi_vsuff_vars[0])

    # only single variant is always OK
    if len(dep_vars) == 1:
        res = True

    elif len(dep_vars) == 2 and dep in ['Python', 'Tkinter']:
        # for Python & Tkinter, it's OK to have one 2.x and one 3.x version
        v2_dep_vars = [x for x in dep_vars.keys() if x.startswith('version: 2.')]
        v3_dep_vars = [x for x in dep_vars.keys() if x.startswith('version: 3.')]
        if len(v2_dep_vars) == 1 and len(v3_dep_vars) == 1:
            res = True

    # two variants is OK if one is for Python 2.x and the other is for Python 3.x (based on versionsuffix)
    elif len(dep_vars) == 2:
        py2_dep_vars = [x for x in dep_vars.keys() if '; versionsuffix: -Python-2.' in x]
        py3_dep_vars = [x for x in dep_vars.keys() if '; versionsuffix: -Python-3.' in x]
        if len(py2_dep_vars) == 1 and len(py3_dep_vars) == 1:
            res = True

        # for recent generations, there's no versionsuffix anymore for Python 3,
        # but we still allow variants depending on Python 2.x + 3.x
        is_recent_gen = False
        full_toolchain_regex = re.compile(r'^20[1-9][0-9][ab]$')
        gcc_toolchain_regex = re.compile(r'^GCC(core)?-[0-9]?[0-9]\.[0-9]$')
        if full_toolchain_regex.match(gen):
            is_recent_gen = LooseVersion(gen) >= LooseVersion('2020b')
        elif gcc_toolchain_regex.match(gen):
            genver = gen.split('-', 1)[1]
            is_recent_gen = LooseVersion(genver) >= LooseVersion('10.2')
        else:
            # was "Unkown" (typo) before; fixed error message
            raise EasyBuildError("Unknown type of toolchain generation: %s" % gen)

        if is_recent_gen:
            py2_dep_vars = [x for x in dep_vars.keys() if '; versionsuffix: -Python-2.' in x]
            py3_dep_vars = [x for x in dep_vars.keys() if x.strip().endswith('; versionsuffix:')]
            if len(py2_dep_vars) == 1 and len(py3_dep_vars) == 1:
                res = True

    return res
def test_check_dep_vars(self):
    """
    Test check_dep_vars utility method.

    Each case passes a toolchain generation, a dependency name, and a dict mapping
    variant strings ("version: ...; versionsuffix: ...") to lists of easyconfig
    filenames using that variant; check_dep_vars returns whether that set of
    variants is acceptable.
    """
    # one single dep version: OK
    self.assertTrue(self.check_dep_vars('2019b', 'testdep', {
        'version: 1.2.3; versionsuffix:': ['foo-1.2.3.eb', 'bar-4.5.6.eb'],
    }))
    self.assertTrue(self.check_dep_vars('2019b', 'testdep', {
        'version: 1.2.3; versionsuffix: -test': ['foo-1.2.3.eb', 'bar-4.5.6.eb'],
    }))

    # two or more dep versions (no special case: not OK)
    self.assertFalse(self.check_dep_vars('2019b', 'testdep', {
        'version: 1.2.3; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 4.5.6; versionsuffix:': ['bar-4.5.6.eb'],
    }))
    self.assertFalse(self.check_dep_vars('2019b', 'testdep', {
        'version: 0.0; versionsuffix:': ['foobar-0.0.eb'],
        'version: 1.2.3; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 4.5.6; versionsuffix:': ['bar-4.5.6.eb'],
    }))

    # Java is a special case, with wrapped Java versions
    self.assertTrue(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
    }))
    # two Java wrappers is not OK
    self.assertFalse(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11.0.2; versionsuffix:': ['bar-4.5.6.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6.eb'],
    }))
    # OK to have two or more wrappers if versionsuffix is used to indicate exception
    self.assertTrue(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
    }))
    # versionsuffix must be there for all easyconfigs to indicate exception
    self.assertFalse(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb', 'bar-4.5.6.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb', 'bar-4.5.6.eb'],
    }))
    self.assertTrue(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        'version: 12.1.6; versionsuffix:': ['foobar-0.0-Java-12.eb'],
        'version: 12; versionsuffix:': ['foobar-0.0-Java-12.eb'],
    }))

    # strange situation: odd number of Java versions
    # not OK: two Java wrappers (and no versionsuffix to indicate exception)
    self.assertFalse(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6.eb'],
    }))
    # OK because of -Java-11 versionsuffix
    self.assertTrue(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8.0_221; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
    }))
    # not OK: two Java wrappers (and no versionsuffix to indicate exception)
    self.assertFalse(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11.0.2; versionsuffix:': ['bar-4.5.6.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6.eb'],
    }))
    # OK because of -Java-11 versionsuffix
    self.assertTrue(self.check_dep_vars('2019b', 'Java', {
        'version: 1.8; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 11.0.2; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
        'version: 11; versionsuffix:': ['bar-4.5.6-Java-11.eb'],
    }))

    # two different versions of Boost is not OK
    self.assertFalse(self.check_dep_vars('2019b', 'Boost', {
        'version: 1.64.0; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
    }))

    # a different Boost version that is only used as dependency for a matching Boost.Python is fine
    self.assertTrue(self.check_dep_vars('2019a', 'Boost', {
        'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2019a.eb'],
        'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
    }))
    self.assertTrue(self.check_dep_vars('2019a', 'Boost', {
        'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2019a.eb'],
        'version: 1.66.0; versionsuffix:': ['Boost.Python-1.66.0-gompi-2019a.eb'],
        'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
    }))
    self.assertFalse(self.check_dep_vars('2019a', 'Boost', {
        'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2019a.eb'],
        'version: 1.66.0; versionsuffix:': ['foo-1.2.3.eb'],
        'version: 1.70.0; versionsuffix:': ['foo-2.3.4.eb'],
    }))

    self.assertTrue(self.check_dep_vars('2018a', 'Boost', {
        'version: 1.63.0; versionsuffix: -Python-2.7.14': ['EMAN2-2.21a-foss-2018a-Python-2.7.14-Boost-1.63.0.eb'],
        'version: 1.64.0; versionsuffix:': ['Boost.Python-1.64.0-gompi-2018a.eb'],
        'version: 1.66.0; versionsuffix:': ['BLAST+-2.7.1-foss-2018a.eb'],
    }))

    self.assertTrue(self.check_dep_vars('2019a', 'Boost', {
        'version: 1.64.0; versionsuffix:': [
            'Boost.Python-1.64.0-gompi-2019a.eb',
            'EMAN2-2.3-foss-2019a-Python-2.7.15.eb',
        ],
        'version: 1.70.0; versionsuffix:': [
            'BLAST+-2.9.0-gompi-2019a.eb',
            'Boost.Python-1.70.0-gompi-2019a.eb',
        ],
    }))

    # two variants is OK, if they're for Python 2.x and 3.x
    self.assertTrue(self.check_dep_vars('2020a', 'Python', {
        'version: 2.7.18; versionsuffix:': ['SciPy-bundle-2020.03-foss-2020a-Python-2.7.18.eb'],
        'version: 3.8.2; versionsuffix:': ['SciPy-bundle-2020.03-foss-2020a-Python-3.8.2.eb'],
    }))

    self.assertTrue(self.check_dep_vars('2020a', 'SciPy-bundle', {
        'version: 2020.03; versionsuffix: -Python-2.7.18': ['matplotlib-3.2.1-foss-2020a-Python-2.7.18.eb'],
        'version: 2020.03; versionsuffix: -Python-3.8.2': ['matplotlib-3.2.1-foss-2020a-Python-3.8.2.eb'],
    }))

    # for recent easyconfig generations, there's no versionsuffix anymore for Python 3
    self.assertTrue(self.check_dep_vars('2020b', 'Python', {
        'version: 2.7.18; versionsuffix:': ['SciPy-bundle-2020.11-foss-2020b-Python-2.7.18.eb'],
        'version: 3.8.6; versionsuffix:': ['SciPy-bundle-2020.11-foss-2020b.eb'],
    }))
    self.assertTrue(self.check_dep_vars('GCCcore-10.2', 'PyYAML', {
        'version: 5.3.1; versionsuffix:': ['IPython-7.18.1-GCCcore-10.2.0.eb'],
        'version: 5.3.1; versionsuffix: -Python-2.7.18': ['IPython-7.18.1-GCCcore-10.2.0-Python-2.7.18.eb'],
    }))
    self.assertTrue(self.check_dep_vars('2020b', 'SciPy-bundle', {
        'version: 2020.11; versionsuffix: -Python-2.7.18': ['matplotlib-3.3.3-foss-2020b-Python-2.7.18.eb'],
        'version: 2020.11; versionsuffix:': ['matplotlib-3.3.3-foss-2020b.eb'],
    }))

    # not allowed for older generations (foss/intel 2020a or older, GCC(core) 10.1.0 or older)
    self.assertFalse(self.check_dep_vars('2020a', 'SciPy-bundle', {
        'version: 2020.03; versionsuffix: -Python-2.7.18': ['matplotlib-3.2.1-foss-2020a-Python-2.7.18.eb'],
        'version: 2020.03; versionsuffix:': ['matplotlib-3.2.1-foss-2020a.eb'],
    }))
def test_dep_versions_per_toolchain_generation(self):
    """
    Check whether there's only one dependency version per toolchain generation actively used.
    This is enforced to try and limit the chance of running into conflicts when multiple modules built with
    the same toolchain are loaded together.
    """
    if EasyConfigTest.ordered_specs is None:
        self.process_all_easyconfigs()

    ecs_by_full_mod_name = dict((ec['full_mod_name'], ec) for ec in EasyConfigTest.parsed_easyconfigs)
    if len(ecs_by_full_mod_name) != len(EasyConfigTest.parsed_easyconfigs):
        self.fail('Easyconfigs with duplicate full_mod_name found')

    # Cache already determined dependencies
    ec_to_deps = dict()

    def get_deps_for(ec):
        """Get list of (direct) dependencies for specified easyconfig."""
        ec_mod_name = ec['full_mod_name']
        deps = ec_to_deps.get(ec_mod_name)
        if deps is None:
            deps = []
            for dep in ec['ec']['dependencies']:
                dep_mod_name = dep['full_mod_name']
                deps.append((dep['name'], dep['version'], dep['versionsuffix'], dep_mod_name))
                # Note: Raises KeyError if dep not found
                res = ecs_by_full_mod_name[dep_mod_name]
                # recurse: transitive deps are included as well (results are cached per module name)
                deps.extend(get_deps_for(res))
            ec_to_deps[ec_mod_name] = deps
        return deps

    # some software also follows <year>{a,b} versioning scheme,
    # which throws off the pattern matching done below for toolchain versions
    false_positives_regex = re.compile('^MATLAB-Engine-20[0-9][0-9][ab]')

    # restrict to checking dependencies of easyconfigs using common toolchains (start with 2018a)
    # and GCCcore subtoolchain for common toolchains, starting with GCCcore 7.x
    for pattern in ['201[89][ab]', '20[2-9][0-9][ab]', r'GCCcore-[7-9]\.[0-9]']:
        all_deps = {}
        regex = re.compile(r'^.*-(?P<tc_gen>%s).*\.eb$' % pattern)

        # collect variants for all dependencies of easyconfigs that use a toolchain that matches
        for ec in EasyConfigTest.ordered_specs:
            ec_file = os.path.basename(ec['spec'])

            # take into account software which also follows a <year>{a,b} versioning scheme
            ec_file = false_positives_regex.sub('', ec_file)

            res = regex.match(ec_file)
            if res:
                tc_gen = res.group('tc_gen')
                all_deps_tc_gen = all_deps.setdefault(tc_gen, {})
                for dep_name, dep_ver, dep_versuff, dep_mod_name in get_deps_for(ec):
                    dep_variants = all_deps_tc_gen.setdefault(dep_name, {})
                    # a variant is defined by version + versionsuffix
                    variant = "version: %s; versionsuffix: %s" % (dep_ver, dep_versuff)
                    # keep track of which easyconfig this is a dependency
                    dep_variants.setdefault(variant, set()).add(ec_file)

        # check which dependencies have more than 1 variant
        multi_dep_vars, multi_dep_vars_msg = [], ''
        for tc_gen in sorted(all_deps.keys()):
            for dep in sorted(all_deps[tc_gen].keys()):
                dep_vars = all_deps[tc_gen][dep]
                if not self.check_dep_vars(tc_gen, dep, dep_vars):
                    multi_dep_vars.append(dep)
                    multi_dep_vars_msg += "\nfound %s variants of '%s' dependency " % (len(dep_vars), dep)
                    multi_dep_vars_msg += "in easyconfigs using '%s' toolchain generation\n* " % tc_gen
                    multi_dep_vars_msg += '\n* '.join("%s as dep for %s" % v for v in sorted(dep_vars.items()))
                    multi_dep_vars_msg += '\n'

        error_msg = "No multi-variant deps found for '%s' easyconfigs:\n%s" % (regex.pattern, multi_dep_vars_msg)
        self.assertFalse(multi_dep_vars, error_msg)
def test_sanity_check_paths(self):
    """Make sure specified sanity check paths adhere to the requirements."""
    if not EasyConfigTest.parsed_easyconfigs:
        self.process_all_easyconfigs()

    for parsed_ec in EasyConfigTest.parsed_easyconfigs:
        scp = parsed_ec['ec']['sanity_check_paths']
        # default (empty dict) means the easyblock's own sanity check paths apply; nothing to verify
        if scp == {}:
            continue

        # if sanity_check_paths is specified (i.e., non-default), it must adhere to the requirements:
        # both 'files' and 'dirs' keys, both with list values and with at least one a non-empty list
        msg = "sanity_check_paths for %s does not meet requirements: %s" % (parsed_ec['spec'], scp)
        self.assertEqual(sorted(scp.keys()), ['dirs', 'files'], msg)
        self.assertTrue(isinstance(scp['dirs'], list), msg)
        self.assertTrue(isinstance(scp['files'], list), msg)
        self.assertTrue(scp['dirs'] or scp['files'], msg)
def test_r_libs_site_env_var(self):
    """Make sure $R_LIBS_SITE is being updated, rather than $R_LIBS."""
    # cfr. https://github.com/easybuilders/easybuild-easyblocks/pull/2326
    if not EasyConfigTest.parsed_easyconfigs:
        self.process_all_easyconfigs()

    # collect specs of all easyconfigs that touch R_LIBS via modextrapaths/modextravars
    offenders = [ec['spec'] for ec in EasyConfigTest.parsed_easyconfigs
                 for key in ('modextrapaths', 'modextravars')
                 if 'R_LIBS' in ec['ec'][key]]

    error_msg = "%d easyconfigs found which set $R_LIBS, should be $R_LIBS_SITE: %s"
    self.assertEqual(offenders, [], error_msg % (len(offenders), ', '.join(offenders)))
def test_easyconfig_locations(self):
    """Make sure all easyconfigs files are in the right location."""
    # easyconfig files must live in <repo>/easybuild/easyconfigs/<letter or 0>/<software name>/
    easyconfig_dirs_regex = re.compile(r'/easybuild/easyconfigs/[0a-z]/[^/]+$')
    topdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    for (dirpath, _, filenames) in os.walk(topdir):
        # ignore git/svn dirs & archived easyconfigs
        if '/.git/' in dirpath or '/.svn/' in dirpath or '__archive__' in dirpath:
            continue
        # check whether list of .eb files is non-empty
        # (use '.eb' rather than bare 'eb' suffix so unrelated files ending in 'eb' are not flagged)
        easyconfig_files = [fn for fn in filenames if fn.endswith('.eb')]
        if easyconfig_files:
            # check whether path matches required pattern
            if not easyconfig_dirs_regex.search(dirpath):
                # only exception: TEMPLATE.eb
                if not (dirpath.endswith('/easybuild/easyconfigs') and filenames == ['TEMPLATE.eb']):
                    # fixed error message: previous one claimed the list was empty, while the
                    # actual problem is that easyconfig files were found in a wrong location
                    self.fail("There should be no easyconfig files in %s, found %s" % (dirpath, easyconfig_files))
def check_sha256_checksums(self, changed_ecs):
    """Make sure changed easyconfigs have SHA256 checksums in place."""
    # list of software for which checksums can not be required,
    # e.g. because 'source' files need to be constructed manually
    whitelist = [
        'Kent_tools-*',
        'MATLAB-*',
        'OCaml-*',
        'OpenFOAM-Extend-4.1-*',
        # sources for old versions of Bioconductor packages are no longer available,
        # so not worth adding checksums for at this point
        'R-bundle-Bioconductor-3.[2-5]',
    ]

    # the check_sha256_checksums function (again) creates an EasyBlock instance;
    # for easyconfigs using the Bundle easyblock, this is a problem because the 'sources'
    # easyconfig parameter is updated in place (sources for components are added to the
    # 'parent' sources) in Bundle's __init__;
    # therefore, we need to reset 'sources' to an empty list here if Bundle is used...
    # likewise for 'patches' and 'checksums'
    bundle_easyblocks = ('Bundle', 'PythonBundle', 'EB_OpenSSL_wrapper')
    for ec in changed_ecs:
        if ec['easyblock'] in bundle_easyblocks:
            for param in ('sources', 'patches', 'checksums'):
                ec[param] = []

    # deprecated easyconfigs are exempt from the checksum requirement
    retained_changed_ecs = [ec for ec in changed_ecs if not ec['deprecated']]

    checksum_issues = check_sha256_checksums(retained_changed_ecs, whitelist=whitelist)
    self.assertTrue(len(checksum_issues) == 0, "No checksum issues:\n%s" % '\n'.join(checksum_issues))
def check_python_packages(self, changed_ecs, added_ecs_filenames):
    """
    Several checks for easyconfigs that install (bundles of) Python packages.

    :param changed_ecs: list of parsed easyconfigs changed in the pull request
    :param added_ecs_filenames: filenames of easyconfigs newly added (stricter checks apply)
    """
    # These packages do not support installation with 'pip'
    whitelist_pip = [
        r'ESMPy-.*',
        r'MATLAB-Engine-.*',
        r'Meld-.*',
        r'PyTorch-.*',
    ]

    whitelist_pip_check = [
        r'Mako-1.0.4.*Python-2.7.12.*',
        # no pip 9.x or newer for configparser easyconfigs using a 2016a or 2016b toolchain
        r'configparser-3.5.0.*-2016[ab].*',
    ]

    failing_checks = []

    python_default_urls = PythonPackage.extra_options()['source_urls'][0]

    for ec in changed_ecs:

        # NOTE(review): parameter extraction is done with templating disabled,
        # so raw (untemplated) values are inspected here
        with ec.disable_templating():
            ec_fn = os.path.basename(ec.path)
            easyblock = ec.get('easyblock')
            exts_defaultclass = ec.get('exts_defaultclass')
            exts_default_options = ec.get('exts_default_options', {})

            download_dep_fail = ec.get('download_dep_fail')
            exts_download_dep_fail = ec.get('exts_download_dep_fail')

            use_pip = ec.get('use_pip')
            if use_pip is None:
                use_pip = exts_default_options.get('use_pip')

            # only easyconfig parameters as they are defined in the easyconfig file,
            # does *not* include other easyconfig parameters with their default value!
            pure_ec = ec.parser.get_config_dict()

        # download_dep_fail should be set when using PythonPackage
        if easyblock == 'PythonPackage':
            if download_dep_fail is None:
                failing_checks.append("'download_dep_fail' should be set in %s" % ec_fn)

            if pure_ec.get('source_urls') == python_default_urls:
                failing_checks.append("'source_urls' should not be defined when using the default value "
                                      "in %s" % ec_fn)

        # use_pip should be set when using PythonPackage or PythonBundle (except for whitelisted easyconfigs)
        if easyblock in ['PythonBundle', 'PythonPackage']:
            if use_pip is None and not any(re.match(regex, ec_fn) for regex in whitelist_pip):
                failing_checks.append("'use_pip' should be set in %s" % ec_fn)

        # download_dep_fail is enabled automatically in PythonBundle easyblock, so shouldn't be set
        if easyblock == 'PythonBundle':
            if download_dep_fail or exts_download_dep_fail:
                fail = "'*download_dep_fail' should not be set in %s since PythonBundle easyblock is used" % ec_fn
                failing_checks.append(fail)
            if pure_ec.get('exts_default_options', {}).get('source_urls') == python_default_urls:
                failing_checks.append("'source_urls' should not be defined in exts_default_options when using "
                                      "the default value in %s" % ec_fn)

        elif exts_defaultclass == 'PythonPackage':
            # bundle of Python packages should use PythonBundle
            if easyblock == 'Bundle':
                fail = "'PythonBundle' easyblock should be used for bundle of Python packages in %s" % ec_fn
                failing_checks.append(fail)
            else:
                # both download_dep_fail and use_pip should be set via exts_default_options
                # when installing Python packages as extensions
                for key in ['download_dep_fail', 'use_pip']:
                    if exts_default_options.get(key) is None:
                        failing_checks.append("'%s' should be set in exts_default_options in %s" % (key, ec_fn))

        # if Python is a dependency, that should be reflected in the versionsuffix
        # Tkinter is an exception, since its version always matches the Python version anyway
        # Python 3.8.6 and later are also excluded, as we consider python 3 the default python
        # Also whitelist some updated versions of Amber
        whitelist_python_suffix = [
            'Amber-16-*-2018b-AmberTools-17-patchlevel-10-15.eb',
            'Amber-16-intel-2017b-AmberTools-17-patchlevel-8-12.eb',
            'R-keras-2.1.6-foss-2018a-R-3.4.4.eb',
        ]
        whitelisted = any(re.match(regex, ec_fn) for regex in whitelist_python_suffix)
        has_python_dep = any(LooseVersion(dep['version']) < LooseVersion('3.8.6')
                             for dep in ec['dependencies'] if dep['name'] == 'Python')
        if has_python_dep and ec.name != 'Tkinter' and not whitelisted:
            if not re.search(r'-Python-[23]\.[0-9]+\.[0-9]+', ec['versionsuffix']):
                msg = "'-Python-%%(pyver)s' should be included in versionsuffix in %s" % ec_fn
                # This is only a failure for newly added ECs, not for existing ECS
                # As that would probably break many ECs
                if ec_fn in added_ecs_filenames:
                    failing_checks.append(msg)
                else:
                    print('\nNote: Failed non-critical check: ' + msg)
        else:
            has_recent_python3_dep = any(LooseVersion(dep['version']) >= LooseVersion('3.8.6')
                                         for dep in ec['dependencies'] if dep['name'] == 'Python')
            if has_recent_python3_dep and re.search(r'-Python-3\.[0-9]+\.[0-9]+', ec['versionsuffix']):
                msg = "'-Python-%%(pyver)s' should no longer be included in versionsuffix in %s" % ec_fn
                failing_checks.append(msg)

        # require that running of "pip check" during sanity check is enabled via sanity_pip_check
        if easyblock in ['PythonBundle', 'PythonPackage']:
            sanity_pip_check = ec.get('sanity_pip_check') or exts_default_options.get('sanity_pip_check')
            if not sanity_pip_check and not any(re.match(regex, ec_fn) for regex in whitelist_pip_check):
                failing_checks.append("sanity_pip_check should be enabled in %s" % ec_fn)

    if failing_checks:
        self.fail('\n'.join(failing_checks))
def check_R_packages(self, changed_ecs):
    """Several checks for easyconfigs that install (bundles of) R packages."""
    failing_checks = []

    for ec in changed_ecs:
        ec_fn = os.path.basename(ec.path)

        # only easyconfigs that install R packages (incl. R itself) are of interest here
        if ec.get('exts_defaultclass') != 'RPackage' and ec.name != 'R':
            continue

        seen_exts = set()
        for ext in ec['exts_list']:
            # extensions are either plain names or (name, version[, options]) tuples/lists
            ext_name = ext[0] if isinstance(ext, (tuple, list)) else ext
            if ext_name in seen_exts:
                failing_checks.append('%s was added multiple times to exts_list in %s' % (ext_name, ec_fn))
            else:
                seen_exts.add(ext_name)

    self.assertFalse(failing_checks, '\n'.join(failing_checks))
def check_sanity_check_paths(self, changed_ecs):
    """Make sure a custom sanity_check_paths value is specified for easyconfigs that use a generic easyblock."""
    # some generic easyblocks already have a decent customised sanity_check_paths,
    # including CMakePythonPackage, GoPackage, PythonBundle & PythonPackage;
    # BuildEnv, ModuleRC and Toolchain easyblocks doesn't install anything so there is nothing to check.
    whitelist = ['BuildEnv', 'CMakePythonPackage', 'CrayToolchain', 'GoPackage', 'ModuleRC',
                 'PythonBundle', 'PythonPackage', 'Toolchain']
    # Bundles of dependencies without files of their own
    # Autotools: Autoconf + Automake + libtool, (recent) GCC: GCCcore + binutils, CUDA: GCC + CUDAcore,
    # CESM-deps: Python + Perl + netCDF + ESMF + git
    bundles_whitelist = ['Autotools', 'CESM-deps', 'CUDA', 'GCC']

    failing_checks = []

    for ec in changed_ecs:
        easyblock = ec.get('easyblock')

        # only generic easyblocks without a custom sanity_check_paths need scrutiny
        if not is_generic_easyblock(easyblock) or ec.get('sanity_check_paths'):
            continue

        whitelisted = easyblock in whitelist or (easyblock == 'Bundle' and ec['name'] in bundles_whitelist)
        if not whitelisted:
            failing_checks.append("No custom sanity_check_paths found in %s" % os.path.basename(ec.path))

    self.assertFalse(failing_checks, '\n'.join(failing_checks))
def check_https(self, changed_ecs):
    """Make sure https:// URL is used (if it exists) for homepage/source_urls (rather than http://)."""
    # software whose sites have no (working) https:// equivalent
    whitelist = [
        'Kaiju',  # invalid certificate at https://kaiju.binf.ku.dk
        'libxml2',  # https://xmlsoft.org works, but invalid certificate
        'p4vasp',  # https://www.p4vasp.at doesn't work
        'ITSTool',  # https://itstool.org/ doesn't work
        'UCX-',  # bad certificate for https://www.openucx.org
        'MUMPS',  # https://mumps.enseeiht.fr doesn't work
        'PyFR',  # https://www.pyfr.org doesn't work
        'PycURL',  # bad certificate for https://pycurl.io/
    ]
    url_whitelist = [
        # https:// doesn't work, results in index page being downloaded instead
        # (see https://github.com/easybuilders/easybuild-easyconfigs/issues/9692)
        'http://isl.gforge.inria.fr',
        # https:// leads to File Not Found
        'http://tau.uoregon.edu/',
        # https:// has outdated SSL configurations
        'http://faculty.scs.illinois.edu',
    ]

    # match any http:// URL up to a closing quote or end of line
    http_url_regex = re.compile('http://[^"\'\n]+', re.M)

    problems = []
    for ec in changed_ecs:
        ec_fn = os.path.basename(ec.path)

        # skip whitelisted easyconfigs
        if ec_fn.startswith(tuple(whitelist)):
            continue

        # ignore commented out lines in easyconfig files when checking for http:// URLs
        active_lines = [x for x in ec.rawtxt.split('\n') if not x.startswith('#')]
        for http_url in http_url_regex.findall('\n'.join(active_lines)):
            # skip whitelisted http:// URLs
            if http_url.startswith(tuple(url_whitelist)):
                continue

            # flag the http:// URL only if the https:// equivalent actually works
            https_url = http_url.replace('http://', 'https://')
            try:
                https_url_works = bool(urlopen(https_url, timeout=5))
            except Exception:
                https_url_works = False

            if https_url_works:
                problems.append("Found http:// URL in %s, should be https:// : %s" % (ec_fn, http_url))

    if problems:
        self.fail('\n'.join(problems))
def test_changed_files_pull_request(self):
"""Specific checks only done for the (easyconfig) files that were changed in a pull request."""
def get_eb_files_from_diff(diff_filter):
# first determine the 'merge base' between target branch and PR branch
# cfr. https://git-scm.com/docs/git-merge-base
cmd = "git merge-base %s HEAD" % target_branch
out, ec = run_cmd(cmd, simple=False, log_ok=False)
if ec == 0:
merge_base = out.strip()
print("Merge base for %s and HEAD: %s" % (target_branch, merge_base))
else:
msg = "Failed to determine merge base (ec: %s, output: '%s'), "
msg += "falling back to specifying target branch %s"
print(msg % (ec, out, target_branch))
merge_base = target_branch
# determine list of changed files using 'git diff' and merge base determined above
cmd = "git diff --name-only --diff-filter=%s %s..HEAD --" % (diff_filter, merge_base)
out, _ = run_cmd(cmd, simple=False)
return [os.path.basename(f) for f in out.strip().split('\n') if f.endswith('.eb')]
# $TRAVIS_PULL_REQUEST should be a PR number, otherwise we're not running tests for a PR
travis_pr_test = re.match('^[0-9]+$', os.environ.get('TRAVIS_PULL_REQUEST', '(none)'))
# when testing a PR in GitHub Actions, $GITHUB_EVENT_NAME will be set to 'pull_request'
github_pr_test = os.environ.get('GITHUB_EVENT_NAME') == 'pull_request'
if travis_pr_test or github_pr_test:
# target branch should be anything other than 'master';
# usually is 'develop', but could also be a release branch like '3.7.x'
if travis_pr_test:
target_branch = os.environ.get('TRAVIS_BRANCH', None)
else:
target_branch = os.environ.get('GITHUB_BASE_REF', None)
if target_branch is None:
self.assertTrue(False, "Failed to determine target branch for current pull request.")
if target_branch != 'main':
if not EasyConfigTest.parsed_easyconfigs:
self.process_all_easyconfigs()
# relocate to top-level directory of repository to run 'git diff' command
top_dir = os.path.dirname(os.path.dirname(get_paths_for('easyconfigs')[0]))
cwd = change_dir(top_dir)
# get list of changed easyconfigs
changed_ecs_filenames = get_eb_files_from_diff(diff_filter='M')
added_ecs_filenames = get_eb_files_from_diff(diff_filter='A')
if changed_ecs_filenames:
print("\nList of changed easyconfig files in this PR:\n\t%s" % '\n\t'.join(changed_ecs_filenames))
if added_ecs_filenames:
print("\nList of added easyconfig files in this PR:\n\t%s" % '\n\t'.join(added_ecs_filenames))
change_dir(cwd)
# grab parsed easyconfigs for changed easyconfig files
changed_ecs = []
for ec_fn in changed_ecs_filenames + added_ecs_filenames:
match = None
for ec in EasyConfigTest.parsed_easyconfigs:
if os.path.basename(ec['spec']) == ec_fn:
match = ec['ec']
break
if match:
changed_ecs.append(match)
else:
# if no easyconfig is found, it's possible some archived easyconfigs were touched in the PR...
# so as a last resort, try to find the easyconfig file in __archive__
easyconfigs_path = get_paths_for("easyconfigs")[0]
specs = glob.glob('%s/__archive__/*/*/%s' % (easyconfigs_path, ec_fn))
if len(specs) == 1:
ec = process_easyconfig(specs[0])[0]
changed_ecs.append(ec['ec'])
else:
error_msg = "Failed to find parsed easyconfig for %s" % ec_fn
error_msg += " (and could not isolate it in easyconfigs archive either)"
self.assertTrue(False, error_msg)
# run checks on changed easyconfigs
self.check_sha256_checksums(changed_ecs)
self.check_python_packages(changed_ecs, added_ecs_filenames)
self.check_R_packages(changed_ecs)
self.check_sanity_check_paths(changed_ecs)
self.check_https(changed_ecs)
def test_zzz_cleanup(self):
"""Dummy test to clean up global temporary directory."""
shutil.rmtree(self.TMPDIR)
def template_easyconfig_test(self, spec):
"""Tests for an individual easyconfig: parsing, instantiating easyblock, check patches, ..."""
# set to False, so it's False in case of this test failing
global single_tests_ok
prev_single_tests_ok = single_tests_ok
single_tests_ok = False
# parse easyconfig
ecs = process_easyconfig(spec)
if len(ecs) == 1:
ec = ecs[0]['ec']
# cache the parsed easyconfig, to avoid that it is parsed again
EasyConfigTest.parsed_easyconfigs.append(ecs[0])
else:
self.assertTrue(False, "easyconfig %s does not contain blocks, yields only one parsed easyconfig" % spec)
# check easyconfig file name
expected_fn = '%s-%s.eb' % (ec['name'], det_full_ec_version(ec))
msg = "Filename '%s' of parsed easyconfig matches expected filename '%s'" % (spec, expected_fn)
self.assertEqual(os.path.basename(spec), expected_fn, msg)
name, easyblock = fetch_parameters_from_easyconfig(ec.rawtxt, ['name', 'easyblock'])
# make sure easyconfig file is in expected location
expected_subdir = os.path.join('easybuild', 'easyconfigs', letter_dir_for(name), name)
subdir = os.path.join(*spec.split(os.path.sep)[-5:-1])
fail_msg = "Easyconfig file %s not in expected subdirectory %s" % (spec, expected_subdir)
self.assertEqual(expected_subdir, subdir, fail_msg)
# sanity check for software name, moduleclass
self.assertEqual(ec['name'], name)
self.assertTrue(ec['moduleclass'] in build_option('valid_module_classes'))
# instantiate easyblock with easyconfig file
app_class = get_easyblock_class(easyblock, name=name)
# check that automagic fallback to ConfigureMake isn't done (deprecated behaviour)
fn = os.path.basename(spec)
error_msg = "%s relies on automagic fallback to ConfigureMake, should use easyblock = 'ConfigureMake' instead" % fn
self.assertTrue(easyblock or app_class is not ConfigureMake, error_msg)
# dump the easyconfig file;
# this should be done before creating the easyblock instance (done below via app_class),
# because some easyblocks (like PythonBundle) modify easyconfig parameters at initialisation
handle, test_ecfile = tempfile.mkstemp()
os.close(handle)
ec.dump(test_ecfile)
dumped_ec = EasyConfigParser(test_ecfile).get_config_dict()
os.remove(test_ecfile)
app = app_class(ec)
# more sanity checks
self.assertTrue(name, app.name)
self.assertTrue(ec['version'], app.version)
# make sure that deprecated 'dummy' toolchain is no longer used, should use 'system' toolchain instead
# but give recent EasyBuild easyconfigs special treatment to avoid breaking "eb --install-latest-eb-release"
ec_fn = os.path.basename(spec)
if not (ec_fn == 'EasyBuild-3.9.4.eb' or ec_fn.startswith('EasyBuild-4.')):
error_msg_tmpl = "%s should use 'system' toolchain rather than deprecated 'dummy' toolchain"
self.assertFalse(ec['toolchain']['name'] == 'dummy', error_msg_tmpl % os.path.basename(spec))
# make sure that $root is not used, since it is not compatible with module files in Lua syntax
res = re.findall(r'.*\$root.*', ec.rawtxt, re.M)
error_msg = "Found use of '$root', not compatible with modules in Lua syntax, use '%%(installdir)s' instead: %s"
self.assertFalse(res, error_msg % res)
# check for redefined easyconfig parameters, there should be none...
param_def_regex = re.compile(r'^(?P<key>\w+)\s*=', re.M)
keys = param_def_regex.findall(ec.rawtxt)
redefined_keys = []
for key in sorted(nub(keys)):
cnt = keys.count(key)
if cnt > 1:
redefined_keys.append((key, cnt))
redefined_keys_error_msg = "There should be no redefined easyconfig parameters, found %d: " % len(redefined_keys)
redefined_keys_error_msg += ', '.join('%s (%d)' % x for x in redefined_keys)
self.assertFalse(redefined_keys, redefined_keys_error_msg)
# make sure old GitHub urls for EasyBuild that include 'hpcugent' are no longer used
old_urls = [
'github.com/hpcugent/easybuild',
'hpcugent.github.com/easybuild',
'hpcugent.github.io/easybuild',
]
for old_url in old_urls:
self.assertFalse(old_url in ec.rawtxt, "Old URL '%s' not found in %s" % (old_url, spec))
# make sure binutils is included as a (build) dep if toolchain is GCCcore
if ec['toolchain']['name'] == 'GCCcore':
# with 'Tarball' easyblock: only unpacking, no building; Eigen is also just a tarball
requires_binutils = ec['easyblock'] not in ['Tarball'] and ec['name'] not in ['Eigen']
# let's also exclude the very special case where the system GCC is used as GCCcore, and only apply this
# exception to the dependencies of binutils (since we should eventually build a new binutils with GCCcore)
if ec['toolchain']['version'] == 'system':
binutils_complete_dependencies = ['M4', 'Bison', 'flex', 'help2man', 'zlib', 'binutils']
requires_binutils &= bool(ec['name'] not in binutils_complete_dependencies)
# if no sources/extensions/components are specified, it's just a bundle (nothing is being compiled)
requires_binutils &= bool(ec['sources'] or ec['exts_list'] or ec.get('components'))
if requires_binutils:
# dependencies() returns both build and runtime dependencies
# in some cases, binutils can also be a runtime dep (e.g. for Clang)
dep_names = [d['name'] for d in ec.dependencies()]
self.assertTrue('binutils' in dep_names, "binutils is a build dep in %s: %s" % (spec, dep_names))
src_cnt = len(ec['sources'])
patch_checksums = ec['checksums'][src_cnt:]
patch_checksums_cnt = len(patch_checksums)
# make sure all patch files are available
specdir = os.path.dirname(spec)
specfn = os.path.basename(spec)
for idx, patch in enumerate(ec['patches']):
if isinstance(patch, (tuple, list)):
patch = patch[0]
# only check actual patch files, not other files being copied via the patch functionality
patch_full = os.path.join(specdir, patch)
if patch.endswith('.patch'):
msg = "Patch file %s is available for %s" % (patch_full, specfn)
self.assertTrue(os.path.isfile(patch_full), msg)
# verify checksum for each patch file
if idx < patch_checksums_cnt and (os.path.exists(patch_full) or patch.endswith('.patch')):
checksum = patch_checksums[idx]
error_msg = "Invalid checksum for patch file %s in %s: %s" % (patch, ec_fn, checksum)
res = verify_checksum(patch_full, checksum)
self.assertTrue(res, error_msg)
# make sure 'source' step is not being skipped,
# since that implies not verifying the checksum
error_msg = "'source' step should not be skipped in %s, since that implies not verifying checksums" % ec_fn
self.assertFalse(ec['checksums'] and ('source' in ec['skipsteps']), error_msg)
for ext in ec['exts_list']:
if isinstance(ext, (tuple, list)) and len(ext) == 3:
ext_name = ext[0]
self.assertTrue(isinstance(ext[2], dict), "3rd element of extension spec is a dictionary")
# fall back to assuming a single source file for an extension
src_cnt = len(ext[2].get('sources', [])) or 1
checksums = ext[2].get('checksums', [])
patch_checksums = checksums[src_cnt:]
for idx, ext_patch in enumerate(ext[2].get('patches', [])):
if isinstance(ext_patch, (tuple, list)):
ext_patch = ext_patch[0]
# only check actual patch files, not other files being copied via the patch functionality
ext_patch_full = os.path.join(specdir, ext_patch)
if ext_patch.endswith('.patch'):
msg = "Patch file %s is available for %s" % (ext_patch_full, specfn)
self.assertTrue(os.path.isfile(ext_patch_full), msg)
# verify checksum for each patch file
if idx < patch_checksums_cnt and (os.path.exists(ext_patch_full) or ext_patch.endswith('.patch')):
checksum = patch_checksums[idx]
error_msg = "Invalid checksum for patch file %s for %s extension in %s: %s"
res = verify_checksum(ext_patch_full, checksum)
self.assertTrue(res, error_msg % (ext_patch, ext_name, ec_fn, checksum))
# check whether all extra_options defined for used easyblock are defined
extra_opts = app.extra_options()
for key in extra_opts:
self.assertTrue(key in app.cfg)
app.close_log()
os.remove(app.logfile)
# inject dummy values for templates that are only known at a later stage
dummy_template_values = {
'builddir': '/dummy/builddir',
'installdir': '/dummy/installdir',
'parallel': '2',
}
ec.template_values.update(dummy_template_values)
ec_dict = ec.parser.get_config_dict()
orig_toolchain = ec_dict['toolchain']
for key in ec_dict:
# skip parameters for which value is equal to default value
orig_val = ec_dict[key]
if key in DEFAULT_CONFIG and orig_val == DEFAULT_CONFIG[key][0]:
continue
if key in extra_opts and orig_val == extra_opts[key][0]:
continue
if key not in DEFAULT_CONFIG and key not in extra_opts:
continue
orig_val = resolve_template(ec_dict[key], ec.template_values)
dumped_val = resolve_template(dumped_ec[key], ec.template_values)
# take into account that dumped value for *dependencies may include hard-coded subtoolchains
# if no easyconfig was found for the dependency with the 'parent' toolchain,
# if may get resolved using a subtoolchain, which is then hardcoded in the dumped easyconfig
if key in DEPENDENCY_PARAMETERS:
# number of dependencies should remain the same
self.assertEqual(len(orig_val), len(dumped_val))
for orig_dep, dumped_dep in zip(orig_val, dumped_val):
# name should always match
self.assertEqual(orig_dep[0], dumped_dep[0])
# version should always match, or be a possibility from the version dict
if isinstance(orig_dep[1], dict):
self.assertTrue(dumped_dep[1] in orig_dep[1].values())
else:
self.assertEqual(orig_dep[1], dumped_dep[1])
# 3rd value is versionsuffix;
if len(dumped_dep) >= 3:
# if no versionsuffix was specified in original dep spec, then dumped value should be empty string
if len(orig_dep) >= 3:
self.assertEqual(dumped_dep[2], orig_dep[2])
else:
self.assertEqual(dumped_dep[2], '')
# 4th value is toolchain spec
if len(dumped_dep) >= 4:
if len(orig_dep) >= 4:
self.assertEqual(dumped_dep[3], orig_dep[3])
else:
# if a subtoolchain is specifed (only) in the dumped easyconfig,
# it should *not* be the same as the parent toolchain
self.assertNotEqual(dumped_dep[3], (orig_toolchain['name'], orig_toolchain['version']))
# take into account that for some string-valued easyconfig parameters (configopts & co),
# the easyblock may have injected additional values, which affects the dumped easyconfig file
elif isinstance(orig_val, string_type):
error_msg = "%s value '%s' should start with '%s'" % (key, dumped_val, orig_val)
self.assertTrue(dumped_val.startswith(orig_val), error_msg)
else:
error_msg = "%s value should be equal in original and dumped easyconfig: '%s' vs '%s'"
self.assertEqual(orig_val, dumped_val, error_msg % (key, orig_val, dumped_val))
# test passed, so set back to True
single_tests_ok = True and prev_single_tests_ok
def suite():
"""Return all easyblock initialisation tests."""
def make_inner_test(spec_path):
def innertest(self):
template_easyconfig_test(self, spec_path)
return innertest
# dynamically generate a separate test for each of the available easyconfigs
# define new inner functions that can be added as class methods to InitTest
easyconfigs_path = get_paths_for('easyconfigs')[0]
cnt = 0
for (subpath, _, specs) in os.walk(easyconfigs_path, topdown=True):
# ignore archived easyconfigs
if '__archive__' in subpath:
continue
for spec in specs:
if spec.endswith('.eb') and spec != 'TEMPLATE.eb':
cnt += 1
innertest = make_inner_test(os.path.join(subpath, spec))
innertest.__doc__ = "Test for easyconfig %s" % spec
# double underscore so parsing tests are run first
innertest.__name__ = "test__parse_easyconfig_%s" % spec
setattr(EasyConfigTest, innertest.__name__, innertest)
print("Found %s easyconfigs..." % cnt)
return TestLoader().loadTestsFromTestCase(EasyConfigTest)
if __name__ == '__main__':
main()
``` |
[
{
"content": "Here is the code block:\n```python\n# Legal boring crap follows. In simple english, you can use this\n# code in your own project, be your project commercial or free.\n# Just be sure to include the license and stuff. The \"copyright\"\n# here is just for technical reasons.\n#\n# Copyright 2011, Phi... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# Legal boring crap follows. In simple english, you can use this\n# code in your own project, be your project commercial or free.\n# Just be sure to include the license and stuff. The \"copyright\"\n# here is just for technical reasons.\n#\n# Cop... | ```python
# Legal boring crap follows. In simple english, you can use this
# code in your own project, be your project commercial or free.
# Just be sure to include the license and stuff. The "copyright"
# here is just for technical reasons.
#
# Copyright 2011, Philip Peterson.
#
# This file is part of Pumpkinpy.
#
# Pumpkinpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pumpkinpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pumpkinpy. If not, see <http://www.gnu.org/licenses/>.
import sys, os, subprocess
from itertools import ifilter as filtered, imap as mapped, ifilterfalse, chain
path = os.path.dirname(os.path.abspath(__file__))
langs = {
"php": "PHP",
"js": "JavaScript"
}
funcs = [
[
"partition",
"stringfunc",
[
["Hello world! How are you doing today?", " "],
["Hello world! How are you doing today?", ","],
["Hello world! How are you doing today?", ""]
]
],
[
"rpartition",
"stringfunc",
[
["Hello world! How are you doing today?", " "],
["Hello world! How are you doing today?", ","],
["Hello world! How are you doing today?", ""]
]
],
[
"startswith",
"stringfunc",
[
["abcdefgh", ""],
["abcdefgh", "abc"],
["abcdefgh", "a"],
["abcdefgh", "abcdefghi"],
["abcdefgh", "bcdefgh"]
]
],
[
"endswith",
"stringfunc",
[
["abcdefgh", ""],
["abcdefgh", "fgh"],
["abcdefgh", "h"],
["abcdefgh", "abcdefg"],
["abcdefgh", "abcdefghi"],
]
],
[
"rstrip",
"stringfunc",
[
[" Johann went to the store today. "],
["Johann went to the store today. "],
[" Johann went to the store today."],
[" Johann went to the store today. \0"]
]
]
]
####
def itercat(*iterators):
"""Concatenate several iterators into one."""
for i in iterators:
for x in i:
yield x
allfuncs = iter([]) # Find functions for which there are no tests
for lang in langs.keys():
myfuncs = filtered(lambda x: not x.startswith("$"), os.listdir(os.path.join(path, lang, "src"))) # filter out $preamble, etc.
myfuncs = mapped(lambda x: x.rpartition(".")[0], myfuncs)
allfuncs = itercat(myfuncs, allfuncs)
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in ifilterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
allfuncs = unique_everseen(allfuncs)
funcnames = [i[0] for i in funcs]
allfuncs = filtered(lambda fun: not fun in funcnames, allfuncs) # Filter out unsupported items
for unsupportedfunc in allfuncs:
print "[!] No test for", unsupportedfunc
####
results = []
veryverbose_on = "-vv" in sys.argv[1:] or "--very-verbose" in sys.argv[1:]
verbose_on = "-v" in sys.argv[1:] or "--verbose" in sys.argv[1:] or veryverbose_on
def vp(*args):
global verbose_on
if verbose_on:
print " ".join(map(str, args))
def vvp(*args):
global veryverbose_on
if veryverbose_on:
print " ".join(map(str, args))
print "Building all..."
for x, y in langs.items():
vp("Building "+y)
subprocess.check_call([os.path.join(".", x, "build.py")])
for function, kind, tests in funcs:
for test in tests:
if kind == "stringfunc":
string_obj = test[0]
args = test[1:]
success = True
try:
result = string_obj.__getattribute__(function)(*args)
except:
success = False
result = None
else:
raise Exception("Unknown function type `%s`." % kind)
test.append([success, result])
all_results = [] # list of all test results, for generating table in readme, etc.
for lang in langs.items():
vp("\nBeginning unit tests on", lang[1])
execfile(os.path.join(path,"_helpers",lang[0]+'.py'))
thislangsresults = [lang[0], lang[1], {}]
mysupport = thislangsresults[2] # This is a dict that will describe support of each function.
all_results.append(thislangsresults)
for function, kind, tests in funcs:
num_tests = len(tests)
num_passed = 0
for test in tests:
if isSupported(function):
args = test[:-1]
code = genTest(function, kind, args)
result = genResult(code)
passedTest = False
expected = json.dumps(test[-1])
try:
actual = json.dumps(json.loads(result))
passedTest = True
except Exception:
actual = "(parse fail)"
print "Could not parse JSON Output of function "+function+"."
vvp("\tJSON: "+result)
if actual!=expected:
passedTest = False
vp(lang[1]+" failed test in "+function+".")
vvp("\tExpected: "+expected+"\n\tActual: "+actual+"\n\tArgs: "+json.dumps(args))
if passedTest:
num_passed += 1
else:
vp(lang[1], "does not support", function+".", "Skipping.")
mysupport[function] = [num_passed, num_tests]
# Display overall results of the tests
print "\nTest results: "
allsuccess = True
for result in all_results:
support = result[2]
for func, fract in support.items():
if fract[0] != fract[1]:
allsuccess = False
print result[0], func, "(", fract[0], "/", fract[1], ")"
if allsuccess:
print "All tests successful."
execfile("_helpers/_gentable.py")
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\"\"\"\nccp.client\n~~~~~~~~~~~~\n\nThis module implements the Changelog API.\n\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\nimport sys\nimport requests\nimport json\nfrom time import time\nimport logging\nfrom pkg_resources import get_distribution... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nccp.client\n~~~~~~~~~~~~\n\nThis module implements the Changelog API.\n\n:license: MIT, see LICENSE for more details.\n\n\"\"\"\n\nimport sys\nimport requests\nimport json\nfrom time import time\nimport logging\nfrom pkg_resources import ... | ```python
# -*- coding: utf-8 -*-
"""
ccp.client
~~~~~~~~~~~~
This module implements the Changelog API.
:license: MIT, see LICENSE for more details.
"""
import sys
import requests
import json
from time import time
import logging
from pkg_resources import get_distribution
API_HOST = "localhost"
API_PORT = 5000
SEVERITY = dict(INFO=1, NOTIFICATION=2, WARNING=3, ERROR=4, CRITICAL=5)
class Client(object):
def __init__(self, host=API_HOST, port=API_PORT):
self.host = host
self.port = port
self.endpoint = "/api/events"
self.logger = logging.getLogger('changelog_client')
def deflate_severity(self, severity):
if isinstance(severity, int):
return severity
return SEVERITY[severity]
def send(self, message, severity, category="misc", extra_headers=None, extra_fields=None):
headers = {
"User-Agent": "ccp/client v.%s" % get_distribution("ccp").version
}
url = self.get_url()
self.logger.info('Sending changelog event to %s' % url)
headers["Content-Type"] = "application/json"
if extra_headers is not None:
headers.update(extra_headers)
data = {
"criticality": "%d" % self.deflate_severity(severity),
"unix_timestamp": "%d" % time(),
"category": category,
"description": message
}
if extra_fields:
data.update(extra_fields)
try:
response = requests.post(
url, headers=headers, data=json.dumps(data))
if "OK" in response.text:
return True
else:
self.logger.error(
"Failed to send changelog message to server: %s" % response.text)
except Exception:
exc_info = sys.exc_info()
self.logger.exception(
"Failed to send changelog message to server")
raise exc_info[1], None, exc_info[2]
def get_url(self):
port = "" if self.port == 80 else ":%d" % self.port
protocol = "https://" if self.port == 443 else "http://"
base_full_url = "%s%s%s%s" % (protocol, self.host, port, self.endpoint)
return base_full_url
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\nimport zstackwoodpecker.test_state as ts_header\nimport os\nTestAction = ts_header.TestAction\ndef path():\n\n return dict(initial_formation=\"template5\", checking_point=8, path_list=[\n\t\t[TestAction.create_vm, 'vm1', ],\n... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\nimport zstackwoodpecker.test_state as ts_header\nimport os\nTestAction = ts_header.TestAction\ndef path():\n\n return dict(initial_formation=\"template5\", checking_point=8, path_list=[\n\t\t[TestAction.create... | ```python
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot5'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot6'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot7'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot8'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot13'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot17'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'volume2-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'vm1-root-snapshot6'],
[TestAction.start_vm, 'vm1'],
[TestAction.reboot_vm, 'vm1'],
[TestAction.batch_delete_snapshots, ['volume2-snapshot9','volume2-snapshot13',]],
[TestAction.batch_delete_snapshots, ['volume3-snapshot13','vm1-root-snapshot6',]],
[TestAction.delete_volume_snapshot, 'vm1-snapshot13'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enadbled:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-root-snapshot5', 'vm1-root-snapshot7', 'vm1-root-snapshot8', 'vm1-snapshot9', 'volume1-snapshot9', 'volume3-snapshot9', 'volume1-snapshot13', 'vm1-root-snapshot17']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['volume2-snapshot9', 'volume2-snapshot13', 'volume3-snapshot13', 'vm1-root-snapshot6', 'vm1-snapshot13']
Expunged:[]
Ha:[]
Group:
vm_snap1:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']---vm1volume1_volume2_volume3
'''
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# -*- coding: utf-8 -*-\n\nEGSPHAN_EXT = \".egsphant\"\nEGSINP_EXT = \".egsinp\"\nEGSPHSF_EXT = \".egsphsp1\"\n\ndef make_cup_prefix(radUnit, outerCup, innerCupSer, innerCupNum):\n \"\"\"\n Makes filename prefix given RU,... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nEGSPHAN_EXT = \".egsphant\"\nEGSINP_EXT = \".egsinp\"\nEGSPHSF_EXT = \".egsphsp1\"\n\ndef make_cup_prefix(radUnit, outerCup, innerCupSer, innerCupNum):\n \"\"\"\n Makes filename ... | ```python
# -*- coding: utf-8 -*-
EGSPHAN_EXT = ".egsphant"
EGSINP_EXT = ".egsinp"
EGSPHSF_EXT = ".egsphsp1"
def make_cup_prefix(radUnit, outerCup, innerCupSer, innerCupNum):
"""
Makes filename prefix given RU, OC, IC info
Parameters
----------
radUnit: string
radiation unit
outerCup: string
outer cup info
innerCupSer: string
inner cup serial line
innerCupNum: integer
inner cup number
returns: string
clinical cup name
"""
return "R" + radUnit + "O" + outerCup + "I" + innerCupSer + innerCupNum
return "R" + radUnit + "O" + outerCup + "I" + innerCupSer + innerCupNum
def make_qualified_name(file_prefix, cl, shot):
"""
Makes qualified name
Parameters
----------
file_prefix: string
prefix with RU and cup info
cl: collimator
collimator info
shot: (float,float) tuple
shot position
returns: string
fully qualified cup name
"""
return file_prefix + str(cl) + "_" + "Y{0}Z{1}".format(int(shot[0]),int(shot[1]))
def make_egsinput_name(full_prefix):
"""
Makes EGS input name
"""
return full_prefix + EGSINP_EXT
def parse_file_prefix(s):
"""
Parse file prefix string and produce rad.unit, outer cup, inner cup, inner cup number, collimator
"""
radUnit = str(s[1:2])
outerCup = str(s[3:4])
innerCupSer = str(s[5:6])
innerCupNum = str(s[6:8])
coll = int(str(s[9:11]))
return (radUnit, outerCup, innerCupSer, innerCupNum, coll)
def parse_shot(s):
"""
Parse input string to extract shot
"""
idx_shot = s.find("_")
if idx_shot < 0:
raise ValueError("No shot info in input")
sh = s[idx_shot+1:]
idx_Y = sh.find("Y")
if idx_Y < 0:
raise ValueError("No Y shot position in input")
idx_Z = sh.find("Z")
if idx_Z < 0:
raise ValueError("No Z shot position in input")
sh_Y = sh[idx_Y+1:idx_Z]
sh_Z = sh[idx_Z+1:]
return (float(sh_Y), float(sh_Z))
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nfrom ..osid import objects as osid_objects\nfrom ..osid import markers as osid_markers\nfrom ..osid import sessions as osid_sessions\n\n\nclass Grade(osid_objects.OsidObject, osid_markers.Subjugateable):\n \"\"\"A ``Grade``.\n\n ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nfrom ..osid import objects as osid_objects\nfrom ..osid import markers as osid_markers\nfrom ..osid import sessions as osid_sessions\n\n\nclass Grade(osid_objects.OsidObject, osid_markers.Subjugateable):\n \"\"\"A ``... | ```python
from ..osid import objects as osid_objects
from ..osid import markers as osid_markers
from ..osid import sessions as osid_sessions
class Grade(osid_objects.OsidObject, osid_markers.Subjugateable):
"""A ``Grade``.
Grades represent qualified performance levels defined within some
grading system.
"""
def get_grade_system_id(self):
"""Gets the ``GradeSystem Id`` in which this grade belongs.
:return: the grade system ``Id``
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
grade_system_id = property(fget=get_grade_system_id)
def get_grade_system(self):
"""Gets the ``GradeSystem`` in which this grade belongs.
:return: the grade system
:rtype: ``osid.grading.GradeSystem``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeSystem
grade_system = property(fget=get_grade_system)
def get_input_score_start_range(self):
"""Gets the low end of the input score range equivalent to this grade.
:return: the start range
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
input_score_start_range = property(fget=get_input_score_start_range)
def get_input_score_end_range(self):
"""Gets the high end of the input score range equivalent to this grade.
:return: the end range
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
input_score_end_range = property(fget=get_input_score_end_range)
def get_output_score(self):
"""Gets the output score for this grade used for calculating cumultives or performing articulation.
:return: the output score
:rtype: ``decimal``
*compliance: mandatory -- This method must be implemented.*
"""
return # decimal
output_score = property(fget=get_output_score)
def get_grade_record(self, grade_record_type):
"""Gets the grade record corresponding to the given ``Grade`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``grade_record_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(grade_record_type)``
is ``true`` .
:param grade_record_type: the type of the record to retrieve
:type grade_record_type: ``osid.type.Type``
:return: the grade record
:rtype: ``osid.grading.records.GradeRecord``
:raise: ``NullArgument`` -- ``grade_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(grade_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.records.GradeRecord
class GradeForm(osid_objects.OsidObjectForm, osid_objects.OsidSubjugateableForm):
    """This is the form for creating and updating ``Grades``.

    Like all ``OsidForm`` objects, various data elements may be set here
    for use in the create and update methods in the
    ``GradeAdminSession``. For each data element that may be set,
    metadata may be examined to provide display hints or data
    constraints.

    """

    def get_input_score_start_range_metadata(self):
        """Gets the metadata for the input score start range.

        :return: metadata for the input score start range
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    input_score_start_range_metadata = property(fget=get_input_score_start_range_metadata)

    def set_input_score_start_range(self, score):
        """Sets the input score start range.

        :param score: the new start range
        :type score: ``decimal``
        :raise: ``InvalidArgument`` -- ``score`` is invalid
        :raise: ``NoAccess`` -- ``range`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_input_score_start_range(self):
        """Clears the input score start range.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    input_score_start_range = property(fset=set_input_score_start_range, fdel=clear_input_score_start_range)

    def get_input_score_end_range_metadata(self):
        """Gets the metadata for the input score end range.

        :return: metadata for the input score end range
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    input_score_end_range_metadata = property(fget=get_input_score_end_range_metadata)

    def set_input_score_end_range(self, score):
        """Sets the input score end range.

        :param score: the new end range
        :type score: ``decimal``
        :raise: ``InvalidArgument`` -- ``score`` is invalid
        :raise: ``NoAccess`` -- ``range`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_input_score_end_range(self):
        """Clears the input score end range.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    input_score_end_range = property(fset=set_input_score_end_range, fdel=clear_input_score_end_range)

    def get_output_score_metadata(self):
        """Gets the metadata for the output score.

        :return: metadata for the output score
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    output_score_metadata = property(fget=get_output_score_metadata)

    def set_output_score(self, score):
        """Sets the output score.

        :param score: the new output score
        :type score: ``decimal``
        :raise: ``InvalidArgument`` -- ``score`` is invalid
        :raise: ``NoAccess`` -- ``score`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_output_score(self):
        """Clears the output score.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    output_score = property(fset=set_output_score, fdel=clear_output_score)

    def get_grade_form_record(self, grade_record_type):
        """Gets the ``GradeFormRecord`` corresponding to the given grade record ``Type``.

        :param grade_record_type: the grade record type
        :type grade_record_type: ``osid.type.Type``
        :return: the grade form record
        :rtype: ``osid.grading.records.GradeFormRecord``
        :raise: ``NullArgument`` -- ``grade_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(grade_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.records.GradeFormRecord
class GradeList(osid_objects.OsidList):
    """An ``OsidList`` of ``Grade`` elements.

    Like all ``OsidLists,`` a ``GradeList`` provides sequential access
    one element at a time or many at a time:

      while (gl.hasNext()) { Grade grade = gl.getNextGrade(); }

    or

      while (gl.hasNext()) {
          Grade[] grades = gl.getNextGrades(gl.available());
      }

    """

    def get_next_grade(self):
        """Return the next ``Grade`` in this list.

        Callers should use ``has_next()`` to test that a next ``Grade``
        is available before invoking this method.

        :return: the next ``Grade`` in this list
        :rtype: ``osid.grading.Grade``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.Grade

    next_grade = property(fget=get_next_grade)

    def get_next_grades(self, n):
        """Return the next batch of ``Grade`` elements from this list.

        :param n: the number of ``Grade`` elements requested; must be less
            than or equal to ``available()``
        :type n: ``cardinal``
        :return: an array of at most ``n`` ``Grade`` elements
        :rtype: ``osid.grading.Grade``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.Grade
class GradeSystem(osid_objects.OsidObject, osid_markers.Aggregateable):
    """A ``GradeSystem`` represents a grading system.

    The system can be based on assigned Grades or based on a numeric
    scale.

    """

    def is_based_on_grades(self):
        """Tests if the grading system is based on grades.

        :return: true if the grading system is based on grades, ``false`` if the system is a numeric score
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    def get_grade_ids(self):
        """Gets the grade ``Ids`` in this system ranked from highest to lowest.

        :return: the list of grades ``Ids``
        :rtype: ``osid.id.IdList``
        :raise: ``IllegalState`` -- ``is_based_on_grades()`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.IdList

    grade_ids = property(fget=get_grade_ids)

    def get_grades(self):
        """Gets the grades in this system ranked from highest to lowest.

        :return: the list of grades
        :rtype: ``osid.grading.GradeList``
        :raise: ``IllegalState`` -- ``is_based_on_grades()`` is ``false``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.GradeList

    grades = property(fget=get_grades)

    def get_lowest_numeric_score(self):
        """Gets the lowest number in a numeric grading system.

        :return: the lowest number
        :rtype: ``decimal``
        :raise: ``IllegalState`` -- ``is_based_on_grades()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # decimal

    lowest_numeric_score = property(fget=get_lowest_numeric_score)

    def get_numeric_score_increment(self):
        """Gets the incremental step between scores in a numeric grading system.

        :return: the increment
        :rtype: ``decimal``
        :raise: ``IllegalState`` -- ``is_based_on_grades()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # decimal

    numeric_score_increment = property(fget=get_numeric_score_increment)

    def get_highest_numeric_score(self):
        """Gets the highest number in a numeric grading system.

        :return: the highest number
        :rtype: ``decimal``
        :raise: ``IllegalState`` -- ``is_based_on_grades()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # decimal

    highest_numeric_score = property(fget=get_highest_numeric_score)

    def get_grade_system_record(self, grade_system_record_type):
        """Gets the grade system record corresponding to the given ``GradeSystem`` record ``Type``.

        This method is used to retrieve an object implementing the
        requested record. The ``grade_system_record_type`` may be the
        ``Type`` returned in ``get_record_types()`` or any of its
        parents in a ``Type`` hierarchy where
        ``has_record_type(grade_system_record_type)`` is ``true`` .

        :param grade_system_record_type: the type of the record to retrieve
        :type grade_system_record_type: ``osid.type.Type``
        :return: the grade system record
        :rtype: ``osid.grading.records.GradeSystemRecord``
        :raise: ``NullArgument`` -- ``grade_system_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(grade_system_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.records.GradeSystemRecord
class GradeSystemForm(osid_objects.OsidObjectForm, osid_objects.OsidAggregateableForm):
    """This is the form for creating and updating ``GradeSystems``.

    Like all ``OsidForm`` objects, various data elements may be set here
    for use in the create and update methods in the
    ``GradeSystemAdminSession``. For each data element that may be set,
    metadata may be examined to provide display hints or data
    constraints.

    """

    def get_based_on_grades_metadata(self):
        """Gets the metadata for a grade-based designation.

        :return: metadata for the grade-based designation
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    based_on_grades_metadata = property(fget=get_based_on_grades_metadata)

    def set_based_on_grades(self, grades):
        """Sets the grade-based designation.

        :param grades: the grade-based designation
        :type grades: ``boolean``
        :raise: ``InvalidArgument`` -- ``grades`` is invalid
        :raise: ``NoAccess`` -- ``grades`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_based_on_grades(self):
        """Clears the based on grades designation.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    based_on_grades = property(fset=set_based_on_grades, fdel=clear_based_on_grades)

    def get_lowest_numeric_score_metadata(self):
        """Gets the metadata for the lowest numeric score.

        :return: metadata for the lowest numeric score
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    lowest_numeric_score_metadata = property(fget=get_lowest_numeric_score_metadata)

    def set_lowest_numeric_score(self, score):
        """Sets the lowest numeric score.

        :param score: the lowest numeric score
        :type score: ``decimal``
        :raise: ``InvalidArgument`` -- ``score`` is invalid
        :raise: ``NoAccess`` -- ``score`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_lowest_numeric_score(self):
        """Clears the lowest score.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    lowest_numeric_score = property(fset=set_lowest_numeric_score, fdel=clear_lowest_numeric_score)

    def get_numeric_score_increment_metadata(self):
        """Gets the metadata for the numeric score increment.

        :return: metadata for the numeric score increment
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    numeric_score_increment_metadata = property(fget=get_numeric_score_increment_metadata)

    def set_numeric_score_increment(self, increment):
        """Sets the numeric score increment.

        :param increment: the numeric score increment
        :type increment: ``decimal``
        :raise: ``InvalidArgument`` -- ``increment`` is invalid
        :raise: ``NoAccess`` -- ``increment`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_numeric_score_increment(self):
        """Clears the numeric score increment.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    numeric_score_increment = property(fset=set_numeric_score_increment, fdel=clear_numeric_score_increment)

    def get_highest_numeric_score_metadata(self):
        """Gets the metadata for the highest numeric score.

        :return: metadata for the highest numeric score
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    highest_numeric_score_metadata = property(fget=get_highest_numeric_score_metadata)

    def set_highest_numeric_score(self, score):
        """Sets the highest numeric score.

        :param score: the highest numeric score
        :type score: ``decimal``
        :raise: ``InvalidArgument`` -- ``score`` is invalid
        :raise: ``NoAccess`` -- ``score`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_highest_numeric_score(self):
        """Clears the highest numeric score.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    highest_numeric_score = property(fset=set_highest_numeric_score, fdel=clear_highest_numeric_score)

    def get_grade_system_form_record(self, grade_system_record_type):
        """Gets the ``GradeSystemFormRecord`` corresponding to the given grade system record ``Type``.

        :param grade_system_record_type: the grade system record type
        :type grade_system_record_type: ``osid.type.Type``
        :return: the grade system form record
        :rtype: ``osid.grading.records.GradeSystemFormRecord``
        :raise: ``NullArgument`` -- ``grade_system_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(grade_system_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.records.GradeSystemFormRecord
class GradeSystemList(osid_objects.OsidList):
    """An ``OsidList`` of ``GradeSystem`` elements.

    Like all ``OsidLists,`` a ``GradeSystemList`` provides sequential
    access one element at a time or many at a time:

      while (gsl.hasNext()) { GradeSystem system = gsl.getNextGradeSystem(); }

    or

      while (gsl.hasNext()) {
          GradeSystem[] systems = gsl.getNextGradeSystems(gsl.available());
      }

    """

    def get_next_grade_system(self):
        """Return the next ``GradeSystem`` in this list.

        Callers should use ``has_next()`` to test that a next
        ``GradeSystem`` is available before invoking this method.

        :return: the next ``GradeSystem`` in this list
        :rtype: ``osid.grading.GradeSystem``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.GradeSystem

    next_grade_system = property(fget=get_next_grade_system)

    def get_next_grade_systems(self, n):
        """Return the next batch of ``GradeSystem`` elements from this list.

        :param n: the number of ``GradeSystem`` elements requested; must be
            less than or equal to ``available()``
        :type n: ``cardinal``
        :return: an array of at most ``n`` ``GradeSystem`` elements
        :rtype: ``osid.grading.GradeSystem``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.GradeSystem
class GradeEntry(osid_objects.OsidRelationship):
    """A ``GradeEntry`` represents an entry in a ``Gradebook``."""

    def get_gradebook_column_id(self):
        """Gets the ``Id`` of the ``GradebookColumn``.

        :return: the ``Id`` of the ``GradebookColumn``
        :rtype: ``osid.id.Id``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    gradebook_column_id = property(fget=get_gradebook_column_id)

    def get_gradebook_column(self):
        """Gets the ``GradebookColumn``.

        :return: the ``GradebookColumn``
        :rtype: ``osid.grading.GradebookColumn``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.GradebookColumn

    gradebook_column = property(fget=get_gradebook_column)

    def get_key_resource_id(self):
        """Gets the ``Id`` of the key resource of this entry.

        The key resource may be a student or other applicable key to
        identify a row of grading entries.

        :return: ``Id`` of the key resource
        :rtype: ``osid.id.Id``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    key_resource_id = property(fget=get_key_resource_id)

    def get_key_resource(self):
        """Gets the key resource of this entry.

        The key resource may be a student or other applicable key to
        identify a row of grading entries.

        :return: the key resource
        :rtype: ``osid.resource.Resource``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.resource.Resource

    key_resource = property(fget=get_key_resource)

    def is_derived(self):
        """Tests if this is a calculated entry.

        :return: ``true`` if this entry is a calculated entry, ``false`` otherwise. If ``true,`` then ``overrides_calculated_entry()`` must be ``false``.
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    def overrides_calculated_entry(self):
        """Tests if this is a manual entry that overrides a calculated entry.

        :return: ``true`` if this entry overrides a calculated entry, ``false`` otherwise. If ``true,`` then ``is_derived()`` must be ``false``.
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    def get_overridden_calculated_entry_id(self):
        """Gets the calculated entry ``Id`` this entry overrides.

        :return: the calculated entry ``Id``
        :rtype: ``osid.id.Id``
        :raise: ``IllegalState`` -- ``overrides_calculated_entry()`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    overridden_calculated_entry_id = property(fget=get_overridden_calculated_entry_id)

    def get_overridden_calculated_entry(self):
        """Gets the calculated entry this entry overrides.

        :return: the calculated entry
        :rtype: ``osid.grading.GradeEntry``
        :raise: ``IllegalState`` -- ``overrides_calculated_entry()`` is ``false``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.GradeEntry

    overridden_calculated_entry = property(fget=get_overridden_calculated_entry)

    def is_ignored_for_calculations(self):
        """Tests if this entry should be ignored in any averaging, scaling or curve calculation.

        :return: ``true`` if this entry is ignored, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    def is_graded(self):
        """Tests if a grade or score has been assigned to this entry.

        Generally, an entry is created with a grade or score.

        :return: ``true`` if a grade has been assigned, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    def get_grade_id(self):
        """Gets the grade ``Id`` in this entry if the grading system is based on grades.

        :return: the grade ``Id``
        :rtype: ``osid.id.Id``
        :raise: ``IllegalState`` -- ``is_graded()`` is ``false`` or ``GradeSystem.isBasedOnGrades()`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    grade_id = property(fget=get_grade_id)

    def get_grade(self):
        """Gets the grade in this entry if the grading system is based on grades.

        :return: the grade
        :rtype: ``osid.grading.Grade``
        :raise: ``IllegalState`` -- ``is_graded()`` is ``false`` or ``GradeSystem.isBasedOnGrades()`` is ``false``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.Grade

    grade = property(fget=get_grade)

    def get_score(self):
        """Gets the score in this entry if the grading system is not based on grades.

        :return: the score
        :rtype: ``decimal``
        :raise: ``IllegalState`` -- ``is_graded()`` is ``false`` or ``GradeSystem.isBasedOnGrades()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # decimal

    score = property(fget=get_score)

    def get_time_graded(self):
        """Gets the time the gradeable object was graded.

        :return: the timestamp of the grading entry
        :rtype: ``osid.calendaring.DateTime``
        :raise: ``IllegalState`` -- ``is_graded()`` is ``false`` or ``is_derived()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.calendaring.DateTime

    time_graded = property(fget=get_time_graded)

    def get_grader_id(self):
        """Gets the ``Id`` of the ``Resource`` that created this entry.

        :return: the ``Id`` of the ``Resource``
        :rtype: ``osid.id.Id``
        :raise: ``IllegalState`` -- ``is_graded()`` is ``false`` or ``is_derived()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    grader_id = property(fget=get_grader_id)

    def get_grader(self):
        """Gets the ``Resource`` that created this entry.

        :return: the ``Resource``
        :rtype: ``osid.resource.Resource``
        :raise: ``IllegalState`` -- ``is_graded() is false or is_derived() is true``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.resource.Resource

    grader = property(fget=get_grader)

    def get_grading_agent_id(self):
        """Gets the ``Id`` of the ``Agent`` that created this entry.

        :return: the ``Id`` of the ``Agent``
        :rtype: ``osid.id.Id``
        :raise: ``IllegalState`` -- ``is_graded()`` is ``false`` or ``is_derived()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    grading_agent_id = property(fget=get_grading_agent_id)

    def get_grading_agent(self):
        """Gets the ``Agent`` that created this entry.

        :return: the ``Agent``
        :rtype: ``osid.authentication.Agent``
        :raise: ``IllegalState`` -- ``is_graded() is false or is_derived() is true``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.authentication.Agent

    grading_agent = property(fget=get_grading_agent)

    def get_grade_entry_record(self, grade_entry_record_type):
        """Gets the grade entry record corresponding to the given ``GradeEntry`` record ``Type``.

        This method is used to retrieve an object implementing the
        requested record. The ``grade_entry_record_type`` may be the
        ``Type`` returned in ``get_record_types()`` or any of its
        parents in a ``Type`` hierarchy where
        ``has_record_type(grade_entry_record_type)`` is ``true`` .

        :param grade_entry_record_type: the type of the record to retrieve
        :type grade_entry_record_type: ``osid.type.Type``
        :return: the grade entry record
        :rtype: ``osid.grading.records.GradeEntryRecord``
        :raise: ``NullArgument`` -- ``grade_entry_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(grade_entry_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.records.GradeEntryRecord
class GradeEntryForm(osid_objects.OsidRelationshipForm):
    """This is the form for creating and updating ``GradeEntries``.

    Like all ``OsidForm`` objects, various data elements may be set here
    for use in the create and update methods in the
    ``GradeEntryAdminSession``. For each data element that may be set,
    metadata may be examined to provide display hints or data
    constraints.

    """

    def get_ignored_for_calculations_metadata(self):
        """Gets the metadata for the ignore flag.

        :return: metadata for the ignore flag
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    ignored_for_calculations_metadata = property(fget=get_ignored_for_calculations_metadata)

    def set_ignored_for_calculations(self, ignore):
        """Sets the ignore for calculations flag.

        :param ignore: the new ignore flag
        :type ignore: ``boolean``
        :raise: ``InvalidArgument`` -- ``ignore`` is invalid
        :raise: ``NoAccess`` -- ``ignore`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_ignored_for_calculations(self):
        """Clears the ignore for calculations flag.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    ignored_for_calculations = property(fset=set_ignored_for_calculations, fdel=clear_ignored_for_calculations)

    def get_grade_metadata(self):
        """Gets the metadata for a grade.

        :return: metadata for the grade
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    grade_metadata = property(fget=get_grade_metadata)

    def set_grade(self, grade_id):
        """Sets the grade.

        :param grade_id: the new grade
        :type grade_id: ``osid.id.Id``
        :raise: ``InvalidArgument`` -- ``grade_id`` is invalid or ``GradebookColumn.getGradeSystem().isBasedOnGrades()`` is ``false``
        :raise: ``NoAccess`` -- ``grade_id`` cannot be modified
        :raise: ``NullArgument`` -- ``grade_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_grade(self):
        """Clears the grade.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    grade = property(fset=set_grade, fdel=clear_grade)

    def get_score_metadata(self):
        """Gets the metadata for a score.

        :return: metadata for the score
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    score_metadata = property(fget=get_score_metadata)

    def set_score(self, score):
        """Sets the score.

        :param score: the new score
        :type score: ``decimal``
        :raise: ``InvalidArgument`` -- ``score`` is invalid or ``GradebookColumn.getGradeSystem().isBasedOnGrades()`` is ``true``
        :raise: ``NoAccess`` -- ``score`` cannot be modified

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_score(self):
        """Clears the score.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    score = property(fset=set_score, fdel=clear_score)

    def get_grade_entry_form_record(self, grade_entry_record_type):
        """Gets the ``GradeEntryFormRecord`` corresponding to the given grade entry record ``Type``.

        :param grade_entry_record_type: the grade entry record type
        :type grade_entry_record_type: ``osid.type.Type``
        :return: the grade entry form record
        :rtype: ``osid.grading.records.GradeEntryFormRecord``
        :raise: ``NullArgument`` -- ``grade_entry_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(grade_entry_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.records.GradeEntryFormRecord
class GradeEntryList(osid_objects.OsidList):
    """An ``OsidList`` of ``GradeEntry`` elements.

    Like all ``OsidLists,`` a ``GradeEntryList`` provides sequential
    access one element at a time or many at a time:

      while (gel.hasNext()) { GradeEntry entry = gel.getNextGradeEntry(); }

    or

      while (gel.hasNext()) {
          GradeEntry[] entries = gel.getNextGradeEntries(gel.available());
      }

    """

    def get_next_grade_entry(self):
        """Return the next ``GradeEntry`` in this list.

        Callers should use ``has_next()`` to test that a next
        ``GradeEntry`` is available before invoking this method.

        :return: the next ``GradeEntry`` in this list
        :rtype: ``osid.grading.GradeEntry``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.GradeEntry

    next_grade_entry = property(fget=get_next_grade_entry)

    def get_next_grade_entries(self, n):
        """Return the next batch of ``GradeEntry`` elements from this list.

        :param n: the number of ``GradeEntry`` elements requested; should be
            less than or equal to ``available()``
        :type n: ``cardinal``
        :return: an array of at most ``n`` ``GradeEntry`` elements
        :rtype: ``osid.grading.GradeEntry``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.GradeEntry
class GradebookColumn(osid_objects.OsidObject):
    """A ``GradebookColumn`` represents a series of grade entries in a gradebook.

    Every ``GradeEntry`` in a column shares the same ``GradeSystem``.

    """

    def get_grade_system_id(self):
        """Return the ``Id`` of the ``GradeSystem`` used by this column.

        :return: the grade system ``Id``
        :rtype: ``osid.id.Id``

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.id.Id

    grade_system_id = property(fget=get_grade_system_id)

    def get_grade_system(self):
        """Return the ``GradeSystem`` used by this column.

        :return: the grade system
        :rtype: ``osid.grading.GradeSystem``
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.GradeSystem

    grade_system = property(fget=get_grade_system)

    def get_gradebook_column_record(self, gradebook_column_record_type):
        """Return the record matching the given ``GradebookColumn`` record ``Type``.

        This method is used to retrieve an object implementing the
        requested record. The ``gradebook_column_record_type`` may be
        the ``Type`` returned in ``get_record_types()`` or any of its
        parents in a ``Type`` hierarchy where
        ``has_record_type(gradebook_column_record_type)`` is ``true``.

        :param gradebook_column_record_type: the type of the record to retrieve
        :type gradebook_column_record_type: ``osid.type.Type``
        :return: the gradebook column record
        :rtype: ``osid.grading.records.GradebookColumnRecord``
        :raise: ``NullArgument`` -- ``gradebook_column_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(gradebook_column_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return None  # osid.grading.records.GradebookColumnRecord
class GradebookColumnForm(osid_objects.OsidObjectForm):
    """This is the form for creating and updating ``GradebookColumns``.

    Like all ``OsidForm`` objects, various data elements may be set here
    for use in the create and update methods in the
    ``GradebookAdminSession``. For each data element that may be set,
    metadata may be examined to provide display hints or data
    constraints.

    """

    def get_grade_system_metadata(self):
        """Gets the metadata for a grade system.

        :return: metadata for the grade system
        :rtype: ``osid.Metadata``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.Metadata

    grade_system_metadata = property(fget=get_grade_system_metadata)

    def set_grade_system(self, grade_system_id):
        """Sets the grade system.

        :param grade_system_id: the new grade system
        :type grade_system_id: ``osid.id.Id``
        :raise: ``InvalidArgument`` -- ``grade_system_id`` is invalid
        :raise: ``NoAccess`` -- ``grade_system_id`` cannot be modified
        :raise: ``NullArgument`` -- ``grade_system_id`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    def clear_grade_system(self):
        """Clears the grade system.

        :raise: ``NoAccess`` -- ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    grade_system = property(fset=set_grade_system, fdel=clear_grade_system)

    def get_gradebook_column_form_record(self, gradebook_column_record_type):
        """Gets the ``GradebookColumnFormRecord`` corresponding to the given gradebook column record ``Type``.

        :param gradebook_column_record_type: a gradebook column record type
        :type gradebook_column_record_type: ``osid.type.Type``
        :return: the gradebook column form record
        :rtype: ``osid.grading.records.GradebookColumnFormRecord``
        :raise: ``NullArgument`` -- ``gradebook_column_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(gradebook_column_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.grading.records.GradebookColumnFormRecord
class GradebookColumnList(osid_objects.OsidList):
    """Sequential accessor for ``GradebookColumn`` elements.

    Like every ``OsidList``, a ``GradebookColumnList`` yields its
    elements either one at a time or in batches.

    Examples: while (gcl.hasNext()) { GradebookColumn column =
    gcl.getNextGradebookColumn(); }

    or
      while (gcl.hasNext()) {
           GradebookColumn[] columns = gcl.getNextGradebookColumns(gcl.available());
      }
    """

    def get_next_gradebook_column(self):
        """Return the next ``GradebookColumn`` in this list.

        Callers should check ``has_next()`` before invoking this method.

        :return: the next ``GradebookColumn`` in this list. The ``has_next()`` method should be used to test that a next ``GradebookColumn`` is available before calling this method.
        :rtype: ``osid.grading.GradebookColumn``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.GradebookColumn

    next_gradebook_column = property(fget=get_next_gradebook_column)

    def get_next_gradebook_columns(self, n):
        """Return the next batch of ``GradebookColumn`` elements.

        :param n: the number of ``GradebookColumn`` elements requested which must be less than or equal to ``available()``
        :type n: ``cardinal``
        :return: an array of ``GradebookColumn`` elements.The length of the array is less than or equal to the number specified.
        :rtype: ``osid.grading.GradebookColumn``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.GradebookColumn
class GradebookColumnSummary(osid_objects.OsidObject):
    """A ``GradebookColumnSummary`` is a summary of all entries within a gradebook column."""
    def get_gradebook_column_id(self):
        """Gets the ``Id`` of the ``GradebookColumn``.
        :return: the ``Id`` of the ``GradebookColumn``
        :rtype: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.id.Id
    gradebook_column_id = property(fget=get_gradebook_column_id)
    def get_gradebook_column(self):
        """Gets the ``GradebookColumn``.
        :return: the ``GradebookColumn``
        :rtype: ``osid.grading.GradebookColumn``
        :raise: ``OperationFailed`` -- unable to complete request
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.grading.GradebookColumn
    gradebook_column = property(fget=get_gradebook_column)
    def get_mean(self):
        """Gets the mean score.
        If this system is based on grades, the mean output score is
        returned.
        :return: the mean score
        :rtype: ``decimal``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # decimal
    mean = property(fget=get_mean)
    def get_median(self):
        """Gets the median score.
        If this system is based on grades, the median output score is
        returned.
        :return: the median score
        :rtype: ``decimal``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # decimal
    median = property(fget=get_median)
    def get_mode(self):
        """Gets the mode of the score.
        If this system is based on grades, the mode of the output score
        is returned.
        :return: the mode of the score
        :rtype: ``decimal``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # decimal
    mode = property(fget=get_mode)
    def get_rms(self):
        """Gets the root mean square of the score.
        If this system is based on grades, the RMS of the output score
        is returned.
        :return: the root mean square of the score
        :rtype: ``decimal``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # decimal
    rms = property(fget=get_rms)
    def get_standard_deviation(self):
        """Gets the standard deviation.
        If this system is based on grades, the spread of the output
        scores is returned.
        :return: the standard deviation
        :rtype: ``decimal``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # decimal
    standard_deviation = property(fget=get_standard_deviation)
    def get_sum(self):
        """Gets the sum of the scores.
        If this system is based on grades, the sum of the output scores
        is returned.
        :return: the sum of the scores
        :rtype: ``decimal``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # decimal
    sum = property(fget=get_sum)
    def get_gradebook_column_summary_record(self, gradebook_column_summary_record_type):
        """Gets the gradebook column summary record corresponding to the given ``GradebookColumnSummary`` record ``Type``.
        This method is used to retrieve an object implementing the
        requested record. The ``gradebook_column_summary_record_type``
        may be the ``Type`` returned in ``get_record_types()`` or any of
        its parents in a ``Type`` hierarchy where
        ``has_record_type(gradebook_column_summary_record_type)`` is
        ``true`` .
        :param gradebook_column_summary_record_type: the type of the record to retrieve
        :type gradebook_column_summary_record_type: ``osid.type.Type``
        :return: the gradebook column summary record
        :rtype: ``osid.grading.records.GradebookColumnSummaryRecord``
        :raise: ``NullArgument`` -- ``gradebook_column_summary_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(gradebook_column_summary_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.grading.records.GradebookColumnSummaryRecord
class Gradebook(osid_objects.OsidCatalog, osid_sessions.OsidSession):
    """A gradebook defines a collection of grade entries."""

    def get_gradebook_record(self, gradebook_record_type):
        """Return the gradebook record matching the given ``Gradebook`` record ``Type``.

        This method retrieves an object implementing the requested
        record. The ``gradebook_record_type`` may be the ``Type``
        returned in ``get_record_types()`` or any of its parents in a
        ``Type`` hierarchy where
        ``has_record_type(gradebook_record_type)`` is ``true`` .

        :param gradebook_record_type: a gradebook record type
        :type gradebook_record_type: ``osid.type.Type``
        :return: the gradebook record
        :rtype: ``osid.grading.records.GradebookRecord``
        :raise: ``NullArgument`` -- ``gradebook_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(gradebook_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.records.GradebookRecord
class GradebookForm(osid_objects.OsidCatalogForm):
    """Form used to create and update ``Gradebooks``.

    As with every ``OsidForm``, the data elements set on this form are
    consumed by the create and update methods of the
    ``GradebookAdminSession``. Metadata is available for each settable
    element to supply display hints and value constraints.
    """

    def get_gradebook_form_record(self, gradebook_record_type):
        """Return the ``GradebookFormRecord`` matching the given gradebook record ``Type``.

        :param gradebook_record_type: a gradebook record type
        :type gradebook_record_type: ``osid.type.Type``
        :return: the gradebook form record
        :rtype: ``osid.grading.records.GradebookFormRecord``
        :raise: ``NullArgument`` -- ``gradebook_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(gradebook_record_type)`` is ``false``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.records.GradebookFormRecord
class GradebookList(osid_objects.OsidList):
    """Sequential accessor for ``Gradebook`` elements.

    Like every ``OsidList``, a ``GradebookList`` yields its elements
    either one at a time or in batches.

    Examples: while (gl.hasNext()) { Gradebook gradebook =
    gl.getNextGradebook(); }

    or
      while (gl.hasNext()) {
           Gradebook[] gradebooks = gl.getNextGradebooks(gl.available());
      }
    """

    def get_next_gradebook(self):
        """Return the next ``Gradebook`` in this list.

        Callers should check ``has_next()`` before invoking this method.

        :return: the next ``Gradebook`` in this list. The ``has_next()`` method should be used to test that a next ``Gradebook`` is available before calling this method.
        :rtype: ``osid.grading.Gradebook``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.Gradebook

    next_gradebook = property(fget=get_next_gradebook)

    def get_next_gradebooks(self, n):
        """Return the next batch of ``Gradebook`` elements.

        :param n: the number of ``Gradebook`` elements requested which must be less than or equal to ``available()``
        :type n: ``cardinal``
        :return: an array of ``Gradebook`` elements.The length of the array is less than or equal to the number specified.
        :rtype: ``osid.grading.Gradebook``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.Gradebook
class GradebookNode(osid_objects.OsidNode):
    """Container for a partial ``Gradebook`` hierarchy retrieval.

    How many hierarchy levels can be traversed through this interface
    depends on the number of levels requested in the
    ``GradebookHierarchySession``.
    """

    def get_gradebook(self):
        """Return the ``Gradebook`` represented by this node.

        :return: the gradebook represented by this node
        :rtype: ``osid.grading.Gradebook``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.Gradebook

    gradebook = property(fget=get_gradebook)

    def get_parent_gradebook_nodes(self):
        """Return the parent nodes of this gradebook.

        :return: the parents of the ``id``
        :rtype: ``osid.grading.GradebookNodeList``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.GradebookNodeList

    parent_gradebook_nodes = property(fget=get_parent_gradebook_nodes)

    def get_child_gradebook_nodes(self):
        """Return the child nodes of this gradebook.

        :return: the children of this gradebook
        :rtype: ``osid.grading.GradebookNodeList``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.GradebookNodeList

    child_gradebook_nodes = property(fget=get_child_gradebook_nodes)
class GradebookNodeList(osid_objects.OsidList):
    """Sequential accessor for ``GradebookNode`` elements.

    Like every ``OsidList``, a ``GradebookNodeList`` yields its
    elements either one at a time or in batches.

    Examples: while (gnl.hasNext()) { GradebookNode node =
    gnl.getNextGradebookNode(); }

    or
      while (gnl.hasNext()) {
           GradebookNode[] nodes = gnl.getNextGradebookNodes(gnl.available());
      }
    """

    def get_next_gradebook_node(self):
        """Return the next ``GradebookNode`` in this list.

        Callers should check ``has_next()`` before invoking this method.

        :return: the next ``GradebookNode`` in this list. The ``has_next()`` method should be used to test that a next ``GradebookNode`` is available before calling this method.
        :rtype: ``osid.grading.GradebookNode``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.GradebookNode

    next_gradebook_node = property(fget=get_next_gradebook_node)

    def get_next_gradebook_nodes(self, n):
        """Return the next batch of ``GradebookNode`` elements.

        :param n: the number of ``GradebookNode`` elements requested which must be less than or equal to ``available()``
        :type n: ``cardinal``
        :return: an array of ``GradebookNode`` elements.The length of the array is less than or equal to the number specified.
        :rtype: ``osid.grading.GradebookNode``
        :raise: ``IllegalState`` -- no more elements available in this list
        :raise: ``OperationFailed`` -- unable to complete request

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.grading.GradebookNode
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.